From e54a8c4b5753d1c427c8fec7f9e43e7c22ec00cc Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 30 Nov 2016 22:21:56 +0300 Subject: mwifiex: clean up some messy indenting These lines were indented one tab extra. Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 125e448712dd..ad2dfd8135b8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1935,8 +1935,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, mwifiex_dbg(priv->adapter, ERROR, "0x%x command not supported by firmware\n", cmd_no); - return -EOPNOTSUPP; - } + return -EOPNOTSUPP; + } /* Prepare command */ switch (cmd_no) { -- cgit v1.2.3 From d15697de60db5570532fdedb8e13b2251d65b8e3 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 3 Dec 2016 00:52:46 +0300 Subject: adm80211: add checks for dma mapping errors The driver does not check if mapping dma memory succeed. The patch adds the checks and failure handling. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: Kalle Valo --- drivers/net/wireless/admtek/adm8211.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index 70ecd82d674d..2b4a3eb38dfa 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -413,6 +413,13 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev) skb_tail_pointer(newskb), RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(priv->pdev, + priv->rx_buffers[entry].mapping)) { + priv->rx_buffers[entry].skb = NULL; + dev_kfree_skb(newskb); + skb = NULL; + /* TODO: update rx dropped stats */ + } } else { skb = NULL; /* TODO: update rx dropped stats */ @@ -1450,6 +1457,12 @@ static int adm8211_init_rings(struct ieee80211_hw *dev) skb_tail_pointer(rx_info->skb), RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) { + dev_kfree_skb(rx_info->skb); + rx_info->skb = NULL; + break; + } + desc->buffer1 = cpu_to_le32(rx_info->mapping); desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL); } @@ -1613,7 +1626,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int } /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ -static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, +static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, u16 plcp_signal, size_t hdrlen) { @@ -1625,6 +1638,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pdev, mapping)) + return -ENOMEM; spin_lock_irqsave(&priv->lock, flags); @@ -1657,6 +1672,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, /* Trigger transmit poll */ ADM8211_CSR_WRITE(TDR, 0); + + return 0; } /* Put adm8211_tx_hdr on skb and transmit */ @@ -1710,7 +1727,10 @@ static void adm8211_tx(struct ieee80211_hw *dev, txhdr->retry_limit = info->control.rates[0].count; - adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); + if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) { + /* Drop 
packet */ + ieee80211_free_txskb(dev, skb); + } } static int adm8211_alloc_rings(struct ieee80211_hw *dev) -- cgit v1.2.3 From 74204f8fa117e7d0fa1d1a7a57c3c97df83ad418 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 13 Dec 2016 09:38:25 +0100 Subject: rfkill: simplify rfkill_set_hw_state() slightly Simplify the two conditions gating the schedule_work() into a single one and get rid of the additional exit point from the function in doing so. Signed-off-by: Johannes Berg --- net/rfkill/core.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 884027f62783..184bb711a06d 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -478,10 +478,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) rfkill_led_trigger_event(rfkill); - if (!rfkill->registered) - return ret; - - if (prev != blocked) + if (rfkill->registered && prev != blocked) schedule_work(&rfkill->uevent_work); return ret; -- cgit v1.2.3 From 543b921b475ad2e1897d5e7784437af238b33705 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Thu, 17 Nov 2016 12:48:53 +0000 Subject: cfg80211: get rid of name indirection trick for ieee80211_get_channel() The comment on the name indirection suggested an issue but turned out to be untrue. Digging in older kernel version showed issue with ipw2x00 but that is no longer true so get rid on the name indirection. Signed-off-by: Arend van Spriel Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath10k/htt_rx.c | 3 +-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 +- include/net/cfg80211.h | 17 +++-------------- net/wireless/util.c | 5 ++--- 4 files changed, 7 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 86d082cf4eef..0bc7fe8c4b52 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2451,8 +2451,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) u32 phymode = __le32_to_cpu(resp->chan_change.phymode); u32 freq = __le32_to_cpu(resp->chan_change.freq); - ar->tgt_oper_chan = - __ieee80211_get_channel(ar->hw->wiphy, freq); + ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt chan change freq %u phymode %s\n", freq, ath10k_wmi_phymode_str(phymode)); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 145cc4b5103b..1e3bd435a694 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2078,7 +2078,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) ie_len = ie_buf[1] + sizeof(struct ieee_types_header); band = mwifiex_band_to_radio_type(priv->curr_bss_params.band); - chan = __ieee80211_get_channel(priv->wdev.wiphy, + chan = ieee80211_get_channel(priv->wdev.wiphy, ieee80211_channel_to_frequency(bss_info.bss_chan, band)); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 814be4b4200c..ca2ac1ce5862 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3955,26 +3955,15 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band); */ int ieee80211_frequency_to_channel(int freq); -/* - * Name indirection necessary because the ieee80211 code also has - * a function named "ieee80211_get_channel", so if you include - * cfg80211's header file you get cfg80211's version, if you try - * to include both header files 
you'll (rightfully!) get a symbol - * clash. - */ -struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, - int freq); /** * ieee80211_get_channel - get channel struct from wiphy for specified frequency + * * @wiphy: the struct wiphy to get the channel for * @freq: the center frequency of the channel + * * Return: The channel struct from @wiphy at @freq. */ -static inline struct ieee80211_channel * -ieee80211_get_channel(struct wiphy *wiphy, int freq) -{ - return __ieee80211_get_channel(wiphy, freq); -} +struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq); /** * ieee80211_get_response_rate - get basic rate for a given rate diff --git a/net/wireless/util.c b/net/wireless/util.c index e9d040d29846..2cf7df8173b6 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -114,8 +114,7 @@ int ieee80211_frequency_to_channel(int freq) } EXPORT_SYMBOL(ieee80211_frequency_to_channel); -struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, - int freq) +struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq) { enum nl80211_band band; struct ieee80211_supported_band *sband; @@ -135,7 +134,7 @@ struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, return NULL; } -EXPORT_SYMBOL(__ieee80211_get_channel); +EXPORT_SYMBOL(ieee80211_get_channel); static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, enum nl80211_band band) -- cgit v1.2.3 From 5a88de5342f3090d8292132a392094034d85d101 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Thu, 17 Nov 2016 09:02:40 +0000 Subject: nl80211: check NL80211_ATTR_SCHED_SCAN_INTERVAL only once The presence of the NL80211_ATTR_SCHED_SCAN_INTERVAL attribute was checked in nl80211_parse_sched_scan() and nl80211_parse_sched_scan_plans() which might be a bit redundant so removing one. Signed-off-by: Arend van Spriel Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 3df85a751a85..db1a434f6169 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6775,13 +6775,10 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans, /* * If scan plans are not specified, - * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this + * %NL80211_ATTR_SCHED_SCAN_INTERVAL will be specified. In this * case one scan plan will be set with the specified scan * interval and infinite number of iterations. */ - if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) - return -EINVAL; - interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]); if (!interval) return -EINVAL; -- cgit v1.2.3 From b528414ca3b7b57f4f81b7ff207e6f8ffde89d2e Mon Sep 17 00:00:00 2001 From: Michael Braun Date: Mon, 31 Oct 2016 14:40:59 +0100 Subject: nl80211: multicast_to_unicast can be changed while IFF_UP There is no need to prevent toggling multicast_to_unicast while interface is already up. This change simplifies reconfiguration from hostapd. 
Signed-off-by: Michael Braun Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index db1a434f6169..7762231abd32 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -11804,9 +11804,6 @@ static int nl80211_set_multicast_to_unicast(struct sk_buff *skb, const struct nlattr *nla; bool enabled; - if (netif_running(dev)) - return -EBUSY; - if (!rdev->ops->set_multicast_to_unicast) return -EOPNOTSUPP; -- cgit v1.2.3 From 99cd25810e326c62f5a51fa6f0dd1e848060680f Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Mon, 21 Nov 2016 21:44:24 -0800 Subject: mac80211: Remove unused 'struct ieee80211_rx_status' ptr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 554891e63a29 introduced 'struct ieee80211_rx_status' in ieee80211_rx_h_defragment but did not use it. Compiling with W=1 gives the following warning, fix it. net/mac80211/rx.c: In function ‘ieee80211_rx_h_defragment’: net/mac80211/rx.c:1911:30: warning: variable ‘status’ set but not used [-Wunused-but-set-variable] Fixes: 554891e63a29 ("mac80211: move packet flags into packet") Cc: Johannes Berg Cc: John W. Linville Signed-off-by: Kirtika Ruchandani Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index eeab7250f4b9..d2a00f2a6efe 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1908,7 +1908,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) unsigned int frag, seq; struct ieee80211_fragment_entry *entry; struct sk_buff *skb; - struct ieee80211_rx_status *status; hdr = (struct ieee80211_hdr *)rx->skb->data; fc = hdr->frame_control; @@ -2034,9 +2033,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) dev_kfree_skb(skb); } - /* Complete frame has been reassembled - process it now */ - status = IEEE80211_SKB_RXCB(rx->skb); - out: ieee80211_led_rx(rx->local); out_no_led: -- cgit v1.2.3 From 872684b1f7de40b95dac51245f3aad208528a6c3 Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Mon, 21 Nov 2016 22:04:46 -0800 Subject: mac80211: Remove unused 'rates_idx' variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit f027c2aca0cf introduced 'rates_idx' in ieee80211_tx_status_noskb but did not use it. Compiling with W=1 gives the following warning, fix it. mac80211/status.c: In function ‘ieee80211_tx_status_noskb’: mac80211/status.c:636:6: warning: variable ‘rates_idx’ set but not used [-Wunused-but-set-variable] This is a harmless warning, and is only being fixed to reduce the noise generated with W=1. 
Fixes: f027c2aca0cf ("mac80211: add ieee80211_tx_status_noskb") Cc: Johannes Berg Cc: Felix Fietkau Signed-off-by: Kirtika Ruchandani Signed-off-by: Johannes Berg --- net/mac80211/status.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/mac80211/status.c b/net/mac80211/status.c index ddf71c648cab..f7c5ae597639 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -633,10 +633,9 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_supported_band *sband; int retry_count; - int rates_idx; bool acked, noack_success; - rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); + ieee80211_tx_get_rates(hw, info, &retry_count); sband = hw->wiphy->bands[info->band]; -- cgit v1.2.3 From fb803becb1180d3f940a634f073981e0b72d7030 Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Mon, 21 Nov 2016 22:54:16 -0800 Subject: mac80211: Remove unused 'struct rate_control_ref' variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 3b17fbf87d5d introduced sta_get_expected_throughput() leaving variable 'struct rate_control_ref* ref' set but unused. Compiling with W=1 gives the following warning, fix it. net/mac80211/sta_info.c: In function ‘sta_set_sinfo’: net/mac80211/sta_info.c:2052:27: warning: variable ‘ref’ set but not used [-Wunused-but-set-variable] Fixes: 3b17fbf87d5d ("mac80211: mesh: Add support for HW RC implementation") Cc: Johannes Berg Cc: Maxim Altshul Signed-off-by: Kirtika Ruchandani Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 1711bae4abf2..4ab75a9d70c7 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2049,16 +2049,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; - struct rate_control_ref *ref = NULL; u32 thr = 0; int i, ac, cpu; struct ieee80211_sta_rx_stats *last_rxstats; last_rxstats = sta_get_last_rx_stats(sta); - if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) - ref = local->rate_ctrl; - sinfo->generation = sdata->local->sta_generation; /* do before driver, so beacon filtering drivers have a -- cgit v1.2.3 From b7f2405c6bd8ae0125a974308f649637cdc81f80 Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Wed, 23 Nov 2016 20:45:36 -0800 Subject: mac80211: Remove unused 'i' variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 5bcae31d9 (mac80211: implement multi-vif in-place reservations) introduced ieee80211_vif_use_reserved_switch() with a counter variable 'i' that is set but not used. Compiling with W=1 gives the following warning, fix it. net/mac80211/chan.c: In function ‘ieee80211_vif_use_reserved_switch’: net/mac80211/chan.c:1273:6: warning: variable ‘i’ set but not used [-Wunused-but-set-variable] This is a harmless warning, and is only being fixed to reduce the noise obtained with W=1 in the kernel. 
Fixes: 5bcae31d9 ("mac80211: implement multi-vif in-place reservations") Cc: Michal Kazior Cc: Johannes Berg Signed-off-by: Kirtika Ruchandani Signed-off-by: Johannes Berg --- net/mac80211/chan.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e75cbf6ecc26..7550fd264286 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -1270,7 +1270,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) struct ieee80211_sub_if_data *sdata, *sdata_tmp; struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx; struct ieee80211_chanctx *new_ctx = NULL; - int i, err, n_assigned, n_reserved, n_ready; + int err, n_assigned, n_reserved, n_ready; int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0; lockdep_assert_held(&local->mtx); @@ -1391,8 +1391,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) * Update all structures, values and pointers to point to new channel * context(s). */ - - i = 0; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; -- cgit v1.2.3 From cd5861bde0299fd5189a6b9e0d1e31e205064fc6 Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Wed, 23 Nov 2016 20:45:49 -0800 Subject: mac80211: Remove unused 'len' variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 633e27132625 (mac80211: split sched scan IEs) introduced the len variable to keep track of the return value of ieee80211_build_preq_ies() but did not use it. Compiling with W=1 gives the following warning, fix it. net/mac80211/scan.c: In function ‘__ieee80211_request_sched_scan_start’: net/mac80211/scan.c:1123:9: warning: variable ‘len’ set but not used [-Wunused-but-set-variable] This is a harmless warning and is only being fixed to reduce the noise with W=1 in the kernel. 
Fixes: 633e27132625 ("mac80211: split sched scan IEs") Cc: David Spinadel Cc: Alexander Bondar Cc: Johannes Berg Signed-off-by: Kirtika Ruchandani Signed-off-by: Johannes Berg --- net/mac80211/scan.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 23d8ac829279..faab3c490d2b 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -1120,7 +1120,6 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, u32 rate_masks[NUM_NL80211_BANDS] = {}; u8 bands_used = 0; u8 *ie; - size_t len; iebufsz = local->scan_ies_len + req->ie_len; @@ -1145,10 +1144,9 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, ieee80211_prepare_scan_chandef(&chandef, req->scan_width); - len = ieee80211_build_preq_ies(local, ie, num_bands * iebufsz, - &sched_scan_ies, req->ie, - req->ie_len, bands_used, - rate_masks, &chandef); + ieee80211_build_preq_ies(local, ie, num_bands * iebufsz, + &sched_scan_ies, req->ie, + req->ie_len, bands_used, rate_masks, &chandef); ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); if (ret == 0) { -- cgit v1.2.3 From 148f284b23061b6de4024423a6606437cc427f1c Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Wed, 23 Nov 2016 20:46:14 -0800 Subject: mac80211: Remove unused 'sband' and 'local' variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b1bce14a7954 (mac80211: update opmode when adding new station) refactored ieee80211_vht_handle_opmode into __ieee80211_vht_handle_opmode and ieee80211_vht_handle_opmode leaving a set but unused variable (sband) in the former. Compiling with W=1 gives the following warning, fix it. net/mac80211/vht.c: In function ‘__ieee80211_vht_handle_opmode’: net/mac80211/vht.c:424:35: warning: variable ‘sband’ set but not used [-Wunused-but-set-variable] Remove 'struct ieee80211_local* local' as well, it was only used to set sband. This is a harmless warning, and is only being fixed to reduce the noise with W=1 in the kernel. Fixes: b1bce14a7954 ("mac80211: update opmode when adding new station") Cc: Marek Kwaczynski Cc: Johannes Berg Signed-off-by: Kirtika Ruchandani Signed-off-by: Johannes Berg --- net/mac80211/vht.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 6832bf6ab69f..d5a56e039106 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -436,14 +436,10 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, enum nl80211_band band) { - struct ieee80211_local *local = sdata->local; - struct ieee80211_supported_band *sband; enum ieee80211_sta_rx_bandwidth new_bw; u32 changed = 0; u8 nss; - sband = local->hw.wiphy->bands[band]; - /* ignore - no support for BF yet */ if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF) return 0; -- cgit v1.2.3 From ab7257251cc63eef5fa6702c3c616c6e2524d777 Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Wed, 23 Nov 2016 20:46:27 -0800 Subject: mac80211: Remove unused 'beaconint_us' variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 4a733ef1bea7 (mac80211: remove PM-QoS listener) removed all use of 'beaconint_us' from ieee80211_recalc_ps() but left the variable intact. Compiling with W=1 gives the following warning, fix it. 
net/mac80211/mlme.c: In function ‘ieee80211_recalc_ps’: net/mac80211/mlme.c:1481:7: warning: variable ‘beaconint_us’ set but not used [-Wunused-but-set-variable] iee80211_tu_to_usec has no side-effects and is safe to remove. Fixes: 4a733ef1bea7 ("mac80211: remove PM-QoS listener") Cc: Johannes Berg Signed-off-by: Kirtika Ruchandani Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 098ce9b179ee..8a6344518674 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1486,10 +1486,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local) if (count == 1 && ieee80211_powersave_allowed(found)) { u8 dtimper = found->u.mgd.dtim_period; - s32 beaconint_us; - - beaconint_us = ieee80211_tu_to_usec( - found->vif.bss_conf.beacon_int); timeout = local->dynamic_ps_forced_timeout; if (timeout < 0) -- cgit v1.2.3 From ebceec860fc370b2f4c23e95c51daa932e047913 Mon Sep 17 00:00:00 2001 From: Michael Braun Date: Tue, 22 Nov 2016 11:52:18 +0100 Subject: mac80211: multicast to unicast conversion Add the ability for an AP (and associated VLANs) to perform multicast-to-unicast conversion for ARP, IPv4 and IPv6 frames (possibly within 802.1Q). If enabled, such frames are to be sent to each station separately, with the DA replaced by their own MAC address rather than the group address. Note that this may break certain expectations of the receiver, such as the ability to drop unicast IP packets received within multicast L2 frames, or the ability to not send ICMP destination unreachable messages for packets received in L2 multicast (which is required, but the receiver can't tell the difference if this new option is enabled.) This also doesn't implement the 802.11 DMS (directed multicast service). 
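The conversion itself is mechanically simple: for every associated station except the sender, the frame is duplicated and the multicast destination address is overwritten with that station's own unicast address before normal transmission; only ARP, IPv4 and IPv6 ethertypes (optionally inside an 802.1Q tag) are eligible. A rough standalone illustration of the per-copy rewrite — plain userspace C, not the mac80211 code, with made-up station addresses:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* Multicast frame as handed down by the IP stack; the destination
	 * MAC occupies the first six bytes of the Ethernet header. */
	uint8_t frame[64] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	/* Hypothetical associated stations (mac80211 walks sta_list instead). */
	const uint8_t stations[][6] = {
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x66 },
	};
	size_t i;

	for (i = 0; i < sizeof(stations) / sizeof(stations[0]); i++) {
		uint8_t copy[sizeof(frame)];

		memcpy(copy, frame, sizeof(frame)); /* like skb_clone()           */
		memcpy(copy, stations[i], 6);       /* like ieee80211_change_da() */
		printf("copy %zu -> %02x:%02x:%02x:%02x:%02x:%02x\n", i,
		       copy[0], copy[1], copy[2], copy[3], copy[4], copy[5]);
	}
	return 0;
}

In the actual patch below, one station receives the original skb rather than a clone, and any clone failure falls back to queueing the frame unmodified as multicast.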
Signed-off-by: Michael Braun [use true/false, rename label to the correct "multicast", use __be16 for ethertype and network order for constants] Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 12 +++++ net/mac80211/debugfs_netdev.c | 3 ++ net/mac80211/ieee80211_i.h | 1 + net/mac80211/tx.c | 122 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 137 insertions(+), 1 deletion(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index e91e503bf992..a0be2f6cd121 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3563,6 +3563,17 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif, } EXPORT_SYMBOL(ieee80211_nan_func_match); +static int ieee80211_set_multicast_to_unicast(struct wiphy *wiphy, + struct net_device *dev, + const bool enabled) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + sdata->u.ap.multicast_to_unicast = enabled; + + return 0; +} + const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -3653,4 +3664,5 @@ const struct cfg80211_ops mac80211_config_ops = { .nan_change_conf = ieee80211_nan_change_conf, .add_nan_func = ieee80211_add_nan_func, .del_nan_func = ieee80211_del_nan_func, + .set_multicast_to_unicast = ieee80211_set_multicast_to_unicast, }; diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 1a05f85cb1f0..8f5fff8b2040 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -519,6 +519,8 @@ static ssize_t ieee80211_if_fmt_aqm( } IEEE80211_IF_FILE_R(aqm); +IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX); + /* IBSS attributes */ static ssize_t ieee80211_if_fmt_tsf( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) @@ -683,6 +685,7 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD(dtim_count); DEBUGFS_ADD(num_buffered_multicast); DEBUGFS_ADD_MODE(tkip_mic_test, 0200); + DEBUGFS_ADD_MODE(multicast_to_unicast, 0600); } static void add_vlan_files(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d37a577f63a1..a3af14e3e64d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -297,6 +297,7 @@ struct ieee80211_if_ap { driver_smps_mode; /* smps mode request */ struct work_struct request_smps_work; + bool multicast_to_unicast; }; struct ieee80211_if_wds { diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 2c21b7039136..a39c0f07021b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -3573,6 +3574,115 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, rcu_read_unlock(); } +static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta) +{ + struct ethhdr *eth; + int err; + + err = skb_ensure_writable(skb, ETH_HLEN); + if (unlikely(err)) + return err; + + eth = (void *)skb->data; + ether_addr_copy(eth->h_dest, sta->sta.addr); + + return 0; +} + +static bool ieee80211_multicast_to_unicast(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + const struct ethhdr *eth = (void *)skb->data; + const struct vlan_ethhdr *ethvlan = (void *)skb->data; + __be16 ethertype; + + if (likely(!is_multicast_ether_addr(eth->h_dest))) + return false; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + if (sdata->u.vlan.sta) + return false; + if (sdata->wdev.use_4addr) + return 
false; + /* fall through */ + case NL80211_IFTYPE_AP: + /* check runtime toggle for this bss */ + if (!sdata->bss->multicast_to_unicast) + return false; + break; + default: + return false; + } + + /* multicast to unicast conversion only for some payload */ + ethertype = eth->h_proto; + if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN) + ethertype = ethvlan->h_vlan_encapsulated_proto; + switch (ethertype) { + case htons(ETH_P_ARP): + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + break; + default: + return false; + } + + return true; +} + +static void +ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev, + struct sk_buff_head *queue) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + const struct ethhdr *eth = (struct ethhdr *)skb->data; + struct sta_info *sta, *first = NULL; + struct sk_buff *cloned_skb; + + rcu_read_lock(); + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + /* AP-VLAN mismatch */ + continue; + if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr))) + /* do not send back to source */ + continue; + if (!first) { + first = sta; + continue; + } + cloned_skb = skb_clone(skb, GFP_ATOMIC); + if (!cloned_skb) + goto multicast; + if (unlikely(ieee80211_change_da(cloned_skb, sta))) { + dev_kfree_skb(cloned_skb); + goto multicast; + } + __skb_queue_tail(queue, cloned_skb); + } + + if (likely(first)) { + if (unlikely(ieee80211_change_da(skb, first))) + goto multicast; + __skb_queue_tail(queue, skb); + } else { + /* no STA connected, drop */ + kfree_skb(skb); + skb = NULL; + } + + goto out; +multicast: + __skb_queue_purge(queue); + __skb_queue_tail(queue, skb); +out: + rcu_read_unlock(); +} + /** * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs * @skb: packet to be sent @@ -3583,7 +3693,17 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev) { - __ieee80211_subif_start_xmit(skb, dev, 0); + if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) { + struct sk_buff_head queue; + + __skb_queue_head_init(&queue); + ieee80211_convert_to_unicast(skb, dev, &queue); + while ((skb = __skb_dequeue(&queue))) + __ieee80211_subif_start_xmit(skb, dev, 0); + } else { + __ieee80211_subif_start_xmit(skb, dev, 0); + } + return NETDEV_TX_OK; } -- cgit v1.2.3 From 11197d006bcfabf0173a7820a163fcaac420d10e Mon Sep 17 00:00:00 2001 From: Masashi Honma Date: Wed, 30 Nov 2016 09:06:04 +0900 Subject: mac80211: Suppress NEW_PEER_CANDIDATE event if no room Previously, kernel sends NEW_PEER_CANDIDATE event to user land even if the found peer does not have any room to accept other peer. This causes continuous connection trials. 
Signed-off-by: Masashi Honma Signed-off-by: Johannes Berg --- net/mac80211/mesh_plink.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 7fcdcf622655..fcba70e57073 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -505,12 +505,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr, /* Userspace handles station allocation */ if (sdata->u.mesh.user_mpm || - sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) - cfg80211_notify_new_peer_candidate(sdata->dev, addr, - elems->ie_start, - elems->total_len, - GFP_KERNEL); - else + sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) { + if (mesh_peer_accepts_plinks(elems) && + mesh_plink_availables(sdata)) + cfg80211_notify_new_peer_candidate(sdata->dev, addr, + elems->ie_start, + elems->total_len, + GFP_KERNEL); + } else sta = __mesh_sta_info_alloc(sdata, addr); return sta; -- cgit v1.2.3 From 4a5eccaa93505cd186939a01d7c115568b1881a6 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Mon, 5 Dec 2016 10:58:30 -0800 Subject: mac80211: Show pending txqlen in debugfs. Could be useful for debugging memory consumption issues, and perhaps power-save as well. Signed-off-by: Ben Greear Signed-off-by: Johannes Berg --- net/mac80211/debugfs.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index e02ba42ca827..f62cd0e13c58 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -243,6 +243,31 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, return rv; } +static ssize_t misc_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + /* Max len of each line is 16 characters, plus 9 for 'pending:\n' */ + size_t bufsz = IEEE80211_MAX_QUEUES * 16 + 9; + char *buf = kzalloc(bufsz, GFP_KERNEL); + char *pos = buf, *end = buf + bufsz - 1; + ssize_t rv; + int i; + int ln; + + pos += scnprintf(pos, end - pos, "pending:\n"); + + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { + ln = skb_queue_len(&local->pending[i]); + pos += scnprintf(pos, end - pos, "[%i] %d\n", + i, ln); + } + + rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); + kfree(buf); + return rv; +} + static ssize_t queues_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -263,6 +288,7 @@ static ssize_t queues_read(struct file *file, char __user *user_buf, DEBUGFS_READONLY_FILE_OPS(hwflags); DEBUGFS_READONLY_FILE_OPS(queues); +DEBUGFS_READONLY_FILE_OPS(misc); /* statistics stuff */ @@ -331,6 +357,7 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(wep_iv); DEBUGFS_ADD(queues); + DEBUGFS_ADD(misc); #ifdef CONFIG_PM DEBUGFS_ADD_MODE(reset, 0200); #endif -- cgit v1.2.3 From 6124c53edeeaac4394755ebb38c522bc4eef1460 Mon Sep 17 00:00:00 2001 From: Michał Kępień Date: Thu, 8 Dec 2016 08:30:51 +0100 Subject: rfkill: Cleanup error handling in rfkill_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use a separate label per error condition in rfkill_init() to make it a bit cleaner and easier to extend. 
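The structure being introduced is the usual goto-ladder unwind: one label per registration step, jumped to when that step fails, falling through the undo calls for everything that already succeeded, in reverse order. A minimal standalone sketch of the same idiom (the register_*/unregister_* helpers are stand-ins, not rfkill functions):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: stand-ins for the real registration steps. */
static int register_class(void)  { return 0; }
static int register_misc(void)   { return 0; }
static int register_input(void)  { return -1; /* pretend this step fails */ }
static void unregister_class(void) { puts("class unregistered"); }
static void unregister_misc(void)  { puts("misc unregistered"); }

static int example_init(void)
{
	int error;

	error = register_class();
	if (error)
		goto error_class;

	error = register_misc();
	if (error)
		goto error_misc;

	error = register_input();
	if (error)
		goto error_input;

	return 0;

error_input:
	unregister_misc();	/* undo only what succeeded, in reverse order */
error_misc:
	unregister_class();
error_class:
	return error;
}

int main(void)
{
	return example_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Extending the function — as the next patch does for the rfkill-any trigger — then needs only one new label and one new unwind call instead of another copy of the whole cleanup sequence.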
Signed-off-by: Michał Kępień Signed-off-by: Johannes Berg --- net/rfkill/core.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 184bb711a06d..9eb26a4cdac8 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1263,24 +1263,25 @@ static int __init rfkill_init(void) error = class_register(&rfkill_class); if (error) - goto out; + goto error_class; error = misc_register(&rfkill_miscdev); - if (error) { - class_unregister(&rfkill_class); - goto out; - } + if (error) + goto error_misc; #ifdef CONFIG_RFKILL_INPUT error = rfkill_handler_init(); - if (error) { - misc_deregister(&rfkill_miscdev); - class_unregister(&rfkill_class); - goto out; - } + if (error) + goto error_input; #endif - out: + return 0; + +error_input: + misc_deregister(&rfkill_miscdev); +error_misc: + class_unregister(&rfkill_class); +error_class: return error; } subsys_initcall(rfkill_init); -- cgit v1.2.3 From 73f4f76a196d7adb11a1e192bd8024fe0bc83910 Mon Sep 17 00:00:00 2001 From: Michał Kępień Date: Thu, 8 Dec 2016 08:30:52 +0100 Subject: rfkill: Add rfkill-any LED trigger MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new "global" (i.e. not per-rfkill device) LED trigger, rfkill-any, which may be useful on laptops with a single "radio LED" and multiple radio transmitters. The trigger is meant to turn a LED on whenever there is at least one radio transmitter active and turn it off otherwise. This requires taking rfkill_global_mutex before calling rfkill_set_block() in rfkill_resume(): since __rfkill_any_led_trigger_event() is called from rfkill_set_block() unconditionally, each caller of the latter needs to take care of locking rfkill_global_mutex. 
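Once registered, the trigger appears in the standard LED class interface alongside the existing per-device rfkill triggers, so hooking an LED to it is an ordinary sysfs write of the trigger name. A small userspace sketch (the LED name in the path is only an example; substitute whatever entry the platform exposes under /sys/class/leds):

#include <stdio.h>

int main(void)
{
	/* Example LED; replace with a real entry from /sys/class/leds/. */
	const char *path = "/sys/class/leds/example::radio/trigger";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Selecting the trigger makes the LED track global rfkill state:
	 * on while at least one transmitter is unblocked, off otherwise. */
	fputs("rfkill-any\n", f);
	return fclose(f) != 0;
}

This is equivalent to echoing "rfkill-any" into the LED's trigger attribute from a shell.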
Signed-off-by: Michał Kępień Signed-off-by: Johannes Berg --- net/rfkill/core.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 9eb26a4cdac8..f6b01f3616e5 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -176,6 +176,47 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill) { led_trigger_unregister(&rfkill->led_trigger); } + +static struct led_trigger rfkill_any_led_trigger; + +static void __rfkill_any_led_trigger_event(void) +{ + enum led_brightness brightness = LED_OFF; + struct rfkill *rfkill; + + list_for_each_entry(rfkill, &rfkill_list, node) { + if (!(rfkill->state & RFKILL_BLOCK_ANY)) { + brightness = LED_FULL; + break; + } + } + + led_trigger_event(&rfkill_any_led_trigger, brightness); +} + +static void rfkill_any_led_trigger_event(void) +{ + mutex_lock(&rfkill_global_mutex); + __rfkill_any_led_trigger_event(); + mutex_unlock(&rfkill_global_mutex); +} + +static void rfkill_any_led_trigger_activate(struct led_classdev *led_cdev) +{ + rfkill_any_led_trigger_event(); +} + +static int rfkill_any_led_trigger_register(void) +{ + rfkill_any_led_trigger.name = "rfkill-any"; + rfkill_any_led_trigger.activate = rfkill_any_led_trigger_activate; + return led_trigger_register(&rfkill_any_led_trigger); +} + +static void rfkill_any_led_trigger_unregister(void) +{ + led_trigger_unregister(&rfkill_any_led_trigger); +} #else static void rfkill_led_trigger_event(struct rfkill *rfkill) { @@ -189,6 +230,23 @@ static inline int rfkill_led_trigger_register(struct rfkill *rfkill) static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) { } + +static void __rfkill_any_led_trigger_event(void) +{ +} + +static void rfkill_any_led_trigger_event(void) +{ +} + +static int rfkill_any_led_trigger_register(void) +{ + return 0; +} + +static void rfkill_any_led_trigger_unregister(void) +{ +} #endif /* CONFIG_RFKILL_LEDS */ static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, @@ -297,6 +355,7 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked) spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); + __rfkill_any_led_trigger_event(); if (prev != curr) rfkill_event(rfkill); @@ -477,6 +536,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); + rfkill_any_led_trigger_event(); if (rfkill->registered && prev != blocked) schedule_work(&rfkill->uevent_work); @@ -520,6 +580,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); + rfkill_any_led_trigger_event(); return blocked; } @@ -569,6 +630,7 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); + rfkill_any_led_trigger_event(); } } EXPORT_SYMBOL(rfkill_set_states); @@ -812,8 +874,10 @@ static int rfkill_resume(struct device *dev) rfkill->suspended = false; if (!rfkill->persistent) { + mutex_lock(&rfkill_global_mutex); cur = !!(rfkill->state & RFKILL_BLOCK_SW); rfkill_set_block(rfkill, cur); + mutex_unlock(&rfkill_global_mutex); } if (rfkill->ops->poll && !rfkill->polling_paused) @@ -985,6 +1049,7 @@ int __must_check rfkill_register(struct rfkill *rfkill) #endif } + __rfkill_any_led_trigger_event(); rfkill_send_events(rfkill, RFKILL_OP_ADD); mutex_unlock(&rfkill_global_mutex); @@ -1017,6 +1082,7 @@ void 
rfkill_unregister(struct rfkill *rfkill) mutex_lock(&rfkill_global_mutex); rfkill_send_events(rfkill, RFKILL_OP_DEL); list_del_init(&rfkill->node); + __rfkill_any_led_trigger_event(); mutex_unlock(&rfkill_global_mutex); rfkill_led_trigger_unregister(rfkill); @@ -1269,6 +1335,10 @@ static int __init rfkill_init(void) if (error) goto error_misc; + error = rfkill_any_led_trigger_register(); + if (error) + goto error_led_trigger; + #ifdef CONFIG_RFKILL_INPUT error = rfkill_handler_init(); if (error) @@ -1278,6 +1348,8 @@ static int __init rfkill_init(void) return 0; error_input: + rfkill_any_led_trigger_unregister(); +error_led_trigger: misc_deregister(&rfkill_miscdev); error_misc: class_unregister(&rfkill_class); @@ -1291,6 +1363,7 @@ static void __exit rfkill_exit(void) #ifdef CONFIG_RFKILL_INPUT rfkill_handler_exit(); #endif + rfkill_any_led_trigger_unregister(); misc_deregister(&rfkill_miscdev); class_unregister(&rfkill_class); } -- cgit v1.2.3 From 76f43b4c0a9337af22827d78de4f2b8fd5328489 Mon Sep 17 00:00:00 2001 From: Masashi Honma Date: Thu, 8 Dec 2016 10:15:50 +0900 Subject: mac80211: Remove invalid flag operations in mesh TSF synchronization mesh_sync_offset_adjust_tbtt() implements Extensible synchronization framework ([1] 13.13.2 Extensible synchronization framework). It shall not operate the flag "TBTT Adjusting subfield" ([1] 8.4.2.100.8 Mesh Capability), since it is used only for MBCA ([1] 13.13.4 Mesh beacon collision avoidance, see 13.13.4.4.3 TBTT scanning and adjustment procedures for detail). So this patch remove the flag operations. [1] IEEE Std 802.11 2012 Signed-off-by: Masashi Honma [remove adjusting_tbtt entirely, since it's now unused] Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 1 - net/mac80211/mesh.c | 3 --- net/mac80211/mesh_sync.c | 11 ----------- 3 files changed, 15 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a3af14e3e64d..03e3230479f3 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -689,7 +689,6 @@ struct ieee80211_if_mesh { const struct ieee80211_mesh_sync_ops *sync_ops; s64 sync_offset_clockdrift_max; spinlock_t sync_offset_lock; - bool adjusting_tbtt; /* mesh power save */ enum nl80211_mesh_power_mode nonpeer_pm; int ps_peers_light_sleep; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 42120d965263..b9628374dcbd 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -279,8 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ *pos |= ifmsh->ps_peers_deep_sleep ? IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00; - *pos++ |= ifmsh->adjusting_tbtt ? 
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; *pos++ = 0x00; return 0; @@ -850,7 +848,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ifmsh->mesh_cc_id = 0; /* Disabled */ /* register sync ops from extensible synchronization framework */ ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); - ifmsh->adjusting_tbtt = false; ifmsh->sync_offset_clockdrift_max = 0; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index faca22cd02b5..75608c07dc7b 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -123,7 +123,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, */ if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { - clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr); goto no_sync; @@ -172,11 +171,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - u8 cap; WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); WARN_ON(!rcu_read_lock_held()); - cap = beacon->meshconf->meshconf_cap; spin_lock_bh(&ifmsh->sync_offset_lock); @@ -190,21 +187,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n", ifmsh->sync_offset_clockdrift_max); set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags); - - ifmsh->adjusting_tbtt = true; } else { msync_dbg(sdata, "TBTT : max clockdrift=%lld; too small to adjust\n", (long long)ifmsh->sync_offset_clockdrift_max); ifmsh->sync_offset_clockdrift_max = 0; - - ifmsh->adjusting_tbtt = false; } spin_unlock_bh(&ifmsh->sync_offset_lock); - - beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ? - IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap : - ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap; } static const struct sync_method sync_methods[] = { -- cgit v1.2.3 From 445cd452fe5187e676eef02c917c9e5f837c749e Mon Sep 17 00:00:00 2001 From: Masashi Honma Date: Thu, 8 Dec 2016 10:15:51 +0900 Subject: mac80211: Use appropriate name for functions and messages These functions drifts TSF timers, not TBTT. 
Signed-off-by: Masashi Honma Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 4 ++-- net/mac80211/mesh.c | 2 +- net/mac80211/mesh.h | 2 +- net/mac80211/mesh_sync.c | 16 ++++++++-------- net/mac80211/tx.c | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 03e3230479f3..e5d73118d08f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -625,8 +625,8 @@ struct ieee80211_mesh_sync_ops { struct ieee80211_rx_status *rx_status); /* should be called with beacon_data under RCU read lock */ - void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata, - struct beacon_data *beacon); + void (*adjust_tsf)(struct ieee80211_sub_if_data *sdata, + struct beacon_data *beacon); /* add other framework functions here */ }; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index b9628374dcbd..cc2a63bd233f 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1346,7 +1346,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) ieee80211_mesh_rootpath(sdata); if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) - mesh_sync_adjust_tbtt(sdata); + mesh_sync_adjust_tsf(sdata); if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags)) mesh_bss_info_changed(sdata); diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 26b9ccbe1fce..7e5f271e3c30 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -341,7 +341,7 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) } void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); -void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); +void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata); void ieee80211s_stop(void); #else static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index 75608c07dc7b..a435f094a82e 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -12,7 +12,7 @@ #include "mesh.h" #include "driver-ops.h" -/* This is not in the standard. It represents a tolerable tbtt drift below +/* This is not in the standard. It represents a tolerable tsf drift below * which we do no TSF adjustment. 
*/ #define TOFFSET_MINIMUM_ADJUSTMENT 10 @@ -46,7 +46,7 @@ static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; } -void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) +void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; @@ -57,12 +57,12 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) spin_lock_bh(&ifmsh->sync_offset_lock); if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { - msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n", + msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting\n", (long long) ifmsh->sync_offset_clockdrift_max); tsfdelta = -ifmsh->sync_offset_clockdrift_max; ifmsh->sync_offset_clockdrift_max = 0; } else { - msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n", + msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting by %llu\n", (long long) ifmsh->sync_offset_clockdrift_max, (unsigned long long) beacon_int_fraction); tsfdelta = -beacon_int_fraction; @@ -167,7 +167,7 @@ no_sync: rcu_read_unlock(); } -static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, +static void mesh_sync_offset_adjust_tsf(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; @@ -184,12 +184,12 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, * the tsf adjustment to the mesh tasklet */ msync_dbg(sdata, - "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n", + "TSF : kicking off TSF adjustment with clockdrift_max=%lld\n", ifmsh->sync_offset_clockdrift_max); set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags); } else { msync_dbg(sdata, - "TBTT : max clockdrift=%lld; too small to adjust\n", + "TSF : max clockdrift=%lld; too small to adjust\n", (long long)ifmsh->sync_offset_clockdrift_max); ifmsh->sync_offset_clockdrift_max = 0; } @@ -201,7 +201,7 @@ static const struct sync_method sync_methods[] = { .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .ops = { .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp, - .adjust_tbtt = &mesh_sync_offset_adjust_tbtt, + .adjust_tsf = &mesh_sync_offset_adjust_tsf, } }, }; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a39c0f07021b..058652d000d9 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4196,7 +4196,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, } if (ifmsh->sync_ops) - ifmsh->sync_ops->adjust_tbtt(sdata, beacon); + ifmsh->sync_ops->adjust_tsf(sdata, beacon); skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + -- cgit v1.2.3 From 81a834e3483c1bc7ae15825e4d0110932586a927 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 5 Dec 2016 13:27:32 +0200 Subject: ath9k: Add a #define for the EEPROM "eepmisc" endianness bit This replaces a magic number with a named #define. Additionally it removes two "eeprom format" specific #defines for the "big endianness" bit which are the same on all eeprom formats. 
Signed-off-by: Martin Blumenstingl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 3 ++- drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 1 - drivers/net/wireless/ath/ath9k/eeprom.h | 4 +++- drivers/net/wireless/ath/ath9k/eeprom_4k.c | 2 +- drivers/net/wireless/ath/ath9k/eeprom_9287.c | 2 +- drivers/net/wireless/ath/ath9k/eeprom_def.c | 2 +- 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 08607d7fdb56..ea7b8190b3bd 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3468,7 +3468,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01)); + PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & + AR5416_EEPMISC_BIG_ENDIAN)); PR_EEP("RF Silent", pBase->rfSilent); PR_EEP("BT option", pBase->blueToothOptions); PR_EEP("Device Cap", pBase->deviceCap); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 107bcfbbe0fb..0a4c7360e847 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -38,7 +38,6 @@ #define AR9300_NUM_CTLS_2G 12 #define AR9300_NUM_BAND_EDGES_5G 8 #define AR9300_NUM_BAND_EDGES_2G 4 -#define AR9300_EEPMISC_BIG_ENDIAN 0x01 #define AR9300_EEPMISC_WOW 0x02 #define AR9300_CUSTOMER_DATA_SIZE 20 diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 4465c6566f20..c466adaad00c 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -161,6 +161,9 @@ #define AR5416_EEP_TXGAIN_ORIGINAL 0 #define AR5416_EEP_TXGAIN_HIGH_POWER 1 +/* Endianness of EEPROM content */ +#define AR5416_EEPMISC_BIG_ENDIAN 0x01 + #define AR5416_EEP4K_START_LOC 64 #define AR5416_EEP4K_NUM_2G_CAL_PIERS 3 #define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3 @@ -191,7 +194,6 @@ #define AR9287_NUM_CTLS 12 #define AR9287_NUM_BAND_EDGES 4 #define AR9287_PD_GAIN_ICEPTS 1 -#define AR9287_EEPMISC_BIG_ENDIAN 0x01 #define AR9287_EEPMISC_WOW 0x02 #define AR9287_MAX_CHAINS 2 #define AR9287_ANT_16S 32 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 5da0826bf1be..780cb49db66c 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -154,7 +154,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); + PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 1a019a39eda1..f483ba2d004b 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -150,7 +150,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 
5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); + PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 959682f7909c..39b1b27585c1 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -232,7 +232,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); + PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); -- cgit v1.2.3 From 291478b7b3c020210bcabd78a0348c0c381a362c Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 5 Dec 2016 13:27:33 +0200 Subject: ath9k: indicate that the AR9003 EEPROM template values are little endian The eepMisc field was not set explicitly. The default value of 0 means that the values in the EEPROM (template) should be interpreted as little endian. However, this is not clear until comparing the AR9003 code with the other EEPROM formats. To make the code easier to understand we explicitly state that the values are little endian - there are no functional changes with this patch. Signed-off-by: Martin Blumenstingl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 10 +++++----- drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 3 +++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index ea7b8190b3bd..270d4aea5268 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -53,7 +53,7 @@ static const struct ar9300_eeprom ar9300_default = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -631,7 +631,7 @@ static const struct ar9300_eeprom ar9300_x113 = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -1210,7 +1210,7 @@ static const struct ar9300_eeprom ar9300_h112 = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -1789,7 +1789,7 @@ static const struct ar9300_eeprom ar9300_x112 = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -2367,7 +2367,7 @@ static const struct ar9300_eeprom ar9300_h116 = { .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | 
AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 0a4c7360e847..7dc7205dc877 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -69,6 +69,9 @@ #define AR9300_BASE_ADDR 0x3ff #define AR9300_BASE_ADDR_512 0x1ff +/* AR5416_EEPMISC_BIG_ENDIAN not set indicates little endian */ +#define AR9300_EEPMISC_LITTLE_ENDIAN 0 + #define AR9300_OTP_BASE \ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) #define AR9300_OTP_STATUS \ -- cgit v1.2.3 From d8ec2e2a63e8136fc7d687cf75bcd034d9615c24 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 5 Dec 2016 13:27:34 +0200 Subject: ath9k: Add an eeprom_ops callback for retrieving the eepmisc value This allows deciding if we have to swap the EEPROM data (so it matches the system's native endianness) even if no byte-swapping (swab16, based on the first two bytes in the EEPROM) is needed. Signed-off-by: Martin Blumenstingl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 8 +++++++- drivers/net/wireless/ath/ath9k/eeprom.h | 1 + drivers/net/wireless/ath/ath9k/eeprom_4k.c | 8 +++++++- drivers/net/wireless/ath/ath9k/eeprom_9287.c | 8 +++++++- drivers/net/wireless/ath/ath9k/eeprom_def.c | 8 +++++++- 5 files changed, 29 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 270d4aea5268..3dbfd86ebe36 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -5498,6 +5498,11 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, } } +static u8 ar9003_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.map4k.baseEepHeader.eepMisc; +} + const struct eeprom_ops eep_ar9300_ops = { .check_eeprom = ath9k_hw_ar9300_check_eeprom, .get_eeprom = ath9k_hw_ar9300_get_eeprom, @@ -5508,5 +5513,6 @@ const struct eeprom_ops eep_ar9300_ops = { .set_board_values = ath9k_hw_ar9300_set_board_values, .set_addac = ath9k_hw_ar9300_set_addac, .set_txpower = ath9k_hw_ar9300_set_txpower, - .get_spur_channel = ath9k_hw_ar9300_get_spur_channel + .get_spur_channel = ath9k_hw_ar9300_get_spur_channel, + .get_eepmisc = ar9003_get_eepmisc }; diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index c466adaad00c..408cfa797589 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -655,6 +655,7 @@ struct eeprom_ops { u16 cfgCtl, u8 twiceAntennaReduction, u8 powerLimit, bool test); u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz); + u8 (*get_eepmisc)(struct ath_hw *ah); }; void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 780cb49db66c..7cd9c64d9cbd 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -1064,6 +1064,11 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan; } +static u8 ath9k_hw_4k_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.map4k.baseEepHeader.eepMisc; +} + const struct eeprom_ops eep_4k_ops = { .check_eeprom = ath9k_hw_4k_check_eeprom, .get_eeprom = 
ath9k_hw_4k_get_eeprom, @@ -1073,5 +1078,6 @@ const struct eeprom_ops eep_4k_ops = { .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev, .set_board_values = ath9k_hw_4k_set_board_values, .set_txpower = ath9k_hw_4k_set_txpower, - .get_spur_channel = ath9k_hw_4k_get_spur_channel + .get_spur_channel = ath9k_hw_4k_get_spur_channel, + .get_eepmisc = ath9k_hw_4k_get_eepmisc }; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index f483ba2d004b..e1ba9bc0afb8 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -986,6 +986,11 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan; } +static u8 ath9k_hw_ar9287_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.map9287.baseEepHeader.eepMisc; +} + const struct eeprom_ops eep_ar9287_ops = { .check_eeprom = ath9k_hw_ar9287_check_eeprom, .get_eeprom = ath9k_hw_ar9287_get_eeprom, @@ -995,5 +1000,6 @@ const struct eeprom_ops eep_ar9287_ops = { .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev, .set_board_values = ath9k_hw_ar9287_set_board_values, .set_txpower = ath9k_hw_ar9287_set_txpower, - .get_spur_channel = ath9k_hw_ar9287_get_spur_channel + .get_spur_channel = ath9k_hw_ar9287_get_spur_channel, + .get_eepmisc = ath9k_hw_ar9287_get_eepmisc }; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 39b1b27585c1..087bdb7c10d6 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -1317,6 +1317,11 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan; } +static u8 ath9k_hw_def_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.def.baseEepHeader.eepMisc; +} + const struct eeprom_ops eep_def_ops = { .check_eeprom = ath9k_hw_def_check_eeprom, .get_eeprom = ath9k_hw_def_get_eeprom, @@ -1327,5 +1332,6 @@ const struct eeprom_ops eep_def_ops = { .set_board_values = ath9k_hw_def_set_board_values, .set_addac = ath9k_hw_def_set_addac, .set_txpower = ath9k_hw_def_set_txpower, - .get_spur_channel = ath9k_hw_def_get_spur_channel + .get_spur_channel = ath9k_hw_def_get_spur_channel, + .get_eepmisc = ath9k_hw_def_get_eepmisc }; -- cgit v1.2.3 From 7d7dc5386836dd973eaef48b11c4e77368dd07a4 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 5 Dec 2016 13:27:34 +0200 Subject: ath9k: replace eeprom_param EEP_MINOR_REV with get_eeprom_rev get_eeprom(ah, EEP_MINOR_REV) and get_eeprom_rev(ah) are both doing the same thing: returning the EEPROM revision (12 lowest bits). Make the code consistent by using get_eeprom_rev(ah) everywhere. 
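A minimal sketch of the substitution made throughout this patch (illustrative only, not taken from the diff; both callbacks already exist in struct eeprom_ops):

	u16 minor;

	/* old style: minor revision via the generic parameter getter */
	minor = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);

	/* new style used everywhere after this patch: dedicated callback,
	 * returns the low 12 bits of the EEPROM version word
	 */
	minor = ah->eep_ops->get_eeprom_rev(ah);
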
Signed-off-by: Martin Blumenstingl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar5008_phy.c | 2 +- drivers/net/wireless/ath/ath9k/ar9002_hw.c | 6 ++---- drivers/net/wireless/ath/ath9k/eeprom.h | 1 - drivers/net/wireless/ath/ath9k/eeprom_4k.c | 5 ----- drivers/net/wireless/ath/ath9k/eeprom_9287.c | 6 +----- drivers/net/wireless/ath/ath9k/eeprom_def.c | 2 -- 6 files changed, 4 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 8eea8d22e72e..7922550c2159 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -524,7 +524,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah, return true; /* Setup rf parameters */ - eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); + eepMinorRev = ah->eep_ops->get_eeprom_rev(ah); for (i = 0; i < ah->iniBank6.ia_rows; i++) ah->analogBank6Data[i] = INI_RA(&ah->iniBank6, i, modesIndex); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index d480d2f3e185..ae68f674829b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -108,8 +108,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah) { u32 rxgain_type; - if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= - AR5416_EEP_MINOR_VER_17) { + if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_17) { rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE); if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF) @@ -129,8 +128,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah) static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type) { - if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= - AR5416_EEP_MINOR_VER_19) { + if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) { if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9280Modes_high_power_tx_gain_9280_2); diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 408cfa797589..8ef8090d0c9c 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -230,7 +230,6 @@ enum eeprom_param { EEP_DB_5, EEP_OB_2, EEP_DB_2, - EEP_MINOR_REV, EEP_TX_MASK, EEP_RX_MASK, EEP_FSTCLK_5G, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 7cd9c64d9cbd..d4668712f866 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -254,9 +254,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; struct base_eep_header_4k *pBase = &eep->baseEepHeader; - u16 ver_minor; - - ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK; switch (param) { case EEP_NFTHRESH_2: @@ -279,8 +276,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, return pModal->ob_0; case EEP_DB_2: return pModal->db1_1; - case EEP_MINOR_REV: - return ver_minor; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index e1ba9bc0afb8..7f85bd1435b2 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -250,9 +250,7 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah, struct ar9287_eeprom *eep = 
&ah->eeprom.map9287; struct modal_eep_ar9287_header *pModal = &eep->modalHeader; struct base_eep_ar9287_header *pBase = &eep->baseEepHeader; - u16 ver_minor; - - ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK; + u16 ver_minor = ath9k_hw_ar9287_get_eeprom_rev(ah); switch (param) { case EEP_NFTHRESH_2: @@ -271,8 +269,6 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah, return pBase->opCapFlags; case EEP_RF_SILENT: return pBase->rfSilent; - case EEP_MINOR_REV: - return ver_minor; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 087bdb7c10d6..e9af4a50234e 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -380,8 +380,6 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, return pModal[1].ob; case EEP_DB_2: return pModal[1].db; - case EEP_MINOR_REV: - return AR5416_VER_MASK; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: -- cgit v1.2.3 From 9bff7428d55a5fbc3b6e8d3ba725040cadd71a59 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 5 Dec 2016 13:27:35 +0200 Subject: ath9k: consistently use get_eeprom_rev(ah) The AR5416_VER_MASK macro does the same as get_eeprom_rev, except that one has to know the actual EEPROM type (and providing a reference to that in a variable named "eep"). Additionally the eeprom_*.c implementations used the same shifting logic multiple times to get the eeprom revision which was also unnecessary duplication of get_eeprom_rev. Also use the AR5416_EEP_VER_MINOR_MASK macro where needed and introduce a similar macro (AR5416_EEP_VER_MAJOR_MASK) for the major version. Finally drop AR9287_EEP_VER_MINOR_MASK since it simply duplicates the already defined AR5416_EEP_VER_MINOR_MASK. Signed-off-by: Martin Blumenstingl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/eeprom.h | 4 +-- drivers/net/wireless/ath/ath9k/eeprom_4k.c | 32 ++++++++++------------ drivers/net/wireless/ath/ath9k/eeprom_9287.c | 19 +++++++------ drivers/net/wireless/ath/ath9k/eeprom_def.c | 41 +++++++++++++++------------- drivers/net/wireless/ath/ath9k/xmit.c | 3 +- 5 files changed, 52 insertions(+), 47 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 8ef8090d0c9c..5a24fb5dc0a0 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -99,7 +99,6 @@ #define FBIN2FREQ(x, y) ((y) ? 
(2300 + x) : (4800 + 5 * x)) #define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) -#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) #define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) #define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \ @@ -121,6 +120,8 @@ #define AR5416_EEP_NO_BACK_VER 0x1 #define AR5416_EEP_VER 0xE +#define AR5416_EEP_VER_MAJOR_SHIFT 12 +#define AR5416_EEP_VER_MAJOR_MASK 0xF000 #define AR5416_EEP_VER_MINOR_MASK 0x0FFF #define AR5416_EEP_MINOR_VER_2 0x2 #define AR5416_EEP_MINOR_VER_3 0x3 @@ -177,7 +178,6 @@ #define AR9280_TX_GAIN_TABLE_SIZE 22 #define AR9287_EEP_VER 0xE -#define AR9287_EEP_VER_MINOR_MASK 0xFFF #define AR9287_EEP_MINOR_VER_1 0x1 #define AR9287_EEP_MINOR_VER_2 0x2 #define AR9287_EEP_MINOR_VER_3 0x3 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index d4668712f866..76ce109629b9 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -20,12 +20,17 @@ static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) { - return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF); + u16 version = ah->eeprom.map4k.baseEepHeader.version; + + return (version & AR5416_EEP_VER_MAJOR_MASK) >> + AR5416_EEP_VER_MAJOR_SHIFT; } static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) { - return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); + u16 version = ah->eeprom.map4k.baseEepHeader.version; + + return version & AR5416_EEP_VER_MINOR_MASK; } #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) @@ -136,8 +141,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, goto out; } - PR_EEP("Major Version", pBase->version >> 12); - PR_EEP("Minor Version", pBase->version & 0xFFF); + PR_EEP("Major Version", ath9k_hw_4k_get_eeprom_ver(ah)); + PR_EEP("Minor Version", ath9k_hw_4k_get_eeprom_rev(ah)); PR_EEP("Checksum", pBase->checksum); PR_EEP("Length", pBase->length); PR_EEP("RegDomain1", pBase->regDmn[0]); @@ -314,14 +319,12 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, xpdMask = pEepData->modalHeader.xpdGain; - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; - } else { + else pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); - } pCalBChans = pEepData->calFreqPier2G; numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS; @@ -607,10 +610,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah, memset(ratesArray, 0, sizeof(ratesArray)); - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; - } ath9k_hw_set_4k_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, @@ -730,8 +731,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah, SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF), AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF); - if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[0]; REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, @@ -1009,16 +1009,14 @@ static void 
ath9k_hw_4k_set_board_values(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62); - if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) { REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, pModal->txFrameToPaOn); } - if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { if (IS_CHAN_HT40(chan)) REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 7f85bd1435b2..daeaf12ca23e 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -22,12 +22,17 @@ static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) { - return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF; + u16 version = ah->eeprom.map9287.baseEepHeader.version; + + return (version & AR5416_EEP_VER_MAJOR_MASK) >> + AR5416_EEP_VER_MAJOR_SHIFT; } static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah) { - return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; + u16 version = ah->eeprom.map9287.baseEepHeader.version; + + return version & AR5416_EEP_VER_MINOR_MASK; } static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) @@ -132,8 +137,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, goto out; } - PR_EEP("Major Version", pBase->version >> 12); - PR_EEP("Minor Version", pBase->version & 0xFFF); + PR_EEP("Major Version", ath9k_hw_ar9287_get_eeprom_ver(ah)); + PR_EEP("Minor Version", ath9k_hw_ar9287_get_eeprom_rev(ah)); PR_EEP("Checksum", pBase->checksum); PR_EEP("Length", pBase->length); PR_EEP("RegDomain1", pBase->regDmn[0]); @@ -383,8 +388,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah, xpdMask = pEepData->modalHeader.xpdGain; - if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= - AR9287_EEP_MINOR_VER_2) + if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2) pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; else pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), @@ -733,8 +737,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah, memset(ratesArray, 0, sizeof(ratesArray)); - if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= - AR9287_EEP_MINOR_VER_2) + if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2) ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; ath9k_hw_set_ar9287_power_per_rate_table(ah, chan, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index e9af4a50234e..bd5bd627e00a 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -79,12 +79,17 @@ static void ath9k_olc_get_pdadcs(struct ath_hw *ah, static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah) { - return ((ah->eeprom.def.baseEepHeader.version >> 12) & 0xF); + u16 version = ah->eeprom.def.baseEepHeader.version; + + return (version & AR5416_EEP_VER_MAJOR_MASK) >> + AR5416_EEP_VER_MAJOR_SHIFT; } static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah) { - return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF); + u16 version = ah->eeprom.def.baseEepHeader.version; + + return version & 
AR5416_EEP_VER_MINOR_MASK; } #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) @@ -214,8 +219,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, goto out; } - PR_EEP("Major Version", pBase->version >> 12); - PR_EEP("Minor Version", pBase->version & 0xFFF); + PR_EEP("Major Version", ath9k_hw_def_get_eeprom_ver(ah)); + PR_EEP("Minor Version", ath9k_hw_def_get_eeprom_rev(ah)); PR_EEP("Checksum", pBase->checksum); PR_EEP("Length", pBase->length); PR_EEP("RegDomain1", pBase->regDmn[0]); @@ -391,27 +396,27 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, case EEP_TXGAIN_TYPE: return pBase->txGainType; case EEP_OL_PWRCTRL: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) return pBase->openLoopPwrCntl ? true : false; else return false; case EEP_RC_CHAIN_MASK: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) return pBase->rcChainMask; else return 0; case EEP_DAC_HPWR_5G: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20) return pBase->dacHiPwrMode_5G; else return 0; case EEP_FRAC_N_5G: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_22) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_22) return pBase->frac_n_5g; else return 0; case EEP_PWR_TABLE_OFFSET: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_21) return pBase->pwr_table_offset; else return AR5416_PWR_TABLE_OFFSET_DB; @@ -434,7 +439,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah, u8 txRxAttenLocal, int regChainOffset, int i) { ENABLE_REG_RMW_BUFFER(ah); - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[i]; if (AR_SREV_9280_20_OR_LATER(ah)) { @@ -603,7 +608,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, pModal->thresh62); } - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) { REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); @@ -611,7 +616,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, pModal->txFrameToPaOn); } - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { if (IS_CHAN_HT40(chan)) REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, @@ -619,13 +624,14 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, } if (AR_SREV_9280_20_OR_LATER(ah) && - AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL, AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK, pModal->miscBits); - if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) { + if (AR_SREV_9280_20(ah) && + ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20) { if (IS_CHAN_2GHZ(chan)) REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, eep->baseEepHeader.dacLpMode); @@ -796,8 +802,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET); - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) { pdGainOverlap_t2 = 
pEepData->modalHeader[modalIdx].pdGainOverlap; } else { @@ -1169,10 +1174,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, memset(ratesArray, 0, sizeof(ratesArray)); - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; - } ath9k_hw_set_def_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 486afa98a5b8..5fa98e26f042 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1151,8 +1151,9 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, if (is_40) { u8 power_ht40delta; struct ar5416_eeprom_def *eep = &ah->eeprom.def; + u16 eeprom_rev = ah->eep_ops->get_eeprom_rev(ah); - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) { + if (eeprom_rev >= AR5416_EEP_MINOR_VER_2) { bool is_2ghz; struct modal_eep_header *pmodal; -- cgit v1.2.3 From 68fbe792916cdf6c96def64c1bc50a39443a136a Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 5 Dec 2016 13:27:36 +0200 Subject: ath9k: Make the EEPROM swapping check use the eepmisc register There are two ways of swapping the EEPROM data in the ath9k driver: 1) swab16 based on the first two EEPROM "magic" bytes (same for all EEPROM formats) 2) field and EEPROM format specific swab16/swab32 (different for eeprom_def, eeprom_4k and eeprom_9287) The result of the first check was used to also enable the second swap. This behavior seems incorrect, since the data may only be byte-swapped (afterwards the data could be in the correct endianness). Thus we introduce a separate check based on the "eepmisc" register (which is part of the EEPROM data). When bit 0 is set, then the EEPROM format specific values are in "big endian". This is also done by the FreeBSD kernel, see [0] for example. This allows us to parse EEPROMs with the "correct" magic bytes but swapped EEPROM format specific values. These EEPROMs (mostly found in lantiq and broadcom based big endian MIPS based devices) only worked due to platform specific "hacks" which swapped the EEPROM so the magic was inverted, which also enabled the format specific swapping. With this patch the old behavior is still supported, but neither recommended nor needed anymore. 
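As a rough sketch of the check described above (illustrative only; the helper name is made up here, while get_eepmisc() is the callback added two patches earlier and AR5416_EEPMISC_BIG_ENDIAN is bit 0 of the eepmisc byte):

	static bool ath9k_eep_is_big_endian(struct ath_hw *ah)
	{
		/* bit 0 set => format specific EEPROM values are big endian */
		return ah->eep_ops->get_eepmisc(ah) & AR5416_EEPMISC_BIG_ENDIAN;
	}

	/* swapping is then only needed when this differs from the host
	 * endianness, see ath9k_hw_nvram_swap_data() in the diff below
	 */
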
[0] https://github.com/freebsd/freebsd/blob/50719b56d9ce8d7d4beb53b16e9edb2e9a4a7a18/sys/dev/ath/ath_hal/ah_eeprom_9287.c#L351 Signed-off-by: Martin Blumenstingl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/eeprom.c | 57 ++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a449588a8009..0e46797601be 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -155,11 +155,19 @@ bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) return ret; } +#ifdef __BIG_ENDIAN +#define EXPECTED_EEPMISC_ENDIAN AR5416_EEPMISC_BIG_ENDIAN +#else +#define EXPECTED_EEPMISC_ENDIAN 0 +#endif + int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) { u16 magic; u16 *eepdata; + u8 eepmisc; int i; + bool needs_byteswap = false; struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { @@ -167,36 +175,53 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) return -EIO; } - *swap_needed = false; if (swab16(magic) == AR5416_EEPROM_MAGIC) { + needs_byteswap = true; + ath_dbg(common, EEPROM, + "EEPROM needs byte-swapping to correct endianness.\n"); + } else if (magic != AR5416_EEPROM_MAGIC) { + if (ath9k_hw_use_flash(ah)) { + ath_dbg(common, EEPROM, + "Ignoring invalid EEPROM magic (0x%04x).\n", + magic); + } else { + ath_err(common, + "Invalid EEPROM magic (0x%04x).\n", magic); + return -EINVAL; + } + } + + if (needs_byteswap) { if (ah->ah_flags & AH_NO_EEP_SWAP) { ath_info(common, "Ignoring endianness difference in EEPROM magic bytes.\n"); } else { - *swap_needed = true; - } - } else if (magic != AR5416_EEPROM_MAGIC) { - if (ath9k_hw_use_flash(ah)) - return 0; + eepdata = (u16 *)(&ah->eeprom); - ath_err(common, - "Invalid EEPROM Magic (0x%04x).\n", magic); - return -EINVAL; + for (i = 0; i < size; i++) + eepdata[i] = swab16(eepdata[i]); + } } - eepdata = (u16 *)(&ah->eeprom); - - if (*swap_needed) { - ath_dbg(common, EEPROM, - "EEPROM Endianness is not native.. Changing.\n"); + *swap_needed = false; - for (i = 0; i < size; i++) - eepdata[i] = swab16(eepdata[i]); + eepmisc = ah->eep_ops->get_eepmisc(ah); + if ((eepmisc & AR5416_EEPMISC_BIG_ENDIAN) != EXPECTED_EEPMISC_ENDIAN) { + if (ah->ah_flags & AH_NO_EEP_SWAP) { + ath_info(common, + "Ignoring endianness difference in eepmisc register.\n"); + } else { + *swap_needed = true; + ath_dbg(common, EEPROM, + "EEPROM needs swapping according to the eepmisc register.\n"); + } } return 0; } +#undef EXPECTED_EEPMISC_VAL + bool ath9k_hw_nvram_validate_checksum(struct ath_hw *ah, int size) { u32 i, sum = 0; -- cgit v1.2.3 From 4bca5303eb55d3876e719367290c08b11c70cf78 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 5 Dec 2016 13:27:37 +0200 Subject: ath9k: define all EEPROM fields in Little Endian format The ar9300_eeprom logic is already using only 8-bit (endian neutral), __le16 and __le32 fields to state explicitly how the values should be interpreted. All other EEPROM implementations (4k, 9287 and def) were using u16 and u32 fields with additional logic to swap the values (read from the original EEPROM) so they match the current CPUs endianness. The EEPROM format defaults to "all values are Little Endian", indicated by the absence of the AR5416_EEPMISC_BIG_ENDIAN in the u8 EEPMISC register. 
If we detect that the EEPROM indicates Big Endian mode (AR5416_EEPMISC_BIG_ENDIAN is set in the EEPMISC register) then we'll swap the values to convert them into Little Endian. This is done by activating the EEPMISC based logic in ath9k_hw_nvram_swap_data even if AH_NO_EEP_SWAP is set (this makes ath9k behave like the FreeBSD driver, which also does not have a flag to enable swapping based on the AR5416_EEPMISC_BIG_ENDIAN bit). Before this logic was only used to enable swapping when "current CPU endianness != EEPROM endianness". After changing all relevant fields to __le16 and __le32 sparse was used to check that all code which reads any of these fields uses le{16,32}_to_cpu. Signed-off-by: Martin Blumenstingl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/eeprom.c | 27 ++----- drivers/net/wireless/ath/ath9k/eeprom.h | 75 ++++++++++-------- drivers/net/wireless/ath/ath9k/eeprom_4k.c | 94 +++++++++------------- drivers/net/wireless/ath/ath9k/eeprom_9287.c | 98 ++++++++++------------- drivers/net/wireless/ath/ath9k/eeprom_def.c | 114 ++++++++++++--------------- 5 files changed, 174 insertions(+), 234 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index 0e46797601be..fb80ec86e53d 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -155,17 +155,10 @@ bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) return ret; } -#ifdef __BIG_ENDIAN -#define EXPECTED_EEPMISC_ENDIAN AR5416_EEPMISC_BIG_ENDIAN -#else -#define EXPECTED_EEPMISC_ENDIAN 0 -#endif - int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) { u16 magic; u16 *eepdata; - u8 eepmisc; int i; bool needs_byteswap = false; struct ath_common *common = ath9k_hw_common(ah); @@ -203,25 +196,17 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) } } - *swap_needed = false; - - eepmisc = ah->eep_ops->get_eepmisc(ah); - if ((eepmisc & AR5416_EEPMISC_BIG_ENDIAN) != EXPECTED_EEPMISC_ENDIAN) { - if (ah->ah_flags & AH_NO_EEP_SWAP) { - ath_info(common, - "Ignoring endianness difference in eepmisc register.\n"); - } else { - *swap_needed = true; - ath_dbg(common, EEPROM, - "EEPROM needs swapping according to the eepmisc register.\n"); - } + if (ah->eep_ops->get_eepmisc(ah) & AR5416_EEPMISC_BIG_ENDIAN) { + *swap_needed = true; + ath_dbg(common, EEPROM, + "Big Endian EEPROM detected according to EEPMISC register.\n"); + } else { + *swap_needed = false; } return 0; } -#undef EXPECTED_EEPMISC_VAL - bool ath9k_hw_nvram_validate_checksum(struct ath_hw *ah, int size) { u32 i, sum = 0; diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 5a24fb5dc0a0..30bf722e33ed 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -23,6 +23,17 @@ #include #include "ar9003_eeprom.h" +/* helpers to swap EEPROM fields, which are stored as __le16 or __le32. Since + * we are 100% sure about it we __force these to u16/u32 for the swab calls to + * silence the sparse checks. These macros are used when we have a Big Endian + * EEPROM (according to AR5416_EEPMISC_BIG_ENDIAN) and need to convert the + * fields to __le16/__le32. 
+ */ +#define EEPROM_FIELD_SWAB16(field) \ + (field = (__force __le16)swab16((__force u16)field)) +#define EEPROM_FIELD_SWAB32(field) \ + (field = (__force __le32)swab32((__force u32)field)) + #ifdef __BIG_ENDIAN #define AR5416_EEPROM_MAGIC 0x5aa5 #else @@ -270,19 +281,19 @@ enum ath9k_hal_freq_band { }; struct base_eep_header { - u16 length; - u16 checksum; - u16 version; + __le16 length; + __le16 checksum; + __le16 version; u8 opCapFlags; u8 eepMisc; - u16 regDmn[2]; + __le16 regDmn[2]; u8 macAddr[6]; u8 rxMask; u8 txMask; - u16 rfSilent; - u16 blueToothOptions; - u16 deviceCap; - u32 binBuildNumber; + __le16 rfSilent; + __le16 blueToothOptions; + __le16 deviceCap; + __le32 binBuildNumber; u8 deviceType; u8 pwdclkind; u8 fastClk5g; @@ -300,33 +311,33 @@ struct base_eep_header { } __packed; struct base_eep_header_4k { - u16 length; - u16 checksum; - u16 version; + __le16 length; + __le16 checksum; + __le16 version; u8 opCapFlags; u8 eepMisc; - u16 regDmn[2]; + __le16 regDmn[2]; u8 macAddr[6]; u8 rxMask; u8 txMask; - u16 rfSilent; - u16 blueToothOptions; - u16 deviceCap; - u32 binBuildNumber; + __le16 rfSilent; + __le16 blueToothOptions; + __le16 deviceCap; + __le32 binBuildNumber; u8 deviceType; u8 txGainType; } __packed; struct spur_chan { - u16 spurChan; + __le16 spurChan; u8 spurRangeLow; u8 spurRangeHigh; } __packed; struct modal_eep_header { - u32 antCtrlChain[AR5416_MAX_CHAINS]; - u32 antCtrlCommon; + __le32 antCtrlChain[AR5416_MAX_CHAINS]; + __le32 antCtrlCommon; u8 antennaGainCh[AR5416_MAX_CHAINS]; u8 switchSettling; u8 txRxAttenCh[AR5416_MAX_CHAINS]; @@ -361,7 +372,7 @@ struct modal_eep_header { u8 db_ch1; u8 lna_ctl; u8 miscBits; - u16 xpaBiasLvlFreq[3]; + __le16 xpaBiasLvlFreq[3]; u8 futureModal[6]; struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS]; @@ -375,8 +386,8 @@ struct calDataPerFreqOpLoop { } __packed; struct modal_eep_4k_header { - u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS]; - u32 antCtrlCommon; + __le32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS]; + __le32 antCtrlCommon; u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS]; u8 switchSettling; u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS]; @@ -440,19 +451,19 @@ struct modal_eep_4k_header { } __packed; struct base_eep_ar9287_header { - u16 length; - u16 checksum; - u16 version; + __le16 length; + __le16 checksum; + __le16 version; u8 opCapFlags; u8 eepMisc; - u16 regDmn[2]; + __le16 regDmn[2]; u8 macAddr[6]; u8 rxMask; u8 txMask; - u16 rfSilent; - u16 blueToothOptions; - u16 deviceCap; - u32 binBuildNumber; + __le16 rfSilent; + __le16 blueToothOptions; + __le16 deviceCap; + __le32 binBuildNumber; u8 deviceType; u8 openLoopPwrCntl; int8_t pwrTableOffset; @@ -462,8 +473,8 @@ struct base_eep_ar9287_header { } __packed; struct modal_eep_ar9287_header { - u32 antCtrlChain[AR9287_MAX_CHAINS]; - u32 antCtrlCommon; + __le32 antCtrlChain[AR9287_MAX_CHAINS]; + __le32 antCtrlCommon; int8_t antennaGainCh[AR9287_MAX_CHAINS]; u8 switchSettling; u8 txRxAttenCh[AR9287_MAX_CHAINS]; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 76ce109629b9..4a01ebe53053 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -20,7 +20,7 @@ static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) { - u16 version = ah->eeprom.map4k.baseEepHeader.version; + u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version); return (version & AR5416_EEP_VER_MAJOR_MASK) >> AR5416_EEP_VER_MAJOR_SHIFT; @@ -28,7 +28,7 @@ static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw 
*ah) static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) { - u16 version = ah->eeprom.map4k.baseEepHeader.version; + u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version); return version & AR5416_EEP_VER_MINOR_MASK; } @@ -76,8 +76,8 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_4k_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); - PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); + PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Switch Settle", modal_hdr->switchSettling); PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]); @@ -132,6 +132,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; + u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber); if (!dump_base_hdr) { len += scnprintf(buf + len, size - len, @@ -143,10 +144,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Major Version", ath9k_hw_4k_get_eeprom_ver(ah)); PR_EEP("Minor Version", ath9k_hw_4k_get_eeprom_rev(ah)); - PR_EEP("Checksum", pBase->checksum); - PR_EEP("Length", pBase->length); - PR_EEP("RegDomain1", pBase->regDmn[0]); - PR_EEP("RegDomain2", pBase->regDmn[1]); + PR_EEP("Checksum", le16_to_cpu(pBase->checksum)); + PR_EEP("Length", le16_to_cpu(pBase->length)); + PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0])); + PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1])); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); @@ -160,9 +161,9 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); - PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); - PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); - PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); + PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF); + PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF); + PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF); PR_EEP("TX Gain type", pBase->txGainType); len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", @@ -194,54 +195,31 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) return err; if (need_swap) - el = swab16(eep->baseEepHeader.length); + el = swab16((__force u16)eep->baseEepHeader.length); else - el = eep->baseEepHeader.length; + el = le16_to_cpu(eep->baseEepHeader.length); el = min(el / sizeof(u16), SIZE_EEPROM_4K); if (!ath9k_hw_nvram_validate_checksum(ah, el)) return -EINVAL; if (need_swap) { - u32 integer; - u16 word; - - word = swab16(eep->baseEepHeader.length); - eep->baseEepHeader.length = word; - - word = swab16(eep->baseEepHeader.checksum); - eep->baseEepHeader.checksum = word; - - word = swab16(eep->baseEepHeader.version); - eep->baseEepHeader.version = word; - - word = swab16(eep->baseEepHeader.regDmn[0]); - eep->baseEepHeader.regDmn[0] = word; - - word = swab16(eep->baseEepHeader.regDmn[1]); - eep->baseEepHeader.regDmn[1] = word; - - word = swab16(eep->baseEepHeader.rfSilent); - 
eep->baseEepHeader.rfSilent = word; - - word = swab16(eep->baseEepHeader.blueToothOptions); - eep->baseEepHeader.blueToothOptions = word; - - word = swab16(eep->baseEepHeader.deviceCap); - eep->baseEepHeader.deviceCap = word; - - integer = swab32(eep->modalHeader.antCtrlCommon); - eep->modalHeader.antCtrlCommon = integer; - - for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { - integer = swab32(eep->modalHeader.antCtrlChain[i]); - eep->modalHeader.antCtrlChain[i] = integer; - } - - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - word = swab16(eep->modalHeader.spurChans[i].spurChan); - eep->modalHeader.spurChans[i].spurChan = word; - } + EEPROM_FIELD_SWAB16(eep->baseEepHeader.length); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.version); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap); + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon); + + for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) + EEPROM_FIELD_SWAB16( + eep->modalHeader.spurChans[i].spurChan); } if (!ath9k_hw_nvram_check_version(ah, AR5416_EEP_VER, @@ -270,13 +248,13 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: - return pBase->regDmn[0]; + return le16_to_cpu(pBase->regDmn[0]); case EEP_OP_CAP: - return pBase->deviceCap; + return le16_to_cpu(pBase->deviceCap); case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: - return pBase->rfSilent; + return le16_to_cpu(pBase->rfSilent); case EEP_OB_2: return pModal->ob_0; case EEP_DB_2: @@ -724,7 +702,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah, { ENABLE_REG_RMW_BUFFER(ah); REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, - pModal->antCtrlChain[0], 0); + le32_to_cpu(pModal->antCtrlChain[0]), 0); REG_RMW(ah, AR_PHY_TIMING_CTRL4(0), SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | @@ -790,7 +768,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, pModal = &eep->modalHeader; txRxAttenLocal = 23; - REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon); + REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon)); /* Single chain for 4K EEPROM*/ ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal); @@ -1054,7 +1032,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { - return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan; + return le16_to_cpu(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan); } static u8 ath9k_hw_4k_get_eepmisc(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index daeaf12ca23e..9611f020f7c0 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -22,7 +22,7 @@ static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) { - u16 version = ah->eeprom.map9287.baseEepHeader.version; + u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version); return (version & AR5416_EEP_VER_MAJOR_MASK) >> AR5416_EEP_VER_MAJOR_SHIFT; @@ -30,7 +30,7 @@ static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) static int ath9k_hw_ar9287_get_eeprom_rev(struct 
ath_hw *ah) { - u16 version = ah->eeprom.map9287.baseEepHeader.version; + u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version); return version & AR5416_EEP_VER_MINOR_MASK; } @@ -79,9 +79,9 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_ar9287_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); - PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]); - PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); + PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1])); + PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]); PR_EEP("Switch Settle", modal_hdr->switchSettling); @@ -128,6 +128,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct base_eep_ar9287_header *pBase = &eep->baseEepHeader; + u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber); if (!dump_base_hdr) { len += scnprintf(buf + len, size - len, @@ -139,10 +140,10 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Major Version", ath9k_hw_ar9287_get_eeprom_ver(ah)); PR_EEP("Minor Version", ath9k_hw_ar9287_get_eeprom_rev(ah)); - PR_EEP("Checksum", pBase->checksum); - PR_EEP("Length", pBase->length); - PR_EEP("RegDomain1", pBase->regDmn[0]); - PR_EEP("RegDomain2", pBase->regDmn[1]); + PR_EEP("Checksum", le16_to_cpu(pBase->checksum)); + PR_EEP("Length", le16_to_cpu(pBase->length)); + PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0])); + PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1])); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); @@ -156,9 +157,9 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); - PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); - PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); - PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); + PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF); + PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF); + PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF); PR_EEP("Power Table Offset", pBase->pwrTableOffset); PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl); @@ -182,8 +183,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) { - u32 el, integer; - u16 word; + u32 el; int i, err; bool need_swap; struct ar9287_eeprom *eep = &ah->eeprom.map9287; @@ -193,51 +193,31 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) return err; if (need_swap) - el = swab16(eep->baseEepHeader.length); + el = swab16((__force u16)eep->baseEepHeader.length); else - el = eep->baseEepHeader.length; + el = le16_to_cpu(eep->baseEepHeader.length); el = min(el / sizeof(u16), SIZE_EEPROM_AR9287); if (!ath9k_hw_nvram_validate_checksum(ah, el)) return -EINVAL; if (need_swap) { - word = swab16(eep->baseEepHeader.length); - eep->baseEepHeader.length = word; - - word = 
swab16(eep->baseEepHeader.checksum); - eep->baseEepHeader.checksum = word; - - word = swab16(eep->baseEepHeader.version); - eep->baseEepHeader.version = word; - - word = swab16(eep->baseEepHeader.regDmn[0]); - eep->baseEepHeader.regDmn[0] = word; - - word = swab16(eep->baseEepHeader.regDmn[1]); - eep->baseEepHeader.regDmn[1] = word; - - word = swab16(eep->baseEepHeader.rfSilent); - eep->baseEepHeader.rfSilent = word; - - word = swab16(eep->baseEepHeader.blueToothOptions); - eep->baseEepHeader.blueToothOptions = word; - - word = swab16(eep->baseEepHeader.deviceCap); - eep->baseEepHeader.deviceCap = word; - - integer = swab32(eep->modalHeader.antCtrlCommon); - eep->modalHeader.antCtrlCommon = integer; - - for (i = 0; i < AR9287_MAX_CHAINS; i++) { - integer = swab32(eep->modalHeader.antCtrlChain[i]); - eep->modalHeader.antCtrlChain[i] = integer; - } - - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - word = swab16(eep->modalHeader.spurChans[i].spurChan); - eep->modalHeader.spurChans[i].spurChan = word; - } + EEPROM_FIELD_SWAB16(eep->baseEepHeader.length); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.version); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap); + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon); + + for (i = 0; i < AR9287_MAX_CHAINS; i++) + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) + EEPROM_FIELD_SWAB16( + eep->modalHeader.spurChans[i].spurChan); } if (!ath9k_hw_nvram_check_version(ah, AR9287_EEP_VER, @@ -267,13 +247,13 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah, case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: - return pBase->regDmn[0]; + return le16_to_cpu(pBase->regDmn[0]); case EEP_OP_CAP: - return pBase->deviceCap; + return le16_to_cpu(pBase->deviceCap); case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: - return pBase->rfSilent; + return le16_to_cpu(pBase->rfSilent); case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: @@ -878,13 +858,13 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah, pModal = &eep->modalHeader; - REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon); + REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon)); for (i = 0; i < AR9287_MAX_CHAINS; i++) { regChainOffset = i * 0x1000; REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, - pModal->antCtrlChain[i]); + le32_to_cpu(pModal->antCtrlChain[i])); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) @@ -982,7 +962,9 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah, static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { - return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan; + __le16 spur_ch = ah->eeprom.map9287.modalHeader.spurChans[i].spurChan; + + return le16_to_cpu(spur_ch); } static u8 ath9k_hw_ar9287_get_eepmisc(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index bd5bd627e00a..7d5223451ce9 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -79,7 +79,7 @@ static void ath9k_olc_get_pdadcs(struct ath_hw *ah, static int 
ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah) { - u16 version = ah->eeprom.def.baseEepHeader.version; + u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version); return (version & AR5416_EEP_VER_MAJOR_MASK) >> AR5416_EEP_VER_MAJOR_SHIFT; @@ -87,7 +87,7 @@ static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah) static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah) { - u16 version = ah->eeprom.def.baseEepHeader.version; + u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version); return version & AR5416_EEP_VER_MINOR_MASK; } @@ -135,10 +135,10 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); - PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]); - PR_EEP("Chain2 Ant. Control", modal_hdr->antCtrlChain[2]); - PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); + PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1])); + PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2])); + PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]); PR_EEP("Chain2 Ant. Gain", modal_hdr->antennaGainCh[2]); @@ -194,9 +194,9 @@ static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size, PR_EEP("Chain1 OutputBias", modal_hdr->ob_ch1); PR_EEP("Chain1 DriverBias", modal_hdr->db_ch1); PR_EEP("LNA Control", modal_hdr->lna_ctl); - PR_EEP("XPA Bias Freq0", modal_hdr->xpaBiasLvlFreq[0]); - PR_EEP("XPA Bias Freq1", modal_hdr->xpaBiasLvlFreq[1]); - PR_EEP("XPA Bias Freq2", modal_hdr->xpaBiasLvlFreq[2]); + PR_EEP("XPA Bias Freq0", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[0])); + PR_EEP("XPA Bias Freq1", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[1])); + PR_EEP("XPA Bias Freq2", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[2])); return len; } @@ -206,6 +206,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, { struct ar5416_eeprom_def *eep = &ah->eeprom.def; struct base_eep_header *pBase = &eep->baseEepHeader; + u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber); if (!dump_base_hdr) { len += scnprintf(buf + len, size - len, @@ -221,10 +222,10 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Major Version", ath9k_hw_def_get_eeprom_ver(ah)); PR_EEP("Minor Version", ath9k_hw_def_get_eeprom_rev(ah)); - PR_EEP("Checksum", pBase->checksum); - PR_EEP("Length", pBase->length); - PR_EEP("RegDomain1", pBase->regDmn[0]); - PR_EEP("RegDomain2", pBase->regDmn[1]); + PR_EEP("Checksum", le16_to_cpu(pBase->checksum)); + PR_EEP("Length", le16_to_cpu(pBase->length)); + PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0])); + PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1])); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); @@ -238,9 +239,9 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); - PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); - PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); - PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); + 
PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF); + PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF); + PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF); PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl); len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", @@ -273,61 +274,40 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) return err; if (need_swap) - el = swab16(eep->baseEepHeader.length); + el = swab16((__force u16)eep->baseEepHeader.length); else - el = eep->baseEepHeader.length; + el = le16_to_cpu(eep->baseEepHeader.length); el = min(el / sizeof(u16), SIZE_EEPROM_DEF); if (!ath9k_hw_nvram_validate_checksum(ah, el)) return -EINVAL; if (need_swap) { - u32 integer, j; - u16 word; + u32 j; - word = swab16(eep->baseEepHeader.length); - eep->baseEepHeader.length = word; - - word = swab16(eep->baseEepHeader.checksum); - eep->baseEepHeader.checksum = word; - - word = swab16(eep->baseEepHeader.version); - eep->baseEepHeader.version = word; - - word = swab16(eep->baseEepHeader.regDmn[0]); - eep->baseEepHeader.regDmn[0] = word; - - word = swab16(eep->baseEepHeader.regDmn[1]); - eep->baseEepHeader.regDmn[1] = word; - - word = swab16(eep->baseEepHeader.rfSilent); - eep->baseEepHeader.rfSilent = word; - - word = swab16(eep->baseEepHeader.blueToothOptions); - eep->baseEepHeader.blueToothOptions = word; - - word = swab16(eep->baseEepHeader.deviceCap); - eep->baseEepHeader.deviceCap = word; + EEPROM_FIELD_SWAB16(eep->baseEepHeader.length); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.version); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap); for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) { struct modal_eep_header *pModal = &eep->modalHeader[j]; - integer = swab32(pModal->antCtrlCommon); - pModal->antCtrlCommon = integer; + EEPROM_FIELD_SWAB32(pModal->antCtrlCommon); - for (i = 0; i < AR5416_MAX_CHAINS; i++) { - integer = swab32(pModal->antCtrlChain[i]); - pModal->antCtrlChain[i] = integer; - } - for (i = 0; i < 3; i++) { - word = swab16(pModal->xpaBiasLvlFreq[i]); - pModal->xpaBiasLvlFreq[i] = word; - } + for (i = 0; i < AR5416_MAX_CHAINS; i++) + EEPROM_FIELD_SWAB32(pModal->antCtrlChain[i]); - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - word = swab16(pModal->spurChans[i].spurChan); - pModal->spurChans[i].spurChan = word; - } + for (i = 0; i < 3; i++) + EEPROM_FIELD_SWAB16(pModal->xpaBiasLvlFreq[i]); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) + EEPROM_FIELD_SWAB16( + pModal->spurChans[i].spurChan); } } @@ -337,7 +317,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) /* Enable fixup for AR_AN_TOP2 if necessary */ if ((ah->hw_version.devid == AR9280_DEVID_PCI) && - ((eep->baseEepHeader.version & 0xff) > 0x0a) && + ((le16_to_cpu(eep->baseEepHeader.version) & 0xff) > 0x0a) && (eep->baseEepHeader.pwdclkind == 0)) ah->need_an_top2_fixup = true; @@ -370,13 +350,13 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: - return pBase->regDmn[0]; + return le16_to_cpu(pBase->regDmn[0]); case EEP_OP_CAP: - return pBase->deviceCap; + return le16_to_cpu(pBase->deviceCap); case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: - return pBase->rfSilent; + return 
le16_to_cpu(pBase->rfSilent); case EEP_OB_5: return pModal[0].ob; case EEP_DB_5: @@ -490,11 +470,13 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, struct ar5416_eeprom_def *eep = &ah->eeprom.def; int i, regChainOffset; u8 txRxAttenLocal; + u32 antCtrlCommon; pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; + antCtrlCommon = le32_to_cpu(pModal->antCtrlCommon); - REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon & 0xffff); + REG_WRITE(ah, AR_PHY_SWITCH_COM, antCtrlCommon & 0xffff); for (i = 0; i < AR5416_MAX_CHAINS; i++) { if (AR_SREV_9280(ah)) { @@ -508,7 +490,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, regChainOffset = i * 0x1000; REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, - pModal->antCtrlChain[i]); + le32_to_cpu(pModal->antCtrlChain[i])); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & @@ -655,7 +637,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, static void ath9k_hw_def_set_addac(struct ath_hw *ah, struct ath9k_channel *chan) { -#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt]) +#define XPA_LVL_FREQ(cnt) (le16_to_cpu(pModal->xpaBiasLvlFreq[cnt])) struct modal_eep_header *pModal; struct ar5416_eeprom_def *eep = &ah->eeprom.def; u8 biaslevel; @@ -1315,7 +1297,9 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { - return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan; + __le16 spch = ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan; + + return le16_to_cpu(spch); } static u8 ath9k_hw_def_get_eepmisc(struct ath_hw *ah) -- cgit v1.2.3 From 63fefa050477b0974ab34f650e21a7cfc3b02d96 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Mon, 5 Dec 2016 13:27:37 +0200 Subject: ath9k: Introduce airtime fairness scheduling between stations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reworks the ath9k driver to schedule transmissions to connected stations in a way that enforces airtime fairness between them. It accomplishes this by measuring the time spent transmitting to or receiving from a station at TX and RX completion, and accounting this to a per-station, per-QoS level airtime deficit. Then, an FQ-CoDel based deficit scheduler is employed at packet dequeue time, to control which station gets the next transmission opportunity. Airtime fairness can significantly improve the efficiency of the network when station rates vary. The following throughput values are from a simple three-station test scenario, where two stations operate at the highest HT20 rate, and one station at the lowest, and the scheduler is employed at the access point: Before / After Fast station 1: 19.17 / 25.09 Mbps Fast station 2: 19.83 / 25.21 Mbps Slow station: 2.58 / 1.77 Mbps Total: 41.58 / 52.07 Mbps The benefit of airtime fairness goes up the more stations are present. In a 30-station test with one station artificially limited to 1 Mbps, we have seen aggregate throughput go from 2.14 to 17.76 Mbps. 
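For readers unfamiliar with deficit scheduling, a much-simplified sketch of the idea follows (hypothetical names, single queue, not the actual ath9k code; the real implementation in xmit.c keeps the deficit per station and per AC in ath_node and refills it in units of ATH_AIRTIME_QUANTUM, with separate new/old queues):

	struct sta_entry {			/* stand-in for ath_node */
		s64 deficit[IEEE80211_NUM_ACS];	/* airtime budget, usec */
		struct list_head list;
	};

	static struct sta_entry *pick_next_station(struct list_head *q, int ac)
	{
		struct sta_entry *sta;

		while (!list_empty(q)) {
			sta = list_first_entry(q, struct sta_entry, list);
			if (sta->deficit[ac] > 0)
				return sta;	/* may transmit now */

			/* out of budget: refill one quantum, rotate to the back */
			sta->deficit[ac] += ATH_AIRTIME_QUANTUM;
			list_move_tail(&sta->list, q);
		}
		return NULL;
	}

	/* at TX/RX completion the measured duration is charged back:
	 *	sta->deficit[ac] -= airtime_usec;
	 * so a slow station exhausts its budget after fewer frames and is
	 * scheduled less often, equalizing airtime rather than frame counts.
	 */
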
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ath9k.h | 25 +++- drivers/net/wireless/ath/ath9k/channel.c | 14 ++- drivers/net/wireless/ath/ath9k/debug.c | 3 + drivers/net/wireless/ath/ath9k/debug.h | 13 +++ drivers/net/wireless/ath/ath9k/debug_sta.c | 54 +++++++++ drivers/net/wireless/ath/ath9k/init.c | 2 + drivers/net/wireless/ath/ath9k/main.c | 6 +- drivers/net/wireless/ath/ath9k/recv.c | 65 +++++++++++ drivers/net/wireless/ath/ath9k/xmit.c | 177 +++++++++++++++++++++-------- 9 files changed, 303 insertions(+), 56 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 378d3458fddb..79e4b71f4495 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -112,6 +112,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_TXFIFO_DEPTH 8 #define ATH_TX_ERROR 0x01 +#define ATH_AIRTIME_QUANTUM 300 /* usec */ + /* Stop tx traffic 1ms before the GO goes away */ #define ATH_P2P_PS_STOP_TIME 1000 @@ -247,6 +249,9 @@ struct ath_atx_tid { bool has_queued; }; +void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid); +void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid); + struct ath_node { struct ath_softc *sc; struct ieee80211_sta *sta; /* station struct we're part of */ @@ -258,9 +263,12 @@ struct ath_node { bool sleeping; bool no_ps_filter; + s64 airtime_deficit[IEEE80211_NUM_ACS]; + u32 airtime_rx_start; #ifdef CONFIG_ATH9K_STATION_STATISTICS struct ath_rx_rate_stats rx_rate_stats; + struct ath_airtime_stats airtime_stats; #endif u8 key_idx[4]; @@ -317,10 +325,16 @@ struct ath_rx { /* Channel Context */ /*******************/ +struct ath_acq { + struct list_head acq_new; + struct list_head acq_old; + spinlock_t lock; +}; + struct ath_chanctx { struct cfg80211_chan_def chandef; struct list_head vifs; - struct list_head acq[IEEE80211_NUM_ACS]; + struct ath_acq acq[IEEE80211_NUM_ACS]; int hw_queue_base; /* do not dereference, use for comparison only */ @@ -575,6 +589,8 @@ void ath_txq_schedule_all(struct ath_softc *sc); int ath_tx_init(struct ath_softc *sc, int nbufs); int ath_txq_update(struct ath_softc *sc, int qnum, struct ath9k_tx_queue_info *q); +u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, + int width, int half_gi, bool shortPreamble); void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); void ath_assign_seq(struct ath_common *common, struct sk_buff *skb); int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, @@ -963,6 +979,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); #define ATH9K_NUM_CHANCTX 2 /* supports 2 operating channels */ +#define AIRTIME_USE_TX BIT(0) +#define AIRTIME_USE_RX BIT(1) +#define AIRTIME_USE_NEW_QUEUES BIT(2) +#define AIRTIME_ACTIVE(flags) (!!(flags & (AIRTIME_USE_TX|AIRTIME_USE_RX))) + struct ath_softc { struct ieee80211_hw *hw; struct device *dev; @@ -1005,6 +1026,8 @@ struct ath_softc { short nbcnvifs; unsigned long ps_usecount; + u16 airtime_flags; /* AIRTIME_* */ + struct ath_rx rx; struct ath_tx tx; struct ath_beacon beacon; diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 929dd70f48eb..b84539d89f1a 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -118,8 +118,11 @@ void ath_chanctx_init(struct ath_softc *sc) INIT_LIST_HEAD(&ctx->vifs); ctx->txpower = ATH_TXPOWER_MAX; 
ctx->flush_timeout = HZ / 5; /* 200ms */ - for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) - INIT_LIST_HEAD(&ctx->acq[j]); + for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) { + INIT_LIST_HEAD(&ctx->acq[j].acq_new); + INIT_LIST_HEAD(&ctx->acq[j].acq_old); + spin_lock_init(&ctx->acq[j].lock); + } } } @@ -1345,8 +1348,11 @@ void ath9k_offchannel_init(struct ath_softc *sc) ctx->txpower = ATH_TXPOWER_MAX; cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); - for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) - INIT_LIST_HEAD(&ctx->acq[i]); + for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) { + INIT_LIST_HEAD(&ctx->acq[i].acq_new); + INIT_LIST_HEAD(&ctx->acq[i].acq_old); + spin_lock_init(&ctx->acq[i].lock); + } sc->offchannel.chan.offchannel = true; } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 89a94dd5f2cb..43930c336987 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1399,5 +1399,8 @@ int ath9k_init_debug(struct ath_hw *ah) debugfs_create_file("tpc", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_tpc); + debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, &sc->airtime_flags); + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index a078cdd3170d..249f8141cd00 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -221,6 +221,11 @@ struct ath_rx_rate_stats { } cck_stats[4]; }; +struct ath_airtime_stats { + u32 rx_airtime; + u32 tx_airtime; +}; + #define ANT_MAIN 0 #define ANT_ALT 1 @@ -314,12 +319,20 @@ ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause) void ath_debug_rate_stats(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb); +void ath_debug_airtime(struct ath_softc *sc, + struct ath_node *an, + u32 rx, u32 tx); #else static inline void ath_debug_rate_stats(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb) { } +static inline void ath_debug_airtime(struct ath_softc *sc, + struct ath_node *an, + u32 rx, u32 tx) +{ +} #endif /* CONFIG_ATH9K_STATION_STATISTICS */ #endif /* DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index 2a3a3c4671bc..524cbf13ca9c 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -242,6 +242,59 @@ static const struct file_operations fops_node_recv = { .llseek = default_llseek, }; +void ath_debug_airtime(struct ath_softc *sc, + struct ath_node *an, + u32 rx, + u32 tx) +{ + struct ath_airtime_stats *astats = &an->airtime_stats; + + astats->rx_airtime += rx; + astats->tx_airtime += tx; +} + +static ssize_t read_airtime(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_node *an = file->private_data; + struct ath_airtime_stats *astats; + static const char *qname[4] = { + "VO", "VI", "BE", "BK" + }; + u32 len = 0, size = 256; + char *buf; + size_t retval; + int i; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + astats = &an->airtime_stats; + + len += scnprintf(buf + len, size - len, "RX: %u us\n", astats->rx_airtime); + len += scnprintf(buf + len, size - len, "TX: %u us\n", astats->tx_airtime); + len += scnprintf(buf + len, size - len, "Deficit: "); + for (i = 0; i < 4; i++) + len += scnprintf(buf+len, size - len, "%s: %lld us ", qname[i], an->airtime_deficit[i]); + if (len < size) + buf[len++] = '\n'; + + retval = 
simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + + +static const struct file_operations fops_airtime = { + .read = read_airtime, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + + void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -251,4 +304,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr); debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv); + debugfs_create_file("airtime", S_IRUGO, dir, an, &fops_airtime); } diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 20794660d6ae..084ad1bd495f 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -620,6 +620,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, /* Will be cleared in ath9k_start() */ set_bit(ATH_OP_INVALID, &common->op_flags); + sc->airtime_flags = (AIRTIME_USE_TX | AIRTIME_USE_RX | + AIRTIME_USE_NEW_QUEUES); sc->sc_ah = ah; sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 59e3bd0f4c20..58f06ce9a4cf 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -70,10 +70,10 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq, goto out; if (txq->mac80211_qnum >= 0) { - struct list_head *list; + struct ath_acq *acq; - list = &sc->cur_chan->acq[txq->mac80211_qnum]; - if (!list_empty(list)) + acq = &sc->cur_chan->acq[txq->mac80211_qnum]; + if (!list_empty(&acq->acq_new) || !list_empty(&acq->acq_old)) pending = true; } out: diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index fb4ba27d92b7..d79837fe333f 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1002,6 +1002,70 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc, } } +static void ath_rx_count_airtime(struct ath_softc *sc, + struct ath_rx_status *rs, + struct sk_buff *skb) +{ + struct ath_node *an; + struct ath_acq *acq; + struct ath_vif *avp; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_sta *sta; + struct ieee80211_rx_status *rxs; + const struct ieee80211_rate *rate; + bool is_sgi, is_40, is_sp; + int phy; + u16 len = rs->rs_datalen; + u32 airtime = 0; + u8 tidno, acno; + + if (!ieee80211_is_data(hdr->frame_control)) + return; + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL); + if (!sta) + goto exit; + an = (struct ath_node *) sta->drv_priv; + avp = (struct ath_vif *) an->vif->drv_priv; + tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + acno = TID_TO_WME_AC(tidno); + acq = &avp->chanctx->acq[acno]; + + rxs = IEEE80211_SKB_RXCB(skb); + + is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI); + is_40 = !!(rxs->flag & RX_FLAG_40MHZ); + is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE); + + if (!!(rxs->flag & RX_FLAG_HT)) { + /* MCS rates */ + + airtime += ath_pkt_duration(sc, rxs->rate_idx, len, + is_40, is_sgi, is_sp); + } else { + + phy = IS_CCK_RATE(rs->rs_rate) ? 
WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM; + rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx]; + airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100, + len, rxs->rate_idx, is_sp); + } + + if (!!(sc->airtime_flags & AIRTIME_USE_RX)) { + spin_lock_bh(&acq->lock); + an->airtime_deficit[acno] -= airtime; + if (an->airtime_deficit[acno] <= 0) + __ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno)); + spin_unlock_bh(&acq->lock); + } + ath_debug_airtime(sc, an, airtime, 0); +exit: + rcu_read_unlock(); +} + int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_rxbuf *bf; @@ -1148,6 +1212,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) ath9k_antenna_check(sc, &rs); ath9k_apply_ampdu_details(sc, &rs, rxs); ath_debug_rate_stats(sc, &rs, skb); + ath_rx_count_airtime(sc, &rs, skb); hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_ack(hdr->frame_control)) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 5fa98e26f042..1c5a78255621 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -124,21 +124,44 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) ath_tx_status(hw, skb); } -static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid) +void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { - struct list_head *list; struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; struct ath_chanctx *ctx = avp->chanctx; + struct ath_acq *acq; + struct list_head *tid_list; + u8 acno = TID_TO_WME_AC(tid->tidno); - if (!ctx) + if (!ctx || !list_empty(&tid->list)) return; - list = &ctx->acq[TID_TO_WME_AC(tid->tidno)]; - if (list_empty(&tid->list)) - list_add_tail(&tid->list, list); + + acq = &ctx->acq[acno]; + if ((sc->airtime_flags & AIRTIME_USE_NEW_QUEUES) && + tid->an->airtime_deficit[acno] > 0) + tid_list = &acq->acq_new; + else + tid_list = &acq->acq_old; + + list_add_tail(&tid->list, tid_list); } +void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; + struct ath_chanctx *ctx = avp->chanctx; + struct ath_acq *acq; + + if (!ctx || !list_empty(&tid->list)) + return; + + acq = &ctx->acq[TID_TO_WME_AC(tid->tidno)]; + spin_lock_bh(&acq->lock); + __ath_tx_queue_tid(sc, tid); + spin_unlock_bh(&acq->lock); +} + + void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue) { struct ath_softc *sc = hw->priv; @@ -153,7 +176,7 @@ void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue) ath_txq_lock(sc, txq); tid->has_queued = true; - ath_tx_queue_tid(sc, txq, tid); + ath_tx_queue_tid(sc, tid); ath_txq_schedule(sc, txq); ath_txq_unlock(sc, txq); @@ -660,7 +683,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, skb_queue_splice_tail(&bf_pending, &tid->retry_q); if (!an->sleeping) { - ath_tx_queue_tid(sc, txq, tid); + ath_tx_queue_tid(sc, tid); if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) tid->clear_ps_filter = true; @@ -688,6 +711,53 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); } +static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_txq *txq, + struct ath_buf *bf, struct ath_tx_status *ts) +{ + struct ath_node *an; + struct ath_acq *acq = &sc->cur_chan->acq[txq->mac80211_qnum]; + struct sk_buff *skb; + struct ieee80211_hdr 
*hdr; + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_tx_rate rates[4]; + struct ieee80211_sta *sta; + int i; + u32 airtime = 0; + + skb = bf->bf_mpdu; + if(!skb) + return; + + hdr = (struct ieee80211_hdr *)skb->data; + memcpy(rates, bf->rates, sizeof(rates)); + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); + if(!sta) + goto exit; + + + an = (struct ath_node *) sta->drv_priv; + + airtime += ts->duration * (ts->ts_longretry + 1); + + for(i=0; i < ts->ts_rateindex; i++) + airtime += ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i) * rates[i].count; + + if (!!(sc->airtime_flags & AIRTIME_USE_TX)) { + spin_lock_bh(&acq->lock); + an->airtime_deficit[txq->mac80211_qnum] -= airtime; + if (an->airtime_deficit[txq->mac80211_qnum] <= 0) + __ath_tx_queue_tid(sc, ath_get_skb_tid(sc, an, skb)); + spin_unlock_bh(&acq->lock); + } + ath_debug_airtime(sc, an, 0, airtime); + +exit: + rcu_read_unlock(); +} + static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf, struct list_head *bf_head) @@ -709,6 +779,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, ts->ts_rateindex); + ath_tx_count_airtime(sc, txq, bf, ts); hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); @@ -1068,8 +1139,8 @@ finish: * width - 0 for 20 MHz, 1 for 40 MHz * half_gi - to use 4us v/s 3.6 us for symbol time */ -static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, - int width, int half_gi, bool shortPreamble) +u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, + int width, int half_gi, bool shortPreamble) { u32 nbits, nsymbits, duration, nsymbols; int streams; @@ -1468,7 +1539,7 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, } static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid, bool *stop) + struct ath_atx_tid *tid) { struct ath_buf *bf; struct ieee80211_tx_info *tx_info; @@ -1490,7 +1561,6 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) { __skb_queue_tail(&tid->retry_q, bf->bf_mpdu); - *stop = true; return false; } @@ -1614,7 +1684,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ath_txq_lock(sc, txq); tid->clear_ps_filter = true; if (ath_tid_has_buffered(tid)) { - ath_tx_queue_tid(sc, txq, tid); + ath_tx_queue_tid(sc, tid); ath_txq_schedule(sc, txq); } ath_txq_unlock_complete(sc, txq); @@ -1913,9 +1983,10 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_atx_tid *tid, *last_tid; + struct ath_atx_tid *tid; struct list_head *tid_list; - bool sent = false; + struct ath_acq *acq; + bool active = AIRTIME_ACTIVE(sc->airtime_flags); if (txq->mac80211_qnum < 0) return; @@ -1924,48 +1995,55 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) return; spin_lock_bh(&sc->chan_lock); - tid_list = &sc->cur_chan->acq[txq->mac80211_qnum]; - - if (list_empty(tid_list)) { - spin_unlock_bh(&sc->chan_lock); - return; - } - rcu_read_lock(); + acq = &sc->cur_chan->acq[txq->mac80211_qnum]; - last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list); - while 
(!list_empty(tid_list)) { - bool stop = false; - - if (sc->cur_chan->stopped) - break; - - tid = list_first_entry(tid_list, struct ath_atx_tid, list); - list_del_init(&tid->list); + if (sc->cur_chan->stopped) + goto out; - if (ath_tx_sched_aggr(sc, txq, tid, &stop)) - sent = true; +begin: + tid_list = &acq->acq_new; + if (list_empty(tid_list)) { + tid_list = &acq->acq_old; + if (list_empty(tid_list)) + goto out; + } + tid = list_first_entry(tid_list, struct ath_atx_tid, list); - /* - * add tid to round-robin queue if more frames - * are pending for the tid - */ - if (ath_tid_has_buffered(tid)) - ath_tx_queue_tid(sc, txq, tid); + if (active && tid->an->airtime_deficit[txq->mac80211_qnum] <= 0) { + spin_lock_bh(&acq->lock); + tid->an->airtime_deficit[txq->mac80211_qnum] += ATH_AIRTIME_QUANTUM; + list_move_tail(&tid->list, &acq->acq_old); + spin_unlock_bh(&acq->lock); + goto begin; + } - if (stop) - break; + if (!ath_tid_has_buffered(tid)) { + spin_lock_bh(&acq->lock); + if ((tid_list == &acq->acq_new) && !list_empty(&acq->acq_old)) + list_move_tail(&tid->list, &acq->acq_old); + else { + list_del_init(&tid->list); + } + spin_unlock_bh(&acq->lock); + goto begin; + } - if (tid == last_tid) { - if (!sent) - break; - sent = false; - last_tid = list_entry(tid_list->prev, - struct ath_atx_tid, list); + /* + * If we succeed in scheduling something, immediately restart to make + * sure we keep the HW busy. + */ + if(ath_tx_sched_aggr(sc, txq, tid)) { + if (!active) { + spin_lock_bh(&acq->lock); + list_move_tail(&tid->list, &acq->acq_old); + spin_unlock_bh(&acq->lock); } + goto begin; } +out: rcu_read_unlock(); spin_unlock_bh(&sc->chan_lock); } @@ -2819,6 +2897,9 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) struct ath_atx_tid *tid; int tidno, acno; + for (acno = 0; acno < IEEE80211_NUM_ACS; acno++) + an->airtime_deficit[acno] = ATH_AIRTIME_QUANTUM; + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { tid = ath_node_to_tid(an, tidno); tid->an = an; -- cgit v1.2.3 From 5c4607ebaabed2e9b414593a83c0ff97c6966f12 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Mon, 5 Dec 2016 13:27:38 +0200 Subject: ath9k: Turn ath_txq_lock/unlock() into static inlines. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are one-line functions that just call spin_lock/unlock_bh(); turn them into static inlines to avoid the function call overhead. 
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ath9k.h | 11 +++++++++-- drivers/net/wireless/ath/ath9k/xmit.c | 12 ------------ 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 79e4b71f4495..331947b6a667 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -569,6 +569,15 @@ static inline void ath_chanctx_check_active(struct ath_softc *sc, #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ +static inline void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) +{ + spin_lock_bh(&txq->axq_lock); +} +static inline void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) +{ + spin_unlock_bh(&txq->axq_lock); +} + void ath_startrecv(struct ath_softc *sc); bool ath_stoprecv(struct ath_softc *sc); u32 ath_calcrxfilter(struct ath_softc *sc); @@ -576,8 +585,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs); void ath_rx_cleanup(struct ath_softc *sc); int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); -void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq); -void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq); void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq); void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); bool ath_drain_all_txq(struct ath_softc *sc); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 1c5a78255621..b727f34796af 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -97,18 +97,6 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb(skb); } -void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) - __acquires(&txq->axq_lock) -{ - spin_lock_bh(&txq->axq_lock); -} - -void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) - __releases(&txq->axq_lock) -{ - spin_unlock_bh(&txq->axq_lock); -} - void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) __releases(&txq->axq_lock) { -- cgit v1.2.3 From 02a9e08d7374f1bb680cd44a2f124793f4e7d6e7 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 7 Dec 2016 10:30:32 +0530 Subject: ath10k: Avoid potential page alloc BUG_ON in tx free path 'ath10k_htt_tx_free_cont_txbuf' and 'ath10k_htt_tx_free_cont_frag_desc' have NULL pointer checks to avoid crash if they are called twice but this is as of now not sufficient as these pointers are not assigned to NULL once the contiguous DMA memory allocation is freed, fix this. Though this may not be hit with the explicity check of state variable 'tx_mem_allocated' check, good to have this addressed as well. Below BUG_ON is hit when the above scenario is simulated with kernel debugging enabled page:f6d09a00 count:0 mapcount:-127 mapping: (null) index:0x0 flags: 0x40000000() page dumped because: VM_BUG_ON_PAGE(page_ref_count(page) == 0) ------------[ cut here ]------------ kernel BUG at ./include/linux/mm.h:445! 
invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC EIP is at put_page_testzero.part.88+0xd/0xf Call Trace: [] __free_pages+0x3c/0x40 [] free_pages+0x3e/0x50 [] dma_generic_free_coherent+0x24/0x30 [] ath10k_htt_tx_free_cont_txbuf+0xf8/0x140 [] ath10k_htt_tx_destroy+0x29/0xa0 [] ath10k_core_destroy+0x60/0x80 [ath10k_core] [] ath10k_pci_remove+0x79/0xa0 [ath10k_pci] [] pci_device_remove+0x38/0xb0 [] __device_release_driver+0x72/0x100 [] driver_detach+0x97/0xa0 [] bus_remove_driver+0x40/0x80 [] driver_unregister+0x2a/0x60 [] pci_unregister_driver+0x18/0x70 [] ath10k_pci_exit+0xd/0x2be [ath10k_pci] [] SyS_delete_module+0x158/0x210 [] ? __might_fault+0x41/0xa0 [] ? __might_fault+0x8b/0xa0 [] do_fast_syscall_32+0x9b/0x1c0 [] sysenter_past_esp+0x45/0x74 Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_tx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 27e49db4287a..86b427f5e2bc 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -239,6 +239,7 @@ static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt) size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr); + htt->txbuf.vaddr = NULL; } static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt) @@ -268,6 +269,7 @@ static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt) size, htt->frag_desc.vaddr, htt->frag_desc.paddr); + htt->frag_desc.vaddr = NULL; } static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) -- cgit v1.2.3 From 0c2e384267b815fb784c415a90de7bdd78da0b66 Mon Sep 17 00:00:00 2001 From: Koen Vandeputte Date: Wed, 14 Dec 2016 17:28:59 +0100 Subject: mac80211: only alloc mem if a station doesn't exist yet This speeds up the function in case a station already exists by avoiding calling an expensive kzalloc just to free it again after the next check. Signed-off-by: Koen Vandeputte Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 4ab75a9d70c7..f5a24b742eda 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -513,23 +513,23 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) { struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; - struct station_info *sinfo; + struct station_info *sinfo = NULL; int err = 0; lockdep_assert_held(&local->sta_mtx); - sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); - if (!sinfo) { - err = -ENOMEM; - goto out_err; - } - /* check if STA exists already */ if (sta_info_get_bss(sdata, sta->sta.addr)) { err = -EEXIST; goto out_err; } + sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); + if (!sinfo) { + err = -ENOMEM; + goto out_err; + } + local->num_sta++; local->sta_generation++; smp_mb(); -- cgit v1.2.3 From 41d085835d3d2258633d8959e717198e31d3862e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:46:54 +0100 Subject: mac80211: minstrel_ht: move supported bitrate mask out of group data Improves dcache footprint by ensuring that fewer cache lines need to be touched. 
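The dcache argument follows from the layout: before the change, each group's supported mask sat inside a comparatively large per-group statistics struct, so walking all groups to test the masks pulled in roughly one cache line per group; afterwards the masks form one small contiguous u16 array in the per-station struct. The sketch below only illustrates that effect -- the struct names, field sizes and group count are invented stand-ins, not the real mac80211 definitions.

/*
 * Stand-alone illustration of the data-layout change (invented sizes).
 * Build with: cc -o layout layout.c
 */
#include <stdio.h>
#include <stdint.h>

#define NGROUPS 30	/* stand-in for MINSTREL_GROUPS_NB */

/* old layout: the hot bitmask is embedded in a big per-group struct,
 * so scanning all masks strides through the whole statistics array */
struct group_old {
	uint16_t supported;
	uint8_t stats[60];	/* placeholder for per-rate statistics */
};

/* new layout: all masks live in one small array in the station struct */
struct sta_new {
	uint16_t supported[NGROUPS];
	struct { uint8_t stats[60]; } groups[NGROUPS];
};

int main(void)
{
	size_t old_span = sizeof(struct group_old) * NGROUPS;
	size_t new_span = sizeof(uint16_t) * NGROUPS;

	printf("old: masks spread over %zu bytes (~%zu cache lines)\n",
	       old_span, (old_span + 63) / 64);
	printf("new: masks packed into %zu bytes (~%zu cache lines)\n",
	       new_span, (new_span + 63) / 64);
	return 0;
}

With 64-byte cache lines the packed u16 masks fit in about one line, while the old walk touched on the order of one line per group -- that difference is the dcache saving the commit refers to.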
Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 30 +++++++++++++++--------------- net/mac80211/rc80211_minstrel_ht.h | 6 +++--- net/mac80211/rc80211_minstrel_ht_debugfs.c | 8 ++++---- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 30fbabf4bcbc..ed8a34d46e73 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -301,7 +301,7 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, break; /* short preamble */ - if (!(mi->groups[group].supported & BIT(idx))) + if (!(mi->supported[group] & BIT(idx))) idx += 4; } return &mi->groups[group].rates[idx]; @@ -486,7 +486,7 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) MCS_GROUP_RATES].streams; for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { mg = &mi->groups[group]; - if (!mg->supported || group == MINSTREL_CCK_GROUP) + if (!mi->supported[group] || group == MINSTREL_CCK_GROUP) continue; tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; @@ -540,7 +540,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { mg = &mi->groups[group]; - if (!mg->supported) + if (!mi->supported[group]) continue; mi->sample_count++; @@ -550,7 +550,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) tmp_group_tp_rate[j] = group; for (i = 0; i < MCS_GROUP_RATES; i++) { - if (!(mg->supported & BIT(i))) + if (!(mi->supported[group] & BIT(i))) continue; index = MCS_GROUP_RATES * group + i; @@ -636,7 +636,7 @@ minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi) mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); mg = &mi->groups[mi->sample_group]; - if (!mg->supported) + if (!mi->supported[mi->sample_group]) continue; if (++mg->index >= MCS_GROUP_RATES) { @@ -657,7 +657,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary) while (group > 0) { group--; - if (!mi->groups[group].supported) + if (!mi->supported[group]) continue; if (minstrel_mcs_groups[group].streams > @@ -994,7 +994,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) sample_idx = sample_table[mg->column][mg->index]; minstrel_set_next_sample_idx(mi); - if (!(mg->supported & BIT(sample_idx))) + if (!(mi->supported[sample_group] & BIT(sample_idx))) return -1; mrs = &mg->rates[sample_idx]; @@ -1052,7 +1052,7 @@ static void minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, bool val) { - u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported; + u8 supported = mi->supported[MINSTREL_CCK_GROUP]; if (!supported || !mi->cck_supported_short) return; @@ -1061,7 +1061,7 @@ minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp, return; supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4); - mi->groups[MINSTREL_CCK_GROUP].supported = supported; + mi->supported[MINSTREL_CCK_GROUP] = supported; } static void @@ -1154,7 +1154,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, mi->cck_supported_short |= BIT(i); } - mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported; + mi->supported[MINSTREL_CCK_GROUP] = mi->cck_supported; } static void @@ -1224,7 +1224,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, u32 gflags = minstrel_mcs_groups[i].flags; int bw, nss; - 
mi->groups[i].supported = 0; + mi->supported[i] = 0; if (i == MINSTREL_CCK_GROUP) { minstrel_ht_update_cck(mp, mi, sband, sta); continue; @@ -1256,8 +1256,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, if (use_vht && minstrel_vht_only) continue; #endif - mi->groups[i].supported = mcs->rx_mask[nss - 1]; - if (mi->groups[i].supported) + mi->supported[i] = mcs->rx_mask[nss - 1]; + if (mi->supported[i]) n_supported++; continue; } @@ -1283,10 +1283,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, else bw = BW_20; - mi->groups[i].supported = minstrel_get_valid_vht_rates(bw, nss, + mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss, vht_cap->vht_mcs.tx_mcs_map); - if (mi->groups[i].supported) + if (mi->supported[i]) n_supported++; } diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index e8b52a94d24b..de1646c42e82 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -52,9 +52,6 @@ struct minstrel_mcs_group_data { u8 index; u8 column; - /* bitfield of supported MCS rates of this group */ - u16 supported; - /* sorted rate set within a MCS group*/ u16 max_group_tp_rate[MAX_THR_RATES]; u16 max_group_prob_rate; @@ -101,6 +98,9 @@ struct minstrel_ht_sta { u8 cck_supported; u8 cck_supported_short; + /* Bitfield of supported MCS rates of all groups */ + u16 supported[MINSTREL_GROUPS_NB]; + /* MCS rate group info and statistics */ struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB]; }; diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index 5320e35ed3d0..1942d4b3cd18 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -24,7 +24,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) char gimode = 'L'; u32 gflags; - if (!mi->groups[i].supported) + if (!mi->supported[i]) return p; mg = &minstrel_mcs_groups[i]; @@ -42,7 +42,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; - if (!(mi->groups[i].supported & BIT(j))) + if (!(mi->supported[i] & BIT(j))) continue; if (gflags & IEEE80211_TX_RC_MCS) { @@ -170,7 +170,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) char gimode = 'L'; u32 gflags; - if (!mi->groups[i].supported) + if (!mi->supported[i]) return p; mg = &minstrel_mcs_groups[i]; @@ -188,7 +188,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; - if (!(mi->groups[i].supported & BIT(j))) + if (!(mi->supported[i] & BIT(j))) continue; if (gflags & IEEE80211_TX_RC_MCS) { -- cgit v1.2.3 From 782dda00ab8e9485ddc2a1fb8d8681dcc5136283 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:46:55 +0100 Subject: mac80211: minstrel_ht: move short preamble check out of get_rate Test short preamble support in minstrel_ht_update_caps instead of looking at the per-packet flag. Makes the code more efficient. 
Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index ed8a34d46e73..ae45dcb9d7a7 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -14,6 +14,7 @@ #include #include #include "rate.h" +#include "sta_info.h" #include "rc80211_minstrel.h" #include "rc80211_minstrel_ht.h" @@ -1048,22 +1049,6 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) return sample_idx; } -static void -minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp, - struct minstrel_ht_sta *mi, bool val) -{ - u8 supported = mi->supported[MINSTREL_CCK_GROUP]; - - if (!supported || !mi->cck_supported_short) - return; - - if (supported & (mi->cck_supported_short << (val * 4))) - return; - - supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4); - mi->supported[MINSTREL_CCK_GROUP] = supported; -} - static void minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate_control *txrc) @@ -1087,7 +1072,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, minstrel_aggr_check(sta, txrc->skb); info->flags |= mi->tx_flags; - minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble); #ifdef CONFIG_MAC80211_DEBUGFS if (mp->fixed_rate_idx != -1) @@ -1168,6 +1152,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; u16 sta_cap = sta->ht_cap.cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct sta_info *sinfo = container_of(sta, struct sta_info, sta); int use_vht; int n_supported = 0; int ack_dur; @@ -1293,6 +1278,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, if (!n_supported) goto use_legacy; + if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE)) + mi->cck_supported_short |= mi->cck_supported_short << 4; + /* create an initial rate table with the lowest supported rates */ minstrel_ht_update_stats(mp, mi); minstrel_ht_update_rates(mp, mi); -- cgit v1.2.3 From 70550df2d354f13ae0544f12aa14b79f094f0de2 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:46:56 +0100 Subject: mac80211: minstrel_ht: make att_hist and succ_hist u32 instead of u64 They are only used for debugging purposes and take a very long time to overflow. Visibly reduces the size of the per-sta rate control data. 
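For a rough sense of the overflow horizon (the traffic rate here is an assumption, chosen deliberately high): a u32 wraps after 2^32 ~ 4.3 * 10^9 counted attempts, so even a station sustaining 10,000 frame attempts per second would need about 4.3 * 10^5 seconds -- roughly five days -- of continuous traffic to wrap these debug-only history counters, while each minstrel_rate_stats entry shrinks by 8 bytes (two counters going from 8 to 4 bytes each).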
Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index c230bbe93262..209961cbb7e8 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -59,7 +59,7 @@ struct minstrel_rate_stats { u16 success, last_success; /* total attempts/success counters */ - u64 att_hist, succ_hist; + u32 att_hist, succ_hist; /* statistis of packet delivery probability * cur_prob - current prob within last update intervall -- cgit v1.2.3 From 95cd470c75a8822d35fa82a1b276d47da37229d1 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:46:57 +0100 Subject: mac80211: check for MCS in ieee80211_duration before fetching chanctx Makes the code a bit more efficient Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 058652d000d9..4dea18be385c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -64,6 +64,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct ieee80211_chanctx_conf *chanctx_conf; u32 rate_flags = 0; + /* assume HW handles this */ + if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS)) + return 0; + rcu_read_lock(); chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf); if (chanctx_conf) { @@ -72,10 +76,6 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, } rcu_read_unlock(); - /* assume HW handles this */ - if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS)) - return 0; - /* uh huh? */ if (WARN_ON_ONCE(tx->rate.idx < 0)) return 0; -- cgit v1.2.3 From 1109dc392ea0248f7bfca3537011aa7afaa05cee Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:46:58 +0100 Subject: mac80211: minstrel: remove cur_prob from debugfs This field is redundant, because it is simply last success divided by last attempt count. Removing it from the rate stats struct saves about 1.2 KiB per HT station. 
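Anyone who still wants the per-interval figure can recover it from the columns that remain: the kernel computed it as MINSTREL_FRAC(success, attempts) over the last update interval, so as a plain percentage it is simply

	cur_prob (%) ~= 100 * last_success / last_attempts

using the existing "suc" and "att" columns of the debugfs output (an illustrative recomputation on the reader's side, not something the kernel prints).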
Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel.c | 10 ++++++---- net/mac80211/rc80211_minstrel.h | 2 -- net/mac80211/rc80211_minstrel_debugfs.c | 16 ++++++---------- net/mac80211/rc80211_minstrel_ht_debugfs.c | 16 ++++++---------- 4 files changed, 18 insertions(+), 26 deletions(-) diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 14c5ba3a1b1c..a284ea08f048 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -159,21 +159,23 @@ minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi) void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs) { + unsigned int cur_prob; + if (unlikely(mrs->attempts > 0)) { mrs->sample_skipped = 0; - mrs->cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts); + cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts); if (unlikely(!mrs->att_hist)) { - mrs->prob_ewma = mrs->cur_prob; + mrs->prob_ewma = cur_prob; } else { /* update exponential weighted moving variance */ mrs->prob_ewmsd = minstrel_ewmsd(mrs->prob_ewmsd, - mrs->cur_prob, + cur_prob, mrs->prob_ewma, EWMA_LEVEL); /*update exponential weighted moving avarage */ mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma, - mrs->cur_prob, + cur_prob, EWMA_LEVEL); } mrs->att_hist += mrs->attempts; diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 209961cbb7e8..94b89b1b406c 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -62,10 +62,8 @@ struct minstrel_rate_stats { u32 att_hist, succ_hist; /* statistis of packet delivery probability - * cur_prob - current prob within last update intervall * prob_ewma - exponential weighted moving average of prob * prob_ewmsd - exp. weighted moving standard deviation of prob */ - unsigned int cur_prob; unsigned int prob_ewma; u16 prob_ewmsd; diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 820b0abc9c0d..ca0bafd6a44e 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -75,7 +75,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) { struct minstrel_sta_info *mi = inode->i_private; struct minstrel_debugfs_info *ms; - unsigned int i, tp_max, tp_avg, prob, eprob; + unsigned int i, tp_max, tp_avg, eprob; char *p; ms = kmalloc(2048, GFP_KERNEL); @@ -86,9 +86,9 @@ minstrel_stats_open(struct inode *inode, struct file *file) p = ms->buf; p += sprintf(p, "\n"); p += sprintf(p, - "best __________rate_________ ________statistics________ ________last_______ ______sum-of________\n"); + "best __________rate_________ ________statistics________ ____last_____ ______sum-of________\n"); p += sprintf(p, - "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [prob.|retry|suc|att] [#success | #attempts]\n"); + "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n"); for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; @@ -107,17 +107,15 @@ minstrel_stats_open(struct inode *inode, struct file *file) tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); - prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" - " %3u.%1u %3u %3u %-3u " + " %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->prob_ewmsd / 10, 
mrs->prob_ewmsd % 10, - prob / 10, prob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, @@ -148,7 +146,7 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file) { struct minstrel_sta_info *mi = inode->i_private; struct minstrel_debugfs_info *ms; - unsigned int i, tp_max, tp_avg, prob, eprob; + unsigned int i, tp_max, tp_avg, eprob; char *p; ms = kmalloc(2048, GFP_KERNEL); @@ -175,16 +173,14 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file) tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); - prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u," + p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u," "%llu,%llu,%d,%d\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10, - prob / 10, prob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index 1942d4b3cd18..0cecd23bb834 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -19,7 +19,7 @@ static char * minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) { const struct mcs_group *mg; - unsigned int j, tp_max, tp_avg, prob, eprob, tx_time; + unsigned int j, tp_max, tp_avg, eprob, tx_time; char htmode = '2'; char gimode = 'L'; u32 gflags; @@ -83,17 +83,15 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); - prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" - " %3u.%1u %3u %3u %-3u " + " %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10, - prob / 10, prob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, @@ -130,9 +128,9 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) p += sprintf(p, "\n"); p += sprintf(p, - " best ____________rate__________ ________statistics________ ________last_______ ______sum-of________\n"); + " best ____________rate__________ ________statistics________ _____last____ ______sum-of________\n"); p += sprintf(p, - "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [prob.|retry|suc|att] [#success | #attempts]\n"); + "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n"); p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p); for (i = 0; i < MINSTREL_CCK_GROUP; i++) @@ -165,7 +163,7 @@ static char * minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) { const struct mcs_group *mg; - unsigned int j, tp_max, tp_avg, prob, eprob, tx_time; + unsigned int j, tp_max, tp_avg, eprob, tx_time; char htmode = '2'; char gimode = 'L'; u32 gflags; @@ -226,16 +224,14 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); - prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u," + p += sprintf(p, 
"%u.%u,%u.%u,%u.%u,%u.%u,%u,%u," "%u,%llu,%llu,", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10, - prob / 10, prob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, -- cgit v1.2.3 From 0217eefa64509d63fbe8d1205310bafb2355cf4d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:46:59 +0100 Subject: mac80211: minstrel: reduce MINSTREL_SCALE The loss of a bit of extra precision does not hurt the calculation, 12 bits is still enough to calculate probabilities well. Reducing the scale makes it easier to avoid overflows Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 94b89b1b406c..03716fddaf24 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -14,7 +14,7 @@ #define SAMPLE_COLUMNS 10 /* number of columns in sample table */ /* scaled fraction values */ -#define MINSTREL_SCALE 16 +#define MINSTREL_SCALE 12 #define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div) #define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE) -- cgit v1.2.3 From 4f0bc9c61bae9c74ffcf5dbdbd241cbf0ec3a44e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:47:00 +0100 Subject: mac80211: minstrel: store probability variance instead of standard deviation This avoids the costly int_sqrt calls in the statistics update and moves it to the debugfs code instead. This also fixes an overflow in the previous standard deviation calculation. Signed-off-by: Thomas Huehn Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel.c | 8 ++++---- net/mac80211/rc80211_minstrel.h | 25 ++++++++++++++----------- net/mac80211/rc80211_minstrel_debugfs.c | 8 ++++++-- net/mac80211/rc80211_minstrel_ht_debugfs.c | 8 ++++++-- 4 files changed, 30 insertions(+), 19 deletions(-) diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index a284ea08f048..11a4cc393a98 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -168,10 +168,10 @@ minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs) mrs->prob_ewma = cur_prob; } else { /* update exponential weighted moving variance */ - mrs->prob_ewmsd = minstrel_ewmsd(mrs->prob_ewmsd, - cur_prob, - mrs->prob_ewma, - EWMA_LEVEL); + mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv, + cur_prob, + mrs->prob_ewma, + EWMA_LEVEL); /*update exponential weighted moving avarage */ mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma, diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 03716fddaf24..ea86e6784154 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -36,21 +36,16 @@ minstrel_ewma(int old, int new, int weight) } /* - * Perform EWMSD (Exponentially Weighted Moving Standard Deviation) calculation + * Perform EWMV (Exponentially Weighted Moving Variance) calculation */ static inline int -minstrel_ewmsd(int old_ewmsd, int cur_prob, int prob_ewma, int weight) +minstrel_ewmv(int old_ewmv, int cur_prob, int prob_ewma, int weight) { - int diff, incr, tmp_var; + int diff, incr; - /* calculate exponential weighted moving variance */ - diff = MINSTREL_TRUNC((cur_prob - prob_ewma) * 1000000); + diff = cur_prob - prob_ewma; incr = (EWMA_DIV - weight) * diff / EWMA_DIV; - tmp_var = old_ewmsd * old_ewmsd; - tmp_var = weight * (tmp_var + 
diff * incr / 1000000) / EWMA_DIV; - - /* return standard deviation */ - return (u16) int_sqrt(tmp_var); + return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV; } struct minstrel_rate_stats { @@ -65,7 +60,7 @@ struct minstrel_rate_stats { * prob_ewma - exponential weighted moving average of prob * prob_ewmsd - exp. weighted moving standard deviation of prob */ unsigned int prob_ewma; - u16 prob_ewmsd; + u16 prob_ewmv; /* maximum retry counts */ u8 retry_count; @@ -151,6 +146,14 @@ struct minstrel_debugfs_info { char buf[]; }; +/* Get EWMSD (Exponentially Weighted Moving Standard Deviation) * 10 */ +static inline int +minstrel_get_ewmsd10(struct minstrel_rate_stats *mrs) +{ + unsigned int ewmv = mrs->prob_ewmv; + return int_sqrt(MINSTREL_TRUNC(ewmv * 1000 * 1000)); +} + extern const struct rate_control_ops mac80211_minstrel; void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index ca0bafd6a44e..36fc971deb86 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -93,6 +93,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; + unsigned int prob_ewmsd; *(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' '; *(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' '; @@ -108,6 +109,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); + prob_ewmsd = minstrel_get_ewmsd10(mrs); p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" " %3u %3u %-3u " @@ -115,7 +117,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10, + prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, @@ -159,6 +161,7 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file) for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; + unsigned int prob_ewmsd; p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : "")); p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? 
"B" : "")); @@ -174,13 +177,14 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file) tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); + prob_ewmsd = minstrel_get_ewmsd10(mrs); p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u," "%llu,%llu,%d,%d\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10, + prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index 0cecd23bb834..7d969e300fb3 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -41,6 +41,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; + unsigned int prob_ewmsd; if (!(mi->supported[i] & BIT(j))) continue; @@ -84,6 +85,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); + prob_ewmsd = minstrel_get_ewmsd10(mrs); p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" " %3u %3u %-3u " @@ -91,7 +93,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10, + prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, @@ -185,6 +187,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; + unsigned int prob_ewmsd; if (!(mi->supported[i] & BIT(j))) continue; @@ -225,13 +228,14 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); + prob_ewmsd = minstrel_get_ewmsd10(mrs); p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u," "%u,%llu,%llu,", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10, + prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, -- cgit v1.2.3 From 4cae4cd10d058eb4057e20cb72e82fe76ae739af Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:47:01 +0100 Subject: mac80211: minstrel: make prob_ewma u16 instead of u32 Saves about 1.2 KiB memory per station Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index ea86e6784154..be6c3f35f48b 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -59,7 +59,7 @@ struct minstrel_rate_stats { /* statistis of packet delivery probability * prob_ewma - exponential weighted moving average of prob * prob_ewmsd - exp. 
weighted moving standard deviation of prob */ - unsigned int prob_ewma; + u16 prob_ewma; u16 prob_ewmv; /* maximum retry counts */ -- cgit v1.2.3 From 73b16589b859130eb8b49882e71c7cd1caa5986d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 14 Dec 2016 20:47:02 +0100 Subject: mac80211: minstrel_ht: remove obsolete #if for >= 3 streams This was added during early development when 3x3 hardware was not very common yet. This is completely unnecessary now. Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index ae45dcb9d7a7..8e783e197e93 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -155,67 +155,47 @@ MODULE_PARM_DESC(minstrel_vht_only, const struct mcs_group minstrel_mcs_groups[] = { MCS_GROUP(1, 0, BW_20), MCS_GROUP(2, 0, BW_20), -#if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 0, BW_20), -#endif MCS_GROUP(1, 1, BW_20), MCS_GROUP(2, 1, BW_20), -#if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 1, BW_20), -#endif MCS_GROUP(1, 0, BW_40), MCS_GROUP(2, 0, BW_40), -#if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 0, BW_40), -#endif MCS_GROUP(1, 1, BW_40), MCS_GROUP(2, 1, BW_40), -#if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 1, BW_40), -#endif CCK_GROUP, #ifdef CONFIG_MAC80211_RC_MINSTREL_VHT VHT_GROUP(1, 0, BW_20), VHT_GROUP(2, 0, BW_20), -#if MINSTREL_MAX_STREAMS >= 3 VHT_GROUP(3, 0, BW_20), -#endif VHT_GROUP(1, 1, BW_20), VHT_GROUP(2, 1, BW_20), -#if MINSTREL_MAX_STREAMS >= 3 VHT_GROUP(3, 1, BW_20), -#endif VHT_GROUP(1, 0, BW_40), VHT_GROUP(2, 0, BW_40), -#if MINSTREL_MAX_STREAMS >= 3 VHT_GROUP(3, 0, BW_40), -#endif VHT_GROUP(1, 1, BW_40), VHT_GROUP(2, 1, BW_40), -#if MINSTREL_MAX_STREAMS >= 3 VHT_GROUP(3, 1, BW_40), -#endif VHT_GROUP(1, 0, BW_80), VHT_GROUP(2, 0, BW_80), -#if MINSTREL_MAX_STREAMS >= 3 VHT_GROUP(3, 0, BW_80), -#endif VHT_GROUP(1, 1, BW_80), VHT_GROUP(2, 1, BW_80), -#if MINSTREL_MAX_STREAMS >= 3 VHT_GROUP(3, 1, BW_80), #endif -#endif }; static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly; -- cgit v1.2.3 From 38252e9ef68936f104a5791740f4ca2f81510a33 Mon Sep 17 00:00:00 2001 From: Thomas Huehn Date: Wed, 14 Dec 2016 20:47:03 +0100 Subject: mac80211: minstrel: avoid port control frames for sampling Makes connections more reliable Signed-off-by: Thomas Huehn Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 11a4cc393a98..3ebe4405a2d4 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -367,6 +367,11 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, return; #endif + /* Don't use EAPOL frames for sampling on non-mrr hw */ + if (mp->hw->max_rates == 1 && + (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) + return; + delta = (mi->total_packets * sampling_ratio / 100) - (mi->sample_packets + mi->sample_deferred / 2); -- cgit v1.2.3 From f6b4122c93f43c3f7b5787ca20fa3ac458f50d05 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 16 Dec 2016 09:44:08 +0100 Subject: rfkill: hide unused goto label A cleanup introduced a harmless warning in some configurations: net/rfkill/core.c: In function 'rfkill_init': net/rfkill/core.c:1350:1: warning: label 'error_input' defined but not used [-Wunused-label] This adds another #ifdef around the 
label to match that around the caller. Fixes: 6124c53edeea ("rfkill: Cleanup error handling in rfkill_init()") Signed-off-by: Arnd Bergmann Signed-off-by: Johannes Berg --- net/rfkill/core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index f6b01f3616e5..b7adaee69756 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1347,8 +1347,10 @@ static int __init rfkill_init(void) return 0; +#ifdef CONFIG_RFKILL_INPUT error_input: rfkill_any_led_trigger_unregister(); +#endif error_led_trigger: misc_deregister(&rfkill_miscdev); error_misc: -- cgit v1.2.3 From 505a2e882bfae6627c84586edb276485df05c2ef Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 16 Dec 2016 11:21:54 +0000 Subject: nl80211: rework {sched_,}scan event related functions A couple of functions used with scan events were named with term "send" although they were only preparing the the event message so renamed those. Also remove nl80211_send_sched_scan_results() in favor of just calling nl80211_send_sched_scan() with the right value. Signed-off-by: Arend van Spriel [mention nl80211_send_sched_scan_results() in the commit log] Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 34 ++++++++-------------------------- net/wireless/nl80211.h | 6 ++---- net/wireless/scan.c | 9 +++++---- 3 files changed, 15 insertions(+), 34 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7762231abd32..23692658fe98 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -12804,7 +12804,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg, return -ENOBUFS; } -static int nl80211_send_scan_msg(struct sk_buff *msg, +static int nl80211_prep_scan_msg(struct sk_buff *msg, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 portid, u32 seq, int flags, @@ -12835,7 +12835,7 @@ static int nl80211_send_scan_msg(struct sk_buff *msg, } static int -nl80211_send_sched_scan_msg(struct sk_buff *msg, +nl80211_prep_sched_scan_msg(struct sk_buff *msg, struct cfg80211_registered_device *rdev, struct net_device *netdev, u32 portid, u32 seq, int flags, u32 cmd) @@ -12867,7 +12867,7 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, if (!msg) return; - if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, + if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0, NL80211_CMD_TRIGGER_SCAN) < 0) { nlmsg_free(msg); return; @@ -12886,7 +12886,7 @@ struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, if (!msg) return NULL; - if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, + if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0, aborted ? 
NL80211_CMD_SCAN_ABORTED : NL80211_CMD_NEW_SCAN_RESULTS) < 0) { nlmsg_free(msg); @@ -12896,31 +12896,13 @@ struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, return msg; } -void nl80211_send_scan_result(struct cfg80211_registered_device *rdev, - struct sk_buff *msg) -{ - if (!msg) - return; - - genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, - NL80211_MCGRP_SCAN, GFP_KERNEL); -} - -void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, - struct net_device *netdev) +/* send message created by nl80211_build_scan_msg() */ +void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev, + struct sk_buff *msg) { - struct sk_buff *msg; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; - if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, - NL80211_CMD_SCHED_SCAN_RESULTS) < 0) { - nlmsg_free(msg); - return; - } - genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_SCAN, GFP_KERNEL); } @@ -12934,7 +12916,7 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, if (!msg) return; - if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) { + if (nl80211_prep_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) { nlmsg_free(msg); return; } diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 7e3821d7fcc5..75f82520211d 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -14,12 +14,10 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, bool aborted); -void nl80211_send_scan_result(struct cfg80211_registered_device *rdev, - struct sk_buff *msg); +void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev, + struct sk_buff *msg); void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, struct net_device *netdev, u32 cmd); -void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, - struct net_device *netdev); void nl80211_common_reg_change_event(enum nl80211_commands cmd_id, struct regulatory_request *request); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 35ad69fd0838..21be56b3128e 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -227,7 +227,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, ASSERT_RTNL(); if (rdev->scan_msg) { - nl80211_send_scan_result(rdev, rdev->scan_msg); + nl80211_send_scan_msg(rdev, rdev->scan_msg); rdev->scan_msg = NULL; return; } @@ -273,7 +273,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, if (!send_message) rdev->scan_msg = msg; else - nl80211_send_scan_result(rdev, msg); + nl80211_send_scan_msg(rdev, msg); } void __cfg80211_scan_done(struct work_struct *wk) @@ -321,7 +321,8 @@ void __cfg80211_sched_scan_results(struct work_struct *wk) spin_unlock_bh(&rdev->bss_lock); request->scan_start = jiffies; } - nl80211_send_sched_scan_results(rdev, request->dev); + nl80211_send_sched_scan(rdev, request->dev, + NL80211_CMD_SCHED_SCAN_RESULTS); } rtnl_unlock(); @@ -1147,7 +1148,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, else rcu_assign_pointer(tmp.pub.beacon_ies, ies); rcu_assign_pointer(tmp.pub.ies, ies); - + memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN); tmp.pub.channel = channel; tmp.pub.scan_width = data->scan_width; -- cgit v1.2.3 From e77a8be9a0a7f2c10151967e3c72c5afcbd41117 Mon Sep 17 00:00:00 2001 
From: Arend Van Spriel Date: Fri, 16 Dec 2016 12:15:54 +0000 Subject: nl80211: better describe field in struct nl80211_bss_select_rssi_adjust The two fields in struct nl80211_bss_select_rssi_adjust did not state their type or unit. Adding documentation. Reported-by: Jouni Malinen Signed-off-by: Arend van Spriel Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6b76e3b0c18e..d74e10b1246a 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4964,8 +4964,9 @@ enum nl80211_sched_scan_plan { /** * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters. * - * @band: band of BSS that must match for RSSI value adjustment. - * @delta: value used to adjust the RSSI value of matching BSS. + * @band: band of BSS that must match for RSSI value adjustment. The value + * of this field is according to &enum nl80211_band. + * @delta: value used to adjust the RSSI value of matching BSS in dB. */ struct nl80211_bss_select_rssi_adjust { __u8 band; -- cgit v1.2.3 From 7b854982b273134e4ff02c61a74e70e1b1876286 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Dec 2016 09:51:11 +0100 Subject: Revert "rfkill: Add rfkill-any LED trigger" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 73f4f76a196d7adb11a1e192bd8024fe0bc83910. As Mike reported, and I should've seen in review, we can't call the new LED functions, which acquire the mutex, from places like rfkill_set_sw_state() that are documented to be callable from any context the user likes to use. For Mike's case it led to a deadlock, but other scenarios are possible. Reported-by: Михаил Кринкин Signed-off-by: Johannes Berg --- net/rfkill/core.c | 75 +------------------------------------------------------ 1 file changed, 1 insertion(+), 74 deletions(-) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index b7adaee69756..afa4f71b4c7b 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -176,47 +176,6 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill) { led_trigger_unregister(&rfkill->led_trigger); } - -static struct led_trigger rfkill_any_led_trigger; - -static void __rfkill_any_led_trigger_event(void) -{ - enum led_brightness brightness = LED_OFF; - struct rfkill *rfkill; - - list_for_each_entry(rfkill, &rfkill_list, node) { - if (!(rfkill->state & RFKILL_BLOCK_ANY)) { - brightness = LED_FULL; - break; - } - } - - led_trigger_event(&rfkill_any_led_trigger, brightness); -} - -static void rfkill_any_led_trigger_event(void) -{ - mutex_lock(&rfkill_global_mutex); - __rfkill_any_led_trigger_event(); - mutex_unlock(&rfkill_global_mutex); -} - -static void rfkill_any_led_trigger_activate(struct led_classdev *led_cdev) -{ - rfkill_any_led_trigger_event(); -} - -static int rfkill_any_led_trigger_register(void) -{ - rfkill_any_led_trigger.name = "rfkill-any"; - rfkill_any_led_trigger.activate = rfkill_any_led_trigger_activate; - return led_trigger_register(&rfkill_any_led_trigger); -} - -static void rfkill_any_led_trigger_unregister(void) -{ - led_trigger_unregister(&rfkill_any_led_trigger); -} #else static void rfkill_led_trigger_event(struct rfkill *rfkill) { @@ -230,23 +189,6 @@ static inline int rfkill_led_trigger_register(struct rfkill *rfkill) static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) { } - -static void __rfkill_any_led_trigger_event(void) -{ -} - -static void 
rfkill_any_led_trigger_event(void) -{ -} - -static int rfkill_any_led_trigger_register(void) -{ - return 0; -} - -static void rfkill_any_led_trigger_unregister(void) -{ -} #endif /* CONFIG_RFKILL_LEDS */ static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, @@ -355,7 +297,6 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked) spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); - __rfkill_any_led_trigger_event(); if (prev != curr) rfkill_event(rfkill); @@ -536,7 +477,6 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); - rfkill_any_led_trigger_event(); if (rfkill->registered && prev != blocked) schedule_work(&rfkill->uevent_work); @@ -580,7 +520,6 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); - rfkill_any_led_trigger_event(); return blocked; } @@ -630,7 +569,6 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); - rfkill_any_led_trigger_event(); } } EXPORT_SYMBOL(rfkill_set_states); @@ -874,10 +812,8 @@ static int rfkill_resume(struct device *dev) rfkill->suspended = false; if (!rfkill->persistent) { - mutex_lock(&rfkill_global_mutex); cur = !!(rfkill->state & RFKILL_BLOCK_SW); rfkill_set_block(rfkill, cur); - mutex_unlock(&rfkill_global_mutex); } if (rfkill->ops->poll && !rfkill->polling_paused) @@ -1049,7 +985,6 @@ int __must_check rfkill_register(struct rfkill *rfkill) #endif } - __rfkill_any_led_trigger_event(); rfkill_send_events(rfkill, RFKILL_OP_ADD); mutex_unlock(&rfkill_global_mutex); @@ -1082,7 +1017,6 @@ void rfkill_unregister(struct rfkill *rfkill) mutex_lock(&rfkill_global_mutex); rfkill_send_events(rfkill, RFKILL_OP_DEL); list_del_init(&rfkill->node); - __rfkill_any_led_trigger_event(); mutex_unlock(&rfkill_global_mutex); rfkill_led_trigger_unregister(rfkill); @@ -1335,10 +1269,6 @@ static int __init rfkill_init(void) if (error) goto error_misc; - error = rfkill_any_led_trigger_register(); - if (error) - goto error_led_trigger; - #ifdef CONFIG_RFKILL_INPUT error = rfkill_handler_init(); if (error) @@ -1349,10 +1279,8 @@ static int __init rfkill_init(void) #ifdef CONFIG_RFKILL_INPUT error_input: - rfkill_any_led_trigger_unregister(); -#endif -error_led_trigger: misc_deregister(&rfkill_miscdev); +#endif error_misc: class_unregister(&rfkill_class); error_class: @@ -1365,7 +1293,6 @@ static void __exit rfkill_exit(void) #ifdef CONFIG_RFKILL_INPUT rfkill_handler_exit(); #endif - rfkill_any_led_trigger_unregister(); misc_deregister(&rfkill_miscdev); class_unregister(&rfkill_class); } -- cgit v1.2.3 From eab59075d3cd7f3535aa2dbbc19a198dfee58892 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Wed, 28 Dec 2016 09:26:31 -0200 Subject: sctp: reduce indent level at sctp_sf_tabort_8_4_8 Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sctp/sm_statefuns.c | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 8ec20a64a3f8..32587b1f84e7 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3237,36 +3237,34 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, struct sctp_chunk *abort; packet = sctp_ootb_pkt_new(net, asoc, chunk); + if (!packet) + return SCTP_DISPOSITION_NOMEM; - if (packet) { - /* Make an ABORT. The T bit will be set if the asoc - * is NULL. - */ - abort = sctp_make_abort(asoc, chunk, 0); - if (!abort) { - sctp_ootb_pkt_free(packet); - return SCTP_DISPOSITION_NOMEM; - } + /* Make an ABORT. The T bit will be set if the asoc + * is NULL. + */ + abort = sctp_make_abort(asoc, chunk, 0); + if (!abort) { + sctp_ootb_pkt_free(packet); + return SCTP_DISPOSITION_NOMEM; + } - /* Reflect vtag if T-Bit is set */ - if (sctp_test_T_bit(abort)) - packet->vtag = ntohl(chunk->sctp_hdr->vtag); + /* Reflect vtag if T-Bit is set */ + if (sctp_test_T_bit(abort)) + packet->vtag = ntohl(chunk->sctp_hdr->vtag); - /* Set the skb to the belonging sock for accounting. */ - abort->skb->sk = ep->base.sk; + /* Set the skb to the belonging sock for accounting. */ + abort->skb->sk = ep->base.sk; - sctp_packet_append_chunk(packet, abort); + sctp_packet_append_chunk(packet, abort); - sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, - SCTP_PACKET(packet)); - - SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, + SCTP_PACKET(packet)); - sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - return SCTP_DISPOSITION_CONSUME; - } + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); - return SCTP_DISPOSITION_NOMEM; + sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + return SCTP_DISPOSITION_CONSUME; } /* -- cgit v1.2.3 From 1ff0156167dfb625c2e82aefded6b4cf245476ce Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Wed, 28 Dec 2016 09:26:32 -0200 Subject: sctp: reduce indent level in sctp_sf_shut_8_4_5 Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/sm_statefuns.c | 58 ++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 32587b1f84e7..a95915ef9dba 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3501,45 +3501,43 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, struct sctp_chunk *shut; packet = sctp_ootb_pkt_new(net, asoc, chunk); + if (!packet) + return SCTP_DISPOSITION_NOMEM; - if (packet) { - /* Make an SHUTDOWN_COMPLETE. - * The T bit will be set if the asoc is NULL. - */ - shut = sctp_make_shutdown_complete(asoc, chunk); - if (!shut) { - sctp_ootb_pkt_free(packet); - return SCTP_DISPOSITION_NOMEM; - } - - /* Reflect vtag if T-Bit is set */ - if (sctp_test_T_bit(shut)) - packet->vtag = ntohl(chunk->sctp_hdr->vtag); + /* Make an SHUTDOWN_COMPLETE. + * The T bit will be set if the asoc is NULL. + */ + shut = sctp_make_shutdown_complete(asoc, chunk); + if (!shut) { + sctp_ootb_pkt_free(packet); + return SCTP_DISPOSITION_NOMEM; + } - /* Set the skb to the belonging sock for accounting. */ - shut->skb->sk = ep->base.sk; + /* Reflect vtag if T-Bit is set */ + if (sctp_test_T_bit(shut)) + packet->vtag = ntohl(chunk->sctp_hdr->vtag); - sctp_packet_append_chunk(packet, shut); + /* Set the skb to the belonging sock for accounting. 
*/ + shut->skb->sk = ep->base.sk; - sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, - SCTP_PACKET(packet)); + sctp_packet_append_chunk(packet, shut); - SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, + SCTP_PACKET(packet)); - /* If the chunk length is invalid, we don't want to process - * the reset of the packet. - */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); - /* We need to discard the rest of the packet to prevent - * potential bomming attacks from additional bundled chunks. - * This is documented in SCTP Threats ID. - */ + /* If the chunk length is invalid, we don't want to process + * the reset of the packet. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - } - return SCTP_DISPOSITION_NOMEM; + /* We need to discard the rest of the packet to prevent + * potential bomming attacks from additional bundled chunks. + * This is documented in SCTP Threats ID. + */ + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* -- cgit v1.2.3 From 0630c56e40b0bcca299d3b4c20ffcfddbe6a0218 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Wed, 28 Dec 2016 09:26:33 -0200 Subject: sctp: simplify addr copy Make it a bit easier to read. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/ipv6.c | 16 +++++++--------- net/sctp/protocol.c | 18 +++++++----------- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 5ed8e79bf102..6619367bb6ca 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -412,22 +412,20 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb, int is_saddr) { - __be16 *port; - struct sctphdr *sh; + /* Always called on head skb, so this is safe */ + struct sctphdr *sh = sctp_hdr(skb); + struct sockaddr_in6 *sa = &addr->v6; - port = &addr->v6.sin6_port; addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; /* FIXME */ addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif; - /* Always called on head skb, so this is safe */ - sh = sctp_hdr(skb); if (is_saddr) { - *port = sh->source; - addr->v6.sin6_addr = ipv6_hdr(skb)->saddr; + sa->sin6_port = sh->source; + sa->sin6_addr = ipv6_hdr(skb)->saddr; } else { - *port = sh->dest; - addr->v6.sin6_addr = ipv6_hdr(skb)->daddr; + sa->sin6_port = sh->dest; + sa->sin6_addr = ipv6_hdr(skb)->daddr; } } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 616a9428e0c4..f9c3c37c9ae0 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -237,23 +237,19 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, int is_saddr) { - void *from; - __be16 *port; - struct sctphdr *sh; + /* Always called on head skb, so this is safe */ + struct sctphdr *sh = sctp_hdr(skb); + struct sockaddr_in *sa = &addr->v4; - port = &addr->v4.sin_port; addr->v4.sin_family = AF_INET; - /* Always called on head skb, so this is safe */ - sh = sctp_hdr(skb); if (is_saddr) { - *port = sh->source; - from = &ip_hdr(skb)->saddr; + sa->sin_port = sh->source; + sa->sin_addr.s_addr = ip_hdr(skb)->saddr; } else { - *port = sh->dest; - from = &ip_hdr(skb)->daddr; + sa->sin_port = sh->dest; + sa->sin_addr.s_addr = ip_hdr(skb)->daddr; } - 
memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); } /* Initialize an sctp_addr from a socket. */ -- cgit v1.2.3 From 66b91d2cd0344c417194596ef6e387e52be69e57 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Wed, 28 Dec 2016 09:26:34 -0200 Subject: sctp: remove return value from sctp_packet_init/config There is no reason to use this cascading. It doesn't add anything. Let's remove it and simplify. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 7 +++---- net/sctp/output.c | 14 +++++--------- net/sctp/sm_statefuns.c | 5 +++-- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 92daabdc007d..87d56cc80a3c 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -722,10 +722,9 @@ struct sctp_packet { ipfragok:1; /* So let ip fragment this packet */ }; -struct sctp_packet *sctp_packet_init(struct sctp_packet *, - struct sctp_transport *, - __u16 sport, __u16 dport); -struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int); +void sctp_packet_init(struct sctp_packet *, struct sctp_transport *, + __u16 sport, __u16 dport); +void sctp_packet_config(struct sctp_packet *, __u32 vtag, int); sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *, struct sctp_chunk *, int, gfp_t); sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *, diff --git a/net/sctp/output.c b/net/sctp/output.c index f5320a87341e..07ab5062e541 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -81,8 +81,8 @@ static void sctp_packet_reset(struct sctp_packet *packet) /* Config a packet. * This appears to be a followup set of initializations. */ -struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, - __u32 vtag, int ecn_capable) +void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, + int ecn_capable) { struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; @@ -123,14 +123,12 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, if (chunk) sctp_packet_append_chunk(packet, chunk); } - - return packet; } /* Initialize the packet structure. */ -struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, - struct sctp_transport *transport, - __u16 sport, __u16 dport) +void sctp_packet_init(struct sctp_packet *packet, + struct sctp_transport *transport, + __u16 sport, __u16 dport) { struct sctp_association *asoc = transport->asoc; size_t overhead; @@ -151,8 +149,6 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, packet->overhead = overhead; sctp_packet_reset(packet); packet->vtag = 0; - - return packet; } /* Free a packet. 
*/ diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index a95915ef9dba..9a223d5b2314 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -6032,8 +6032,9 @@ static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, sctp_transport_route(transport, (union sctp_addr *)&chunk->dest, sctp_sk(net->sctp.ctl_sock)); - packet = sctp_packet_init(&transport->packet, transport, sport, dport); - packet = sctp_packet_config(packet, vtag, 0); + packet = &transport->packet; + sctp_packet_init(packet, transport, sport, dport); + sctp_packet_config(packet, vtag, 0); return packet; -- cgit v1.2.3 From 509e7a311f0423bce6d3e1705ba0a470ffe770dc Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Wed, 28 Dec 2016 09:26:35 -0200 Subject: sctp: sctp_chunk_length_valid should return bool Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/sm_statefuns.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 9a223d5b2314..3382ef254e7b 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -160,23 +160,22 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, /* Small helper function that checks if the chunk length * is of the appropriate length. The 'required_length' argument * is set to be the size of a specific chunk we are testing. - * Return Values: 1 = Valid length - * 0 = Invalid length + * Return Values: true = Valid length + * false = Invalid length * */ -static inline int -sctp_chunk_length_valid(struct sctp_chunk *chunk, - __u16 required_length) +static inline bool +sctp_chunk_length_valid(struct sctp_chunk *chunk, __u16 required_length) { __u16 chunk_length = ntohs(chunk->chunk_hdr->length); /* Previously already marked? */ if (unlikely(chunk->pdiscard)) - return 0; + return false; if (unlikely(chunk_length < required_length)) - return 0; + return false; - return 1; + return true; } /********************************************************** -- cgit v1.2.3 From 8667398277afe5312ac6e70589c4c661545179c7 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Wed, 28 Dec 2016 16:46:51 +0800 Subject: driver: ipvlan: Define common functions to decrease duplicated codes used to add or del IP address There are some duplicated codes in ipvlan_add_addr6/4 and ipvlan_del_addr6/4. Now define two common functions ipvlan_add_addr and ipvlan_del_addr to decrease the duplicated codes. It could be helful to maintain the codes. Signed-off-by: Gao Feng Signed-off-by: David S. 
Miller --- drivers/net/ipvlan/ipvlan_main.c | 68 +++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 39 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 8b0f99300cbc..975f9ddb9908 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -674,23 +674,22 @@ static int ipvlan_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) { struct ipvl_addr *addr; - if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { - netif_err(ipvlan, ifup, ipvlan->dev, - "Failed to add IPv6=%pI6c addr for %s intf\n", - ip6_addr, ipvlan->dev->name); - return -EINVAL; - } addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC); if (!addr) return -ENOMEM; addr->master = ipvlan; - memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); - addr->atype = IPVL_IPV6; + if (is_v6) { + memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr)); + addr->atype = IPVL_IPV6; + } else { + memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr)); + addr->atype = IPVL_IPV4; + } list_add_tail(&addr->anode, &ipvlan->addrs); /* If the interface is not up, the address will be added to the hash @@ -702,11 +701,11 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) return 0; } -static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) { struct ipvl_addr *addr; - addr = ipvlan_find_addr(ipvlan, ip6_addr, true); + addr = ipvlan_find_addr(ipvlan, iaddr, is_v6); if (!addr) return; @@ -717,6 +716,23 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) return; } +static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { + netif_err(ipvlan, ifup, ipvlan->dev, + "Failed to add IPv6=%pI6c addr for %s intf\n", + ip6_addr, ipvlan->dev->name); + return -EINVAL; + } + + return ipvlan_add_addr(ipvlan, ip6_addr, true); +} + +static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + return ipvlan_del_addr(ipvlan, ip6_addr, true); +} + static int ipvlan_addr6_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -750,45 +766,19 @@ static int ipvlan_addr6_event(struct notifier_block *unused, static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) { - struct ipvl_addr *addr; - if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) { netif_err(ipvlan, ifup, ipvlan->dev, "Failed to add IPv4=%pI4 on %s intf.\n", ip4_addr, ipvlan->dev->name); return -EINVAL; } - addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL); - if (!addr) - return -ENOMEM; - - addr->master = ipvlan; - memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); - addr->atype = IPVL_IPV4; - list_add_tail(&addr->anode, &ipvlan->addrs); - - /* If the interface is not up, the address will be added to the hash - * list by ipvlan_open. 
- */ - if (netif_running(ipvlan->dev)) - ipvlan_ht_addr_add(ipvlan, addr); - return 0; + return ipvlan_add_addr(ipvlan, ip4_addr, false); } static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) { - struct ipvl_addr *addr; - - addr = ipvlan_find_addr(ipvlan, ip4_addr, false); - if (!addr) - return; - - ipvlan_ht_addr_del(addr); - list_del(&addr->anode); - kfree_rcu(addr, rcu); - - return; + return ipvlan_del_addr(ipvlan, ip4_addr, false); } static int ipvlan_addr4_event(struct notifier_block *unused, -- cgit v1.2.3 From 3ea35d3406d31a7389c2a299e28168c52040843a Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Wed, 28 Dec 2016 16:47:42 +0800 Subject: driver: ipvlan: Remove unnecessary ipvlan NULL check in ipvlan_count_rx There are three functions which would invoke the ipvlan_count_rx. They are ipvlan_process_multicast, ipvlan_rcv_frame, and ipvlan_nf_input. The former two functions already use the ipvlan directly before ipvlan_count_rx, and ipvlan_nf_input gets the ipvlan from ipvl_addr->master, it is not possible to be NULL too. So the ipvlan pointer check is unnecessary in ipvlan_count_rx. Signed-off-by: Gao Feng Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan_core.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 83ce74acf82d..8ae335d73d38 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -19,9 +19,6 @@ void ipvlan_init_secret(void) static void ipvlan_count_rx(const struct ipvl_dev *ipvlan, unsigned int len, bool success, bool mcast) { - if (!ipvlan) - return; - if (likely(success)) { struct ipvl_pcpu_stats *pcptr; -- cgit v1.2.3 From b77b7565a6d11611c3f179941d2b95c2d72939dc Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Wed, 28 Dec 2016 09:51:56 -0200 Subject: sctp: add pr_debug for tracking asocs not found This pr_debug may help identify why the system is generating some Aborts. It's not something a sysadmin would be expected to use. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/input.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/net/sctp/input.c b/net/sctp/input.c index 458e506ef84b..704ad19c1565 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -1229,13 +1229,26 @@ static struct sctp_association *__sctp_rcv_lookup(struct net *net, struct sctp_association *asoc; asoc = __sctp_lookup_association(net, laddr, paddr, transportp); + if (asoc) + goto out; /* Further lookup for INIT/INIT-ACK packets. * SCTP Implementors Guide, 2.18 Handling of address * parameters within the INIT or INIT-ACK. 
*/ - if (!asoc) - asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp); + asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp); + if (asoc) + goto out; + if (paddr->sa.sa_family == AF_INET) + pr_debug("sctp: asoc not found for src:%pI4:%d dst:%pI4:%d\n", + &laddr->v4.sin_addr, ntohs(laddr->v4.sin_port), + &paddr->v4.sin_addr, ntohs(paddr->v4.sin_port)); + else + pr_debug("sctp: asoc not found for src:%pI6:%d dst:%pI6:%d\n", + &laddr->v6.sin6_addr, ntohs(laddr->v6.sin6_port), + &paddr->v6.sin6_addr, ntohs(paddr->v6.sin6_port)); + +out: return asoc; } -- cgit v1.2.3 From 77cf13ad57315736bcd79f3cff9b2284218e0be6 Mon Sep 17 00:00:00 2001 From: Bartosz Markowski Date: Thu, 15 Dec 2016 11:23:22 +0200 Subject: ath10k: fix IRAM banks number for QCA9377 QCA9377 firmware shall alloc 4 IRAM banks Signed-off-by: Bartosz Markowski Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 0457e315d336..983f65bbb7fb 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1973,7 +1973,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) } break; case QCA9377_1_0_DEVICE_ID: - return 2; + return 4; } ath10k_warn(ar, "unknown number of banks, assuming 1\n"); -- cgit v1.2.3 From b08b5b53a1ed2bd7a883f8fd29232c8f03604671 Mon Sep 17 00:00:00 2001 From: Bartosz Markowski Date: Thu, 15 Dec 2016 11:23:22 +0200 Subject: ath10k: override CE5 config for QCA9377 Similarly to QCA6174, QCA9377 requires the CE5 configuration to be available for other feature. Use the ath10k_pci_override_ce_config() for it as well. This is required for TF2.0 firmware. Previous FW revisions were working fine without this patch. Fixes: a70587b3389a ("ath10k: configure copy engine 5 for HTT messages") Signed-off-by: Bartosz Markowski Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 983f65bbb7fb..85367006a80a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3132,7 +3132,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar) setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, (unsigned long)ar); - if (QCA_REV_6174(ar)) + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) ath10k_pci_override_ce_config(ar); ret = ath10k_pci_alloc_pipes(ar); -- cgit v1.2.3 From 35bac90abf5e5de9c3675c9828b427839362cf03 Mon Sep 17 00:00:00 2001 From: Bartosz Markowski Date: Thu, 15 Dec 2016 11:23:23 +0200 Subject: ath10k: decrease num of peers support The correct number for QCA9377 chip is 33 VDEVs. This impacts also QCA6174 chip and it's max VDEV number. 
Signed-off-by: Bartosz Markowski Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/hw.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 883547f3347c..7feffec531cc 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -512,7 +512,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, /* Target specific defines for WMI-TLV firmware */ #define TARGET_TLV_NUM_VDEVS 4 #define TARGET_TLV_NUM_STATIONS 32 -#define TARGET_TLV_NUM_PEERS 35 +#define TARGET_TLV_NUM_PEERS 33 #define TARGET_TLV_NUM_TDLS_VDEVS 1 #define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2) #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) -- cgit v1.2.3 From 7cfe0455ee1218add152e986b89b4bb8dbeafcdd Mon Sep 17 00:00:00 2001 From: Bartosz Markowski Date: Thu, 15 Dec 2016 11:23:24 +0200 Subject: ath10k: set CTS protection VDEV param only if VDEV is up The cts protection vdev parameter, in new QCA9377 TF2.0 firmware, requires bss peer to be created for the STATION vdev type. bss peer is being allocated by the firmware after vdev_start/_up commands. mac80211 may call the cts protection setup at any time, so the we needs to track the situation and defer the cts configuration to prevent firmware asserts, like below: [00]: 0x05020001 0x000015B3 0x0099ACE2 0x00955B31 [04]: 0x0099ACE2 0x00060730 0x00000004 0x00000000 [08]: 0x0044C754 0x00412C10 0x00000000 0x00409C54 [12]: 0x00000009 0x00000000 0x00952F6C 0x00952F77 [16]: 0x00952CC4 0x00910712 0x00000000 0x00000000 [20]: 0x4099ACE2 0x0040E858 0x00421254 0x004127F4 [24]: 0x8099B9B2 0x0040E8B8 0x00000000 0xC099ACE2 [28]: 0x800B75CB 0x0040E8F8 0x00000007 0x00005008 [32]: 0x809B048A 0x0040E958 0x00000010 0x00433B10 [36]: 0x809AFBBC 0x0040E9A8 0x0042BB74 0x0042BBBC [40]: 0x8091D252 0x0040E9C8 0x0042BBBC 0x00000001 [44]: 0x809FFA45 0x0040EA78 0x0043D3E4 0x0042C2C8 [48]: 0x809FCEF4 0x0040EA98 0x0043D3E4 0x00000001 [52]: 0x80911210 0x0040EAE8 0x00000010 0x004041D0 [56]: 0x80911154 0x0040EB28 0x00400000 0x00000000 Signed-off-by: Bartosz Markowski Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 51 +++++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index aa545a1dbdc7..56f557e89912 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1227,6 +1227,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar) return ath10k_monitor_stop(ar); } +static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (!arvif->is_started) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); + return false; + } + + return true; +} + +static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = ar->wmi.vdev_param->protection_mode; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", + arvif->vdev_id, arvif->use_cts_prot); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->use_cts_prot ? 
1 : 0); +} + static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; @@ -5328,20 +5358,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_CTS_PROT) { arvif->use_cts_prot = info->use_cts_prot; - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", - arvif->vdev_id, info->use_cts_prot); ret = ath10k_recalc_rtscts_prot(arvif); if (ret) ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); - vdev_param = ar->wmi.vdev_param->protection_mode; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, - info->use_cts_prot ? 1 : 0); - if (ret) - ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n", - info->use_cts_prot, arvif->vdev_id, ret); + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -7364,6 +7392,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_up = true; } + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } + mutex_unlock(&ar->conf_mutex); return 0; -- cgit v1.2.3 From 8617690174041dfe959b6ad71f1ff93ef4bea5d3 Mon Sep 17 00:00:00 2001 From: Bartosz Markowski Date: Thu, 15 Dec 2016 11:23:24 +0200 Subject: ath10k: add debug trace to rts/cts set function Align it with the cts protection call. Signed-off-by: Bartosz Markowski Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 56f557e89912..12a5fccf04cf 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1275,6 +1275,9 @@ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, WMI_RTSCTS_PROFILE); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", + arvif->vdev_id, rts_cts); + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rts_cts); } -- cgit v1.2.3 From 5dbc6530939126cd61cb6c2e6d7b0a95741aaee6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 21 Dec 2016 16:41:52 -0800 Subject: skfp: hwmtm: Use proper logging macros, correct mismatches Logging macros should allow format and argument validation. The DB_TX, DB_RX, and DB_GEN macros did not. Update the macros and uses and add no_printk validation to the previously compiled away #ifndef DEBUG variants. Done with coccinelle and some typing. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/fddi/skfp/h/hwmtm.h | 24 ++++-- drivers/net/fddi/skfp/hwmtm.c | 176 ++++++++++++++++++++-------------------- 2 files changed, 106 insertions(+), 94 deletions(-) diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h index 4ca2341d7f06..123cfa09c354 100644 --- a/drivers/net/fddi/skfp/h/hwmtm.h +++ b/drivers/net/fddi/skfp/h/hwmtm.h @@ -168,13 +168,25 @@ struct os_debug { #define DB_P debug #endif -#define DB_RX(a,b,c,lev) if (DB_P.d_os.hwm_rx >= (lev)) printf(a,b,c) -#define DB_TX(a,b,c,lev) if (DB_P.d_os.hwm_tx >= (lev)) printf(a,b,c) -#define DB_GEN(a,b,c,lev) if (DB_P.d_os.hwm_gen >= (lev)) printf(a,b,c) +#define DB_RX(lev, fmt, ...) 
\ +do { \ + if (DB_P.d_os.hwm_rx >= (lev)) \ + printf(fmt "\n", ##__VA_ARGS__); \ +} while (0) +#define DB_TX(lev, fmt, ...) \ +do { \ + if (DB_P.d_os.hwm_tx >= (lev)) \ + printf(fmt "\n", ##__VA_ARGS__); \ +} while (0) +#define DB_GEN(lev, fmt, ...) \ +do { \ + if (DB_P.d_os.hwm_gen >= (lev)) \ + printf(fmt "\n", ##__VA_ARGS__); \ +} while (0) #else /* DEBUG */ -#define DB_RX(a,b,c,lev) -#define DB_TX(a,b,c,lev) -#define DB_GEN(a,b,c,lev) +#define DB_RX(lev, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__) +#define DB_TX(lev, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__) +#define DB_GEN(lev, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__) #endif /* DEBUG */ #ifndef SK_BREAK diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c index d0a68bdd5f63..4937d36a9e1c 100644 --- a/drivers/net/fddi/skfp/hwmtm.c +++ b/drivers/net/fddi/skfp/hwmtm.c @@ -330,7 +330,7 @@ static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *d2 ; u_long phys ; - DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ; + DB_GEN(3, "descr ring starts at = %p", start); for (i=count-1, d1=start; i ; i--) { d2 = d1 ; d1++ ; /* descr is owned by the host */ @@ -339,7 +339,7 @@ static u_long init_descr_ring(struct s_smc *smc, phys = mac_drv_virt2phys(smc,(void *)d1) ; d2->r.rxd_nrdadr = cpu_to_le32(phys) ; } - DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ; + DB_GEN(3, "descr ring ends at = %p", d1); d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; d1->r.rxd_next = &start->r ; phys = mac_drv_virt2phys(smc,(void *)start) ; @@ -364,7 +364,7 @@ static void init_txd_ring(struct s_smc *smc) ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p + SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ; queue = smc->hw.fp.tx[QUEUE_A0] ; - DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ; + DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT); (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, HWM_ASYNC_TXD_COUNT) ; phys = le32_to_cpu(ds->txd_ntdadr) ; @@ -378,7 +378,7 @@ static void init_txd_ring(struct s_smc *smc) ds = (struct s_smt_fp_txd volatile *) ((char *)ds + HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ; queue = smc->hw.fp.tx[QUEUE_S] ; - DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ; + DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT); (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, HWM_SYNC_TXD_COUNT) ; phys = le32_to_cpu(ds->txd_ntdadr) ; @@ -400,7 +400,7 @@ static void init_rxd_ring(struct s_smc *smc) */ ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ; queue = smc->hw.fp.rx[QUEUE_R1] ; - DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ; + DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT); (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, SMT_R1_RXD_COUNT) ; phys = le32_to_cpu(ds->rxd_nrdadr) ; @@ -469,11 +469,11 @@ void init_fddi_driver(struct s_smc *smc, u_char *mac_addr) */ i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ; if (i != 16) { - DB_GEN("i = %d",i,0,3) ; + DB_GEN(3, "i = %d", i); smc->os.hwm.descr_p = (union s_fp_descr volatile *) ((char *)smc->os.hwm.descr_p+i) ; } - DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ; + DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p); init_txd_ring(smc) ; init_rxd_ring(smc) ; @@ -501,7 +501,7 @@ SMbuf *smt_get_mbuf(struct s_smc *smc) mb->sm_off = 8 ; mb->sm_use_count = 1 ; } - DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ; + DB_GEN(3, "get SMbuf: mb = %p", mb); return mb; /* May be NULL */ } @@ -510,14 +510,14 @@ void 
smt_free_mbuf(struct s_smc *smc, SMbuf *mb) if (mb) { mb->sm_use_count-- ; - DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ; + DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count); /* * If the use_count is != zero the MBuf is queued * more than once and must not queued into the * free MBuf queue */ if (!mb->sm_use_count) { - DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ; + DB_GEN(3, "free SMbuf: mb = %p", mb); #ifndef COMMON_MB_POOL mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ; smc->os.hwm.mbuf_pool.mb_free = mb ; @@ -741,7 +741,7 @@ void fddi_isr(struct s_smc *smc) while ((is = GET_ISR() & ISR_MASK)) { NDD_TRACE("CH0B",is,0,0) ; - DB_GEN("ISA = 0x%x",is,0,7) ; + DB_GEN(7, "ISA = 0x%lx", is); if (is & IMASK_SLOW) { NDD_TRACE("CH1b",is,0,0) ; @@ -754,20 +754,20 @@ void fddi_isr(struct s_smc *smc) if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */ stu = inpw(FM_A(FM_ST1U)) ; stl = inpw(FM_A(FM_ST1L)) ; - DB_GEN("Slow transmit complete",0,0,6) ; + DB_GEN(6, "Slow transmit complete"); mac1_irq(smc,stu,stl) ; } if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */ stu= inpw(FM_A(FM_ST2U)) ; stl= inpw(FM_A(FM_ST2L)) ; - DB_GEN("Slow receive complete",0,0,6) ; - DB_GEN("stl = %x : stu = %x",stl,stu,7) ; + DB_GEN(6, "Slow receive complete"); + DB_GEN(7, "stl = %x : stu = %x", stl, stu); mac2_irq(smc,stu,stl) ; } if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */ stu= inpw(FM_A(FM_ST3U)) ; stl= inpw(FM_A(FM_ST3L)) ; - DB_GEN("FORMAC Mode Register 3",0,0,6) ; + DB_GEN(6, "FORMAC Mode Register 3"); mac3_irq(smc,stu,stl) ; } if (is & IS_TIMINT) { /* Timer 82C54-2 */ @@ -814,7 +814,7 @@ void fddi_isr(struct s_smc *smc) * Fast Tx complete Async/Sync Queue (BMU service) */ if (is & (IS_XS_F|IS_XA_F)) { - DB_GEN("Fast tx complete queue",0,0,6) ; + DB_GEN(6, "Fast tx complete queue"); /* * clear IRQ, Note: no IRQ is lost, because * we always service both queues @@ -829,7 +829,7 @@ void fddi_isr(struct s_smc *smc) * Fast Rx Complete (BMU service) */ if (is & IS_R1_F) { - DB_GEN("Fast receive complete",0,0,6) ; + DB_GEN(6, "Fast receive complete"); /* clear IRQ */ #ifndef USE_BREAK_ISR outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ; @@ -1083,13 +1083,13 @@ void process_receive(struct s_smc *smc) #endif n = 0 ; do { - DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ; + DB_RX(5, "Check RxD %p for OWN and EOF", r); DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl)); if (rbctrl & BMU_OWN) { NDD_TRACE("RHxE",r,rfsw,rbctrl) ; - DB_RX("End of RxDs",0,0,4) ; + DB_RX(4, "End of RxDs"); goto rx_end ; } /* @@ -1136,19 +1136,19 @@ void process_receive(struct s_smc *smc) rx_used-- ; } while (!(rbctrl & BMU_EOF)) ; used_frags = frag_count ; - DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ; + DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags); /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */ /* BMU_ST_BUF will not be changed by the ASIC */ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { - DB_RX("Check STF bit in %x",(void *)r,0,5) ; + DB_RX(5, "Check STF bit in %p", r); r = r->rxd_next ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; frag_count++ ; rx_used-- ; } - DB_RX("STF bit found",0,0,5) ; + DB_RX(5, "STF bit found"); /* * The received frame is finished for the process receive @@ -1164,7 +1164,7 @@ void process_receive(struct s_smc *smc) rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ; for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){ - DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; + DB_RX(5, "dma_complete for RxD 
%p", r); dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR); } smc->hw.fp.err_stats.err_valid++ ; @@ -1173,34 +1173,34 @@ void process_receive(struct s_smc *smc) /* the length of the data including the FC */ len = (rfsw & RD_LENGTH) - 4 ; - DB_RX("frame length = %d",len,0,4) ; + DB_RX(4, "frame length = %d", len); /* * check the frame_length and all error flags */ if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){ if (rfsw & RD_S_MSRABT) { - DB_RX("Frame aborted by the FORMAC",0,0,2) ; + DB_RX(2, "Frame aborted by the FORMAC"); smc->hw.fp.err_stats.err_abort++ ; } /* * check frame status */ if (rfsw & RD_S_SEAC2) { - DB_RX("E-Indicator set",0,0,2) ; + DB_RX(2, "E-Indicator set"); smc->hw.fp.err_stats.err_e_indicator++ ; } if (rfsw & RD_S_SFRMERR) { - DB_RX("CRC error",0,0,2) ; + DB_RX(2, "CRC error"); smc->hw.fp.err_stats.err_crc++ ; } if (rfsw & RX_FS_IMPL) { - DB_RX("Implementer frame",0,0,2) ; + DB_RX(2, "Implementer frame"); smc->hw.fp.err_stats.err_imp_frame++ ; } goto abort_frame ; } if (len > FDDI_RAW_MTU-4) { - DB_RX("Frame too long error",0,0,2) ; + DB_RX(2, "Frame too long error"); smc->hw.fp.err_stats.err_too_long++ ; goto abort_frame ; } @@ -1209,12 +1209,12 @@ void process_receive(struct s_smc *smc) * of aborded frames to the BMU */ if (len <= 4) { - DB_RX("Frame length = 0",0,0,2) ; + DB_RX(2, "Frame length = 0"); goto abort_frame ; } if (len != (n-4)) { - DB_RX("BMU: rx len differs: [%d:%d]",len,n,4); + DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n); smc->os.hwm.rx_len_error++ ; goto abort_frame ; } @@ -1223,7 +1223,7 @@ void process_receive(struct s_smc *smc) * Check SA == MA */ virt = (u_char far *) rxd->rxd_virt ; - DB_RX("FC = %x",*virt,0,2) ; + DB_RX(2, "FC = %x", *virt); if (virt[12] == MA[5] && virt[11] == MA[4] && virt[10] == MA[3] && @@ -1250,7 +1250,7 @@ void process_receive(struct s_smc *smc) virt[3] != MA[2] || virt[2] != MA[1] || virt[1] != MA[0]) { - DB_RX("DA != MA and not multi- or broadcast",0,0,2) ; + DB_RX(2, "DA != MA and not multi- or broadcast"); goto abort_frame ; } } @@ -1259,13 +1259,13 @@ void process_receive(struct s_smc *smc) /* * LLC frame received */ - DB_RX("LLC - receive",0,0,4) ; + DB_RX(4, "LLC - receive"); mac_drv_rx_complete(smc,rxd,frag_count,len) ; } else { if (!(mb = smt_get_mbuf(smc))) { smc->hw.fp.err_stats.err_no_buf++ ; - DB_RX("No SMbuf; receive terminated",0,0,4) ; + DB_RX(4, "No SMbuf; receive terminated"); goto abort_frame ; } data = smtod(mb,char *) - 1 ; @@ -1278,7 +1278,7 @@ void process_receive(struct s_smc *smc) #else for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){ n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ; - DB_RX("cp SMT frame to mb: len = %d",n,0,6) ; + DB_RX(6, "cp SMT frame to mb: len = %d", n); memcpy(data,r->rxd_virt,n) ; data += n ; } @@ -1294,15 +1294,15 @@ void process_receive(struct s_smc *smc) switch(fc) { case FC_SMT_INFO : smc->hw.fp.err_stats.err_smt_frame++ ; - DB_RX("SMT frame received ",0,0,5) ; + DB_RX(5, "SMT frame received"); if (smc->os.hwm.pass_SMT) { - DB_RX("pass SMT frame ",0,0,5) ; + DB_RX(5, "pass SMT frame"); mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count); } @@ -1310,7 +1310,7 @@ void process_receive(struct s_smc *smc) break ; case FC_SMT_NSA : smc->hw.fp.err_stats.err_smt_frame++ ; - DB_RX("SMT frame received ",0,0,5) ; + DB_RX(5, "SMT frame received"); /* if pass_NSA set pass the NSA frame or */ /* pass_SMT set and the A-Indicator */ @@ -1318,12 +1318,12 @@ void 
process_receive(struct s_smc *smc) if (smc->os.hwm.pass_NSA || (smc->os.hwm.pass_SMT && !(rfsw & A_INDIC))) { - DB_RX("pass SMT frame ",0,0,5) ; + DB_RX(5, "pass SMT frame"); mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count); } @@ -1331,12 +1331,12 @@ void process_receive(struct s_smc *smc) break ; case FC_BEACON : if (smc->os.hwm.pass_DB) { - DB_RX("pass DB frame ",0,0,5) ; + DB_RX(5, "pass DB frame"); mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count); } smt_free_mbuf(smc,mb) ; @@ -1345,9 +1345,9 @@ void process_receive(struct s_smc *smc) /* * unknown FC abord the frame */ - DB_RX("unknown FC error",0,0,2) ; + DB_RX(2, "unknown FC error"); smt_free_mbuf(smc,mb) ; - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count) ; if ((fc & 0xf0) == FC_MAC) smc->hw.fp.err_stats.err_mac_frame++ ; @@ -1358,16 +1358,16 @@ void process_receive(struct s_smc *smc) } } - DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; + DB_RX(3, "next RxD is %p", queue->rx_curr_get); NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ; continue ; /*--------------------------------------------------------------------*/ abort_frame: - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count) ; - DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; + DB_RX(3, "next RxD is %p", queue->rx_curr_get); NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ; } rx_end: @@ -1381,7 +1381,7 @@ static void smt_to_llc(struct s_smc *smc, SMbuf *mb) { u_char fc ; - DB_RX("send a queued frame to the llc layer",0,0,4) ; + DB_RX(4, "send a queued frame to the llc layer"); smc->os.hwm.r.len = mb->sm_len ; smc->os.hwm.r.mb_pos = smtod(mb,char *) ; fc = *smc->os.hwm.r.mb_pos ; @@ -1419,7 +1419,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, __le32 rbctrl; NDD_TRACE("RHfB",virt,len,frame_status) ; - DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ; + DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status); r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; r->rxd_virt = virt ; r->rxd_rbadr = cpu_to_le32(phys) ; @@ -1475,7 +1475,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc) } queue = smc->hw.fp.rx[QUEUE_R1] ; - DB_RX("clear_rx_queue",0,0,5) ; + DB_RX(5, "clear_rx_queue"); /* * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers @@ -1483,7 +1483,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc) r = queue->rx_curr_get ; while (queue->rx_used) { DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; - DB_RX("switch OWN bit of RxD 0x%p ",r,0,5) ; + DB_RX(5, "switch OWN bit of RxD 0x%p", r); r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; frag_count = 1 ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; @@ -1491,23 +1491,23 @@ void mac_drv_clear_rx_queue(struct s_smc *smc) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; while (r != queue->rx_curr_put && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { - DB_RX("Check STF bit in %x",(void *)r,0,5) ; + DB_RX(5, "Check STF bit in %p", r); r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; r = r->rxd_next ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; frag_count++ ; } - DB_RX("STF bit found",0,0,5) ; + DB_RX(5, "STF bit found"); next_rxd = r ; for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){ - DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; + DB_RX(5, "dma_complete for 
RxD %p", r); dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR); } - DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ", - (void *)queue->rx_curr_get,frag_count,5) ; + DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d", + queue->rx_curr_get, frag_count); mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ; queue->rx_curr_get = next_rxd ; @@ -1554,7 +1554,7 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ; smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ; smc->os.hwm.tx_len = frame_len ; - DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ; + DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len); if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) { frame_status |= LAN_TX ; } @@ -1577,23 +1577,23 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, if (!smc->hw.mac_ring_is_up) { frame_status &= ~LAN_TX ; frame_status |= RING_DOWN ; - DB_TX("Ring is down: terminate LAN_TX",0,0,2) ; + DB_TX(2, "Ring is down: terminate LAN_TX"); } if (frag_count > smc->os.hwm.tx_p->tx_free) { #ifndef NDIS_OS2 mac_drv_clear_txd(smc) ; if (frag_count > smc->os.hwm.tx_p->tx_free) { - DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ; + DB_TX(2, "Out of TxDs, terminate LAN_TX"); frame_status &= ~LAN_TX ; frame_status |= OUT_OF_TXD ; } #else - DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ; + DB_TX(2, "Out of TxDs, terminate LAN_TX"); frame_status &= ~LAN_TX ; frame_status |= OUT_OF_TXD ; #endif } - DB_TX("frame_status = %x",frame_status,0,3) ; + DB_TX(3, "frame_status = %x", frame_status); NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ; return frame_status; } @@ -1642,10 +1642,10 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, */ t = queue->tx_curr_put ; - DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ; + DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status); if (frame_status & LAN_TX) { /* '*t' is already defined */ - DB_TX("LAN_TX: TxD = %p, virt = %p ",t,virt,3) ; + DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt); t->txd_virt = virt ; t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ; t->txd_tbadr = cpu_to_le32(phys) ; @@ -1674,11 +1674,11 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, } } if (frame_status & LOC_TX) { - DB_TX("LOC_TX: ",0,0,3) ; + DB_TX(3, "LOC_TX:"); if (frame_status & FIRST_FRAG) { if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) { smc->hw.fp.err_stats.err_no_buf++ ; - DB_TX("No SMbuf; transmit terminated",0,0,4) ; + DB_TX(4, "No SMbuf; transmit terminated"); } else { smc->os.hwm.tx_data = @@ -1693,7 +1693,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, } if (smc->os.hwm.tx_mb) { #ifndef USE_OS_CPY - DB_TX("copy fragment into MBuf ",0,0,3) ; + DB_TX(3, "copy fragment into MBuf"); memcpy(smc->os.hwm.tx_data,virt,len) ; smc->os.hwm.tx_data += len ; #endif @@ -1718,7 +1718,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, smc->os.hwm.tx_data++ ; smc->os.hwm.tx_mb->sm_len = smc->os.hwm.tx_len - 1 ; - DB_TX("pass LLC frame to SMT ",0,0,3) ; + DB_TX(3, "pass LLC frame to SMT"); smt_received_pack(smc,smc->os.hwm.tx_mb, RD_FS_LOCAL) ; } @@ -1733,7 +1733,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, */ static void queue_llc_rx(struct s_smc *smc, SMbuf *mb) { - DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ; + DB_GEN(4, "queue_llc_rx: mb = %p", mb); 
smc->os.hwm.queued_rx_frames++ ; mb->sm_next = (SMbuf *)NULL ; if (smc->os.hwm.llc_rx_pipe == NULL) { @@ -1763,7 +1763,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc) smc->os.hwm.queued_rx_frames-- ; smc->os.hwm.llc_rx_pipe = mb->sm_next ; } - DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ; + DB_GEN(4, "get_llc_rx: mb = 0x%p", mb); return mb; } @@ -1773,7 +1773,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc) */ static void queue_txd_mb(struct s_smc *smc, SMbuf *mb) { - DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ; + DB_GEN(4, "_rx: queue_txd_mb = %p", mb); smc->os.hwm.queued_txd_mb++ ; mb->sm_next = (SMbuf *)NULL ; if (smc->os.hwm.txd_tx_pipe == NULL) { @@ -1796,7 +1796,7 @@ static SMbuf *get_txd_mb(struct s_smc *smc) smc->os.hwm.queued_txd_mb-- ; smc->os.hwm.txd_tx_pipe = mb->sm_next ; } - DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ; + DB_GEN(4, "get_txd_mb: mb = 0x%p", mb); return mb; } @@ -1819,7 +1819,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) __le32 tbctrl; NDD_TRACE("THSB",mb,fc,0) ; - DB_TX("smt_send_mbuf: mb = 0x%p, fc = 0x%x",mb,fc,4) ; + DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc); mb->sm_off-- ; /* set to fc */ mb->sm_len++ ; /* + fc */ @@ -1838,7 +1838,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) if (n >= len) { n = len ; } - DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ; + DB_TX(5, "frag: virt/len = 0x%p/%d", data, n); virt[frag_count] = data ; frag_len[frag_count] = n ; frag_count++ ; @@ -1863,15 +1863,15 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) { frame_status &= ~LAN_TX; if (frame_status) { - DB_TX("Ring is down: terminate LAN_TX",0,0,2) ; + DB_TX(2, "Ring is down: terminate LAN_TX"); } else { - DB_TX("Ring is down: terminate transmission",0,0,2) ; + DB_TX(2, "Ring is down: terminate transmission"); smt_free_mbuf(smc,mb) ; return ; } } - DB_TX("frame_status = 0x%x ",frame_status,0,5) ; + DB_TX(5, "frame_status = 0x%x", frame_status); if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) { mb->sm_use_count = 2 ; @@ -1881,7 +1881,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) t = queue->tx_curr_put ; frame_status |= FIRST_FRAG ; for (i = 0; i < frag_count; i++) { - DB_TX("init TxD = 0x%x",(void *)t,0,5) ; + DB_TX(5, "init TxD = 0x%p", t); if (i == frag_count-1) { frame_status |= LAST_FRAG ; t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR | @@ -1912,7 +1912,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) } if (frame_status & LOC_TX) { - DB_TX("pass Mbuf to LLC queue",0,0,5) ; + DB_TX(5, "pass Mbuf to LLC queue"); queue_llc_rx(smc,mb) ; } @@ -1953,18 +1953,18 @@ static void mac_drv_clear_txd(struct s_smc *smc) for (i = QUEUE_S; i <= QUEUE_A0; i++) { queue = smc->hw.fp.tx[i] ; t1 = queue->tx_curr_get ; - DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ; + DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i); for ( ; ; ) { frag_count = 0 ; do { DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ; - DB_TX("check OWN/EOF bit of TxD 0x%p",t1,0,5) ; + DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1); tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl)); if (tbctrl & BMU_OWN || !queue->tx_used){ - DB_TX("End of TxDs queue %d",i,0,4) ; + DB_TX(4, "End of TxDs queue %d", i); goto free_next_queue ; /* next queue */ } t1 = t1->txd_next ; @@ -1988,11 +1988,11 @@ static void mac_drv_clear_txd(struct s_smc *smc) } else { #ifndef PASS_1ST_TXD_2_TX_COMP - DB_TX("mac_drv_tx_comp for TxD 0x%p",t2,0,4) ; + DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", 
t2); mac_drv_tx_complete(smc,t2) ; #else - DB_TX("mac_drv_tx_comp for TxD 0x%x", - queue->tx_curr_get,0,4) ; + DB_TX(4, "mac_drv_tx_comp for TxD 0x%x", + queue->tx_curr_get); mac_drv_tx_complete(smc,queue->tx_curr_get) ; #endif } @@ -2043,7 +2043,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc) for (i = QUEUE_S; i <= QUEUE_A0; i++) { queue = smc->hw.fp.tx[i] ; - DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ; + DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i); /* * switch the OWN bit of all pending frames to the host @@ -2052,7 +2052,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc) tx_used = queue->tx_used ; while (tx_used) { DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; - DB_TX("switch OWN bit of TxD 0x%p ",t,0,5) ; + DB_TX(5, "switch OWN bit of TxD 0x%p", t); t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; t = t->txd_next ; -- cgit v1.2.3 From 5671e8c19c8cb4dd432e54f5bc5e2e9a9bac894b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 21 Dec 2016 19:54:53 -0800 Subject: fddi: skfp: Use more common logging styles Several macros use non-standard styles where format and arguments are not verified. Convert these to a more typical fmt, ##__VA_ARGS__ use so format and arguments match as appropriate. Miscellanea: o Fix format and argument mismatches o Realign and reindent misindented block o Strip newlines from formats and add to macro defines o Coalesce a few consecutive logging uses to more simple single uses Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/fddi/skfp/cfm.c | 22 ++++---- drivers/net/fddi/skfp/drvfbi.c | 4 +- drivers/net/fddi/skfp/ecm.c | 34 ++++++------ drivers/net/fddi/skfp/ess.c | 66 ++++++++++++------------ drivers/net/fddi/skfp/fplustm.c | 24 ++++----- drivers/net/fddi/skfp/h/cmtdef.h | 67 +++++++++++++----------- drivers/net/fddi/skfp/hwmtm.c | 2 +- drivers/net/fddi/skfp/pcmplc.c | 83 +++++++++++++++-------------- drivers/net/fddi/skfp/pmf.c | 4 +- drivers/net/fddi/skfp/rmt.c | 40 +++++++------- drivers/net/fddi/skfp/smt.c | 109 +++++++++++++++++++-------------------- drivers/net/fddi/skfp/srf.c | 14 +++-- 12 files changed, 232 insertions(+), 237 deletions(-) diff --git a/drivers/net/fddi/skfp/cfm.c b/drivers/net/fddi/skfp/cfm.c index e395ace3120b..648ff9fdb909 100644 --- a/drivers/net/fddi/skfp/cfm.c +++ b/drivers/net/fddi/skfp/cfm.c @@ -52,7 +52,6 @@ static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ; #define ACTIONS_DONE() (smc->mib.fddiSMTCF_State &= ~AFLAG) #define ACTIONS(x) (x|AFLAG) -#ifdef DEBUG /* * symbolic state names */ @@ -68,7 +67,6 @@ static const char * const cfm_states[] = { static const char * const cfm_events[] = { "NONE","CF_LOOP_A","CF_LOOP_B","CF_JOIN_A","CF_JOIN_B" } ; -#endif /* * map from state to downstream port type @@ -230,10 +228,10 @@ void cfm(struct s_smc *smc, int event) oldstate = smc->mib.fddiSMTCF_State ; do { - DB_CFM("CFM : state %s%s", - (smc->mib.fddiSMTCF_State & AFLAG) ? "ACTIONS " : "", - cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG]) ; - DB_CFM(" event %s\n",cfm_events[event],0) ; + DB_CFM("CFM : state %s%s event %s", + smc->mib.fddiSMTCF_State & AFLAG ? 
"ACTIONS " : "", + cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG], + cfm_events[event]); state = smc->mib.fddiSMTCF_State ; cfm_fsm(smc,event) ; event = 0 ; @@ -297,7 +295,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ /* Don't do the WC-Flag changing here */ ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break; case SC0_ISOLATED : /*SC07*/ @@ -338,7 +336,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ } ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC9_C_WRAP_A : /*SC10*/ @@ -403,7 +401,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ } ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC10_C_WRAP_B : /*SC20*/ @@ -448,7 +446,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) smc->r.rm_join = TRUE ; queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC4_THRU_A : /*SC41*/ @@ -481,7 +479,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) smc->r.rm_join = TRUE ; queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC5_THRU_B : /*SC51*/ @@ -519,7 +517,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ } ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC11_C_WRAP_S : /*SC70*/ diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c index 07da97c303d6..fed3a92d3df4 100644 --- a/drivers/net/fddi/skfp/drvfbi.c +++ b/drivers/net/fddi/skfp/drvfbi.c @@ -343,8 +343,8 @@ void init_board(struct s_smc *smc, u_char *mac_addr) */ void sm_pm_bypass_req(struct s_smc *smc, int mode) { - DB_ECMN(1,"ECM : sm_pm_bypass_req(%s)\n",(mode == BP_INSERT) ? - "BP_INSERT" : "BP_DEINSERT",0) ; + DB_ECMN(1, "ECM : sm_pm_bypass_req(%s)", + mode == BP_INSERT ? "BP_INSERT" : "BP_DEINSERT"); if (smc->s.sas != SMT_DAS) return ; diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c index 47d922cb3c08..eee9ba91346a 100644 --- a/drivers/net/fddi/skfp/ecm.c +++ b/drivers/net/fddi/skfp/ecm.c @@ -66,7 +66,6 @@ static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ; #define EC6_CHECK 6 /* checking bypass */ #define EC7_DEINSERT 7 /* bypass being turnde off */ -#ifdef DEBUG /* * symbolic state names */ @@ -83,7 +82,6 @@ static const char * const ecm_events[] = { "EC_TIMEOUT_TD","EC_TIMEOUT_TMAX", "EC_TIMEOUT_IMAX","EC_TIMEOUT_INMAX","EC_TEST_DONE" } ; -#endif /* * all Globals are defined in smc.h @@ -126,10 +124,10 @@ void ecm(struct s_smc *smc, int event) int state ; do { - DB_ECM("ECM : state %s%s", - (smc->mib.fddiSMTECMState & AFLAG) ? 
"ACTIONS " : "", - ecm_states[smc->mib.fddiSMTECMState & ~AFLAG]) ; - DB_ECM(" event %s\n",ecm_events[event],0) ; + DB_ECM("ECM : state %s%s event %s", + smc->mib.fddiSMTECMState & AFLAG ? "ACTIONS " : "", + ecm_states[smc->mib.fddiSMTECMState & ~AFLAG], + ecm_events[event]); state = smc->mib.fddiSMTECMState ; ecm_fsm(smc,event) ; event = 0 ; @@ -379,7 +377,7 @@ static void ecm_fsm(struct s_smc *smc, int cmd) (((ls_a == PC_ILS) && (ls_b == PC_QLS)) || ((ls_a == PC_QLS) && (ls_b == PC_ILS)))){ smc->e.sb_flag = TRUE ; - DB_ECMN(1,"ECM : EC6_CHECK - stuck bypass\n",0,0) ; + DB_ECMN(1, "ECM : EC6_CHECK - stuck bypass"); AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK, smt_get_error_word(smc)); @@ -443,29 +441,29 @@ static void prop_actions(struct s_smc *smc) return ; } - DB_ECM("ECM : prop_actions - trace_prop %d\n", smc->e.trace_prop,0) ; - DB_ECM("ECM : prop_actions - in %d out %d\n", port_in,port_out) ; + DB_ECM("ECM : prop_actions - trace_prop %lu", smc->e.trace_prop); + DB_ECM("ECM : prop_actions - in %d out %d", port_in, port_out); if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) { /* trace initiatior */ - DB_ECM("ECM : initiate TRACE on PHY %c\n",'A'+port_in-PA,0) ; + DB_ECM("ECM : initiate TRACE on PHY %c", 'A' + port_in - PA); queue_event(smc,EVENT_PCM+port_in,PC_TRACE) ; } else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) && port_out != PA) { /* trace propagate upstream */ - DB_ECM("ECM : propagate TRACE on PHY B\n",0,0) ; + DB_ECM("ECM : propagate TRACE on PHY B"); queue_event(smc,EVENT_PCMB,PC_TRACE) ; } else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) && port_out != PB) { /* trace propagate upstream */ - DB_ECM("ECM : propagate TRACE on PHY A\n",0,0) ; + DB_ECM("ECM : propagate TRACE on PHY A"); queue_event(smc,EVENT_PCMA,PC_TRACE) ; } else { /* signal trace termination */ - DB_ECM("ECM : TRACE terminated\n",0,0) ; + DB_ECM("ECM : TRACE terminated"); smc->e.path_test = PT_PENDING ; } smc->e.trace_prop = 0 ; @@ -482,13 +480,13 @@ static void prop_actions(struct s_smc *smc) RS_SET(smc,RS_EVENT) ; while (smc->e.trace_prop) { - DB_ECM("ECM : prop_actions - trace_prop %d\n", - smc->e.trace_prop,0) ; + DB_ECM("ECM : prop_actions - trace_prop %d", + smc->e.trace_prop); if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) { initiator = ENTITY_MAC ; smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_MAC) ; - DB_ECM("ECM: MAC initiates trace\n",0,0) ; + DB_ECM("ECM: MAC initiates trace"); } else { for (p = NUMPHYS-1 ; p >= 0 ; p--) { @@ -503,12 +501,12 @@ static void prop_actions(struct s_smc *smc) if (upstream == ENTITY_MAC) { /* signal trace termination */ - DB_ECM("ECM : TRACE terminated\n",0,0) ; + DB_ECM("ECM : TRACE terminated"); smc->e.path_test = PT_PENDING ; } else { /* trace propagate upstream */ - DB_ECM("ECM : propagate TRACE on PHY %d\n",upstream,0) ; + DB_ECM("ECM : propagate TRACE on PHY %d", upstream); queue_event(smc,EVENT_PCM+upstream,PC_TRACE) ; } } diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c index 2fc5987b41dc..325e2c525e35 100644 --- a/drivers/net/fddi/skfp/ess.c +++ b/drivers/net/fddi/skfp/ess.c @@ -134,7 +134,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * get the resource type */ if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) { - DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ; + DB_ESS("ESS: RAF frame error, parameter type not found"); return fs; } msg_res_type = ((struct smt_p_0015 *)p)->res_type ; @@ -146,16 +146,16 @@ int 
ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, /* * error in frame: para ESS command was not found */ - DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0); + DB_ESS("ESS: RAF frame error, parameter command not found"); return fs; } - DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ; - DB_ESSN(2,"ver %x tran %lx\n",sm->smt_version,sm->smt_tid) ; - DB_ESSN(2,"stn_id %s\n",addr_to_string(&sm->smt_source),0) ; + DB_ESSN(2, "fc %x ft %x", sm->smt_class, sm->smt_type); + DB_ESSN(2, "ver %x tran %x", sm->smt_version, sm->smt_tid); + DB_ESSN(2, "stn_id %s", addr_to_string(&sm->smt_source)); - DB_ESSN(2,"infolen %x res %x\n",sm->smt_len, msg_res_type) ; - DB_ESSN(2,"sbacmd %x\n",cmd->sba_cmd,0) ; + DB_ESSN(2, "infolen %x res %lx", sm->smt_len, msg_res_type); + DB_ESSN(2, "sbacmd %x", cmd->sba_cmd); /* * evaluate the ESS command @@ -189,7 +189,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * The ESS do not send the Frame to the network! */ smc->ess.alloc_trans_id = sm->smt_tid ; - DB_ESS("ESS: save Alloc Req Trans ID %lx\n",sm->smt_tid,0); + DB_ESS("ESS: save Alloc Req Trans ID %x", sm->smt_tid); p = (void *) sm_to_para(smc,sm,SMT_P320F) ; ((struct smt_p_320f *)p)->mib_payload = smc->mib.a[PATH0].fddiPATHSbaPayload ; @@ -220,7 +220,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * check the parameters */ if (smt_check_para(smc,sm,plist_raf_alc_res)) { - DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; + DB_ESS("ESS: RAF with para problem, ignoring"); return fs; } @@ -241,7 +241,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, != SMT_RDF_SUCCESS) || (sm->smt_tid != smc->ess.alloc_trans_id)) { - DB_ESS("ESS: Allocation Response not accepted\n",0,0) ; + DB_ESS("ESS: Allocation Response not accepted"); return fs; } @@ -261,7 +261,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, } overhead = ((struct smt_p_3210 *)p)->mib_overhead ; - DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ; + DB_ESSN(2, "payload= %lx overhead= %lx", + payload, overhead); /* * process the bandwidth allocation @@ -279,7 +280,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * except only replies */ if (sm->smt_type != SMT_REQUEST) { - DB_ESS("ESS: Do not process Change Responses\n",0,0) ; + DB_ESS("ESS: Do not process Change Responses"); return fs; } @@ -287,7 +288,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * check the para for the Change Request */ if (smt_check_para(smc,sm,plist_raf_chg_req)) { - DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; + DB_ESS("ESS: RAF with para problem, ignoring"); return fs; } @@ -299,7 +300,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, */ if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index != PRIMARY_RING) || (msg_res_type != SYNC_BW)) { - DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ; + DB_ESS("ESS: RAF frame with para problem, ignoring"); return fs; } @@ -311,9 +312,10 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, p = (void *) sm_to_para(smc,sm,SMT_P3210) ; overhead = ((struct smt_p_3210 *)p)->mib_overhead ; - DB_ESSN(2,"ESS: Change Request from %s\n", - addr_to_string(&sm->smt_source),0) ; - DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ; + DB_ESSN(2, "ESS: Change Request from %s", + 
addr_to_string(&sm->smt_source)); + DB_ESSN(2, "payload= %lx overhead= %lx", + payload, overhead); /* * process the bandwidth allocation @@ -337,18 +339,18 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * except only requests */ if (sm->smt_type != SMT_REQUEST) { - DB_ESS("ESS: Do not process a Report Reply\n",0,0) ; + DB_ESS("ESS: Do not process a Report Reply"); return fs; } - DB_ESSN(2,"ESS: Report Request from %s\n", - addr_to_string(&(sm->smt_source)),0) ; + DB_ESSN(2, "ESS: Report Request from %s", + addr_to_string(&sm->smt_source)); /* * verify that the resource type is sync bw only */ if (msg_res_type != SYNC_BW) { - DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ; + DB_ESS("ESS: ignoring RAF with para problem"); return fs; } @@ -364,7 +366,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, /* * error in frame */ - DB_ESS("ESS: ignoring RAF with bad sba_cmd\n",0,0) ; + DB_ESS("ESS: ignoring RAF with bad sba_cmd"); break ; } @@ -417,17 +419,17 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe * set the mib attributes fddiPATHSbaOverhead, fddiPATHSbaPayload */ /* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) { - DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ; + DB_ESS("ESS: SMT does not accept the payload value"); return FALSE; } if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) { - DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ; + DB_ESS("ESS: SMT does not accept the overhead value"); return FALSE; } */ /* premliminary */ if (payload > MAX_PAYLOAD || overhead > 5000) { - DB_ESS("ESS: payload / overhead not accepted\n",0,0) ; + DB_ESS("ESS: payload / overhead not accepted"); return FALSE; } @@ -446,7 +448,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe * evulate the Payload */ if (payload) { - DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit on\n",0,0) ; + DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit on"); smc->ess.sync_bw_available = TRUE ; smc->ess.sync_bw = overhead - @@ -454,7 +456,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe payload / 1562 ; } else { - DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit off\n",0,0) ; + DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit off"); smc->ess.sync_bw_available = FALSE ; smc->ess.sync_bw = 0 ; overhead = 0 ; @@ -464,7 +466,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe smc->mib.a[PATH0].fddiPATHSbaOverhead = overhead ; - DB_ESSN(2,"tsync = %lx\n",smc->ess.sync_bw,0) ; + DB_ESSN(2, "tsync = %lx", smc->ess.sync_bw); ess_config_fifo(smc) ; set_formac_tsync(smc,smc->ess.sync_bw) ; @@ -541,7 +543,7 @@ void ess_timer_poll(struct s_smc *smc) if (!smc->ess.raf_act_timer_poll) return ; - DB_ESSN(2,"ESS: timer_poll\n",0,0) ; + DB_ESSN(2, "ESS: timer_poll"); smc->ess.timer_count++ ; if (smc->ess.timer_count == 10) { @@ -667,11 +669,11 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb) /* * Send the Change Reply to the local SBA */ - DB_ESS("ESS:Send to the local SBA\n",0,0) ; + DB_ESS("ESS:Send to the local SBA"); if (!smc->ess.sba_reply_pend) smc->ess.sba_reply_pend = mb ; else { - DB_ESS("Frame is lost - another frame was pending\n",0,0); + DB_ESS("Frame is lost - another frame was pending"); smt_free_mbuf(smc,mb) ; } } @@ -679,7 +681,7 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb) /* * Send the SBA RAF Change Reply to the network */ - DB_ESS("ESS:Send to the network\n",0,0) ; + 
DB_ESS("ESS:Send to the network"); smt_send_frame(smc,mb,FC_SMT_INFO,0) ; } } diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c index 7d3779ae7377..24aed28b982c 100644 --- a/drivers/net/fddi/skfp/fplustm.c +++ b/drivers/net/fddi/skfp/fplustm.c @@ -726,7 +726,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l) if (code_s2u & FM_SMYBEC) queue_event(smc,EVENT_RMT,RM_MY_BEACON) ; if (change_s2u & code_s2u & FM_SLOCLM) { - DB_RMTN(2,"RMT : lower claim received\n",0,0) ; + DB_RMTN(2, "RMT : lower claim received"); } if ((code_s2u & FM_SMYCLM) && !(code_s2l & FM_SDUPCLM)) { /* @@ -746,7 +746,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l) queue_event(smc,EVENT_RMT,RM_VALID_CLAIM) ; } if (change_s2u & code_s2u & FM_SHICLM) { - DB_RMTN(2,"RMT : higher claim received\n",0,0) ; + DB_RMTN(2, "RMT : higher claim received"); } if ( (code_s2l & FM_STRTEXP) || (code_s2l & FM_STRTEXR) ) @@ -1334,7 +1334,7 @@ void rtm_irq(struct s_smc *smc) outpw(ADDR(B2_RTM_CRTL),TIM_CL_IRQ) ; /* clear IRQ */ if (inpw(ADDR(B2_RTM_CRTL)) & TIM_RES_TOK) { outpw(FM_A(FM_CMDREG1),FM_ICL) ; /* force claim */ - DB_RMT("RMT: fddiPATHT_Rmode expired\n",0,0) ; + DB_RMT("RMT: fddiPATHT_Rmode expired"); AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) FDDI_RTT, smt_get_event_word(smc)); @@ -1353,8 +1353,8 @@ void rtm_set_timer(struct s_smc *smc) /* * MIB timer and hardware timer have the same resolution of 80nS */ - DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n", - (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ; + DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns", + (int)smc->mib.a[PATH0].fddiPATHT_Rmode); outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ; } @@ -1469,13 +1469,13 @@ static void smt_split_up_fifo(struct s_smc *smc) smc->hw.fp.fifo.rx2_fifo_start = smc->hw.fp.fifo.tx_a0_start + smc->hw.fp.fifo.tx_a0_size ; - DB_SMT("FIFO split: mode = %x\n",smc->hw.fp.fifo.fifo_config_mode,0) ; - DB_SMT("rbc_ram_start = %x rbc_ram_end = %x\n", - smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end) ; - DB_SMT("rx1_fifo_start = %x tx_s_start = %x\n", - smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start) ; - DB_SMT("tx_a0_start = %x rx2_fifo_start = %x\n", - smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start) ; + DB_SMT("FIFO split: mode = %x", smc->hw.fp.fifo.fifo_config_mode); + DB_SMT("rbc_ram_start = %x rbc_ram_end = %x", + smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end); + DB_SMT("rx1_fifo_start = %x tx_s_start = %x", + smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start); + DB_SMT("tx_a0_start = %x rx2_fifo_start = %x", + smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start); } void formac_reinit_tx(struct s_smc *smc) diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h index f5bc90ff2a2a..5d6891154367 100644 --- a/drivers/net/fddi/skfp/h/cmtdef.h +++ b/drivers/net/fddi/skfp/h/cmtdef.h @@ -54,43 +54,48 @@ #endif #ifdef DEBUG -#define DB_PR(flag,a,b,c) { if (flag) printf(a,b,c) ; } +#define DB_PR(flag, fmt, ...) \ + do { if (flag) printf(fmt "\n", ##__VA_ARGS__); } while (0) #else -#define DB_PR(flag,a,b,c) +#define DB_PR(flag, fmt, ...) 
no_printk(fmt "\n", ##__VA_ARGS__) + #endif #ifdef DEBUG_BRD -#define DB_ECM(a,b,c) DB_PR((smc->debug.d_smt&1),a,b,c) -#define DB_ECMN(n,a,b,c) DB_PR((smc->debug.d_ecm >=(n)),a,b,c) -#define DB_RMT(a,b,c) DB_PR((smc->debug.d_smt&2),a,b,c) -#define DB_RMTN(n,a,b,c) DB_PR((smc->debug.d_rmt >=(n)),a,b,c) -#define DB_CFM(a,b,c) DB_PR((smc->debug.d_smt&4),a,b,c) -#define DB_CFMN(n,a,b,c) DB_PR((smc->debug.d_cfm >=(n)),a,b,c) -#define DB_PCM(a,b,c) DB_PR((smc->debug.d_smt&8),a,b,c) -#define DB_PCMN(n,a,b,c) DB_PR((smc->debug.d_pcm >=(n)),a,b,c) -#define DB_SMT(a,b,c) DB_PR((smc->debug.d_smtf),a,b,c) -#define DB_SMTN(n,a,b,c) DB_PR((smc->debug.d_smtf >=(n)),a,b,c) -#define DB_SBA(a,b,c) DB_PR((smc->debug.d_sba),a,b,c) -#define DB_SBAN(n,a,b,c) DB_PR((smc->debug.d_sba >=(n)),a,b,c) -#define DB_ESS(a,b,c) DB_PR((smc->debug.d_ess),a,b,c) -#define DB_ESSN(n,a,b,c) DB_PR((smc->debug.d_ess >=(n)),a,b,c) +#define DB_TEST (smc->debug) #else -#define DB_ECM(a,b,c) DB_PR((debug.d_smt&1),a,b,c) -#define DB_ECMN(n,a,b,c) DB_PR((debug.d_ecm >=(n)),a,b,c) -#define DB_RMT(a,b,c) DB_PR((debug.d_smt&2),a,b,c) -#define DB_RMTN(n,a,b,c) DB_PR((debug.d_rmt >=(n)),a,b,c) -#define DB_CFM(a,b,c) DB_PR((debug.d_smt&4),a,b,c) -#define DB_CFMN(n,a,b,c) DB_PR((debug.d_cfm >=(n)),a,b,c) -#define DB_PCM(a,b,c) DB_PR((debug.d_smt&8),a,b,c) -#define DB_PCMN(n,a,b,c) DB_PR((debug.d_pcm >=(n)),a,b,c) -#define DB_SMT(a,b,c) DB_PR((debug.d_smtf),a,b,c) -#define DB_SMTN(n,a,b,c) DB_PR((debug.d_smtf >=(n)),a,b,c) -#define DB_SBA(a,b,c) DB_PR((debug.d_sba),a,b,c) -#define DB_SBAN(n,a,b,c) DB_PR((debug.d_sba >=(n)),a,b,c) -#define DB_ESS(a,b,c) DB_PR((debug.d_ess),a,b,c) -#define DB_ESSN(n,a,b,c) DB_PR((debug.d_ess >=(n)),a,b,c) +#define DB_TEST (debug) #endif +#define DB_ECM(fmt, ...) \ + DB_PR((DB_TEST).d_smt & 1, fmt, ##__VA_ARGS__) +#define DB_ECMN(n, fmt, ...) \ + DB_PR((DB_TEST).d_ecm >= (n), fmt, ##__VA_ARGS__) +#define DB_RMT(fmt, ...) \ + DB_PR((DB_TEST).d_smt & 2, fmt, ##__VA_ARGS__) +#define DB_RMTN(n, fmt, ...) \ + DB_PR((DB_TEST).d_rmt >= (n), fmt, ##__VA_ARGS__) +#define DB_CFM(fmt, ...) \ + DB_PR((DB_TEST).d_smt & 4, fmt, ##__VA_ARGS__) +#define DB_CFMN(n, fmt, ...) \ + DB_PR((DB_TEST).d_cfm >= (n), fmt, ##__VA_ARGS__) +#define DB_PCM(fmt, ...) \ + DB_PR((DB_TEST).d_smt & 8, fmt, ##__VA_ARGS__) +#define DB_PCMN(n, fmt, ...) \ + DB_PR((DB_TEST).d_pcm >= (n), fmt, ##__VA_ARGS__) +#define DB_SMT(fmt, ...) \ + DB_PR((DB_TEST).d_smtf, fmt, ##__VA_ARGS__) +#define DB_SMTN(n, fmt, ...) \ + DB_PR((DB_TEST).d_smtf >= (n), fmt, ##__VA_ARGS__) +#define DB_SBA(fmt, ...) \ + DB_PR((DB_TEST).d_sba, fmt, ##__VA_ARGS__) +#define DB_SBAN(n, fmt, ...) \ + DB_PR((DB_TEST).d_sba >= (n), fmt, ##__VA_ARGS__) +#define DB_ESS(fmt, ...) \ + DB_PR((DB_TEST).d_ess, fmt, ##__VA_ARGS__) +#define DB_ESSN(n, fmt, ...) 
\ + DB_PR((DB_TEST).d_ess >= (n), fmt, ##__VA_ARGS__) + #ifndef SS_NOT_DS #define SK_LOC_DECL(type,var) type var #else @@ -640,8 +645,8 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text); #define dump_smt(smc,sm,text) #endif -#ifdef DEBUG char* addr_to_string(struct fddi_addr *addr); +#ifdef DEBUG void dump_hex(char *p, int len); #endif diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c index 4937d36a9e1c..abbe309051d9 100644 --- a/drivers/net/fddi/skfp/hwmtm.c +++ b/drivers/net/fddi/skfp/hwmtm.c @@ -158,7 +158,7 @@ u_int mac_drv_check_space(void); SMbuf* smt_get_mbuf(struct s_smc *smc); #ifdef DEBUG - void mac_drv_debug_lev(void); + void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev); #endif /* diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index 88d02d0a42c4..a9ecf923f63d 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -91,7 +91,6 @@ int p #define PC8_ACTIVE 8 #define PC9_MAINT 9 -#ifdef DEBUG /* * symbolic state names */ @@ -113,7 +112,6 @@ static const char * const pcm_events[] = { "PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT", "PC_NSE","PC_LEM" } ; -#endif #ifdef MOT_ELM /* @@ -610,12 +608,11 @@ void pcm(struct s_smc *smc, const int np, int event) mib = phy->mib ; oldstate = mib->fddiPORTPCMState ; do { - DB_PCM("PCM %c: state %s", - phy->phy_name, - (mib->fddiPORTPCMState & AFLAG) ? "ACTIONS " : "") ; - DB_PCM("%s, event %s\n", - pcm_states[mib->fddiPORTPCMState & ~AFLAG], - pcm_events[event]) ; + DB_PCM("PCM %c: state %s%s, event %s", + phy->phy_name, + mib->fddiPORTPCMState & AFLAG ? "ACTIONS " : "", + pcm_states[mib->fddiPORTPCMState & ~AFLAG], + pcm_events[event]); state = mib->fddiPORTPCMState ; pcm_fsm(smc,phy,event) ; event = 0 ; @@ -1017,7 +1014,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) ACTIONS_DONE() ; break ; case PC9_MAINT : - DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : MAINT", phy->phy_name); /*PC90*/ if (cmd == PC_ENABLE) { GO_STATE(PC0_OFF) ; @@ -1126,13 +1123,12 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy) } if (lem->lem_errors) { - DB_PCMN(1,"LEM %c :\n",phy->np == PB? 'B' : 'A',0) ; - DB_PCMN(1,"errors : %ld\n",lem->lem_errors,0) ; - DB_PCMN(1,"sum_errors : %ld\n",mib->fddiPORTLem_Ct,0) ; - DB_PCMN(1,"current BER : 10E-%d\n",ber/100,0) ; - DB_PCMN(1,"float BER : 10E-(%d/100)\n",lem->lem_float_ber,0) ; - DB_PCMN(1,"avg. BER : 10E-%d\n", - mib->fddiPORTLer_Estimate,0) ; + DB_PCMN(1, "LEM %c :", phy->np == PB ? 'B' : 'A'); + DB_PCMN(1, "errors : %ld", lem->lem_errors); + DB_PCMN(1, "sum_errors : %ld", mib->fddiPORTLem_Ct); + DB_PCMN(1, "current BER : 10E-%d", ber / 100); + DB_PCMN(1, "float BER : 10E-(%d/100)", lem->lem_float_ber); + DB_PCMN(1, "avg. 
BER : 10E-%d", mib->fddiPORTLer_Estimate); } lem->lem_errors = 0L ; @@ -1160,8 +1156,8 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy) /*PC81b*/ #ifdef CONCENTRATOR - DB_PCMN(1,"PCM: LER cutoff on port %d cutoff %d\n", - phy->np, mib->fddiPORTLer_Cutoff) ; + DB_PCMN(1, "PCM: LER cutoff on port %d cutoff %d", + phy->np, mib->fddiPORTLer_Cutoff); #endif #ifdef SMT_EXT_CUTOFF smt_port_off_event(smc,phy->np); @@ -1213,7 +1209,7 @@ static void lem_check_lct(struct s_smc *smc, struct s_phy *phy) phy->pc_lem_fail = TRUE ; break ; } - DB_PCMN(1," >>errors : %d\n",lem->lem_errors,0) ; + DB_PCMN(1, " >>errors : %lu", lem->lem_errors); } if (phy->pc_lem_fail) { mib->fddiPORTLCTFail_Ct++ ; @@ -1277,7 +1273,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) mib = phy->mib ; - DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ; + DB_PCMN(1, "SIG rec %x %x:", bit, phy->r_val[bit]); bit++ ; switch(bit) { @@ -1298,8 +1294,8 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) case 4: if (mib->fddiPORTMy_Type == TM && mib->fddiPORTNeighborType == TM) { - DB_PCMN(1,"PCM %c : E100 withhold M-M\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E100 withhold M-M", + phy->phy_name); mib->fddiPORTPC_Withhold = PC_WH_M_M ; RS_SET(smc,RS_EVENT) ; } @@ -1321,16 +1317,16 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) else { mib->fddiPORTPC_Withhold = PC_WH_OTHER ; RS_SET(smc,RS_EVENT) ; - DB_PCMN(1,"PCM %c : E101 withhold other\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E101 withhold other", + phy->phy_name); } phy->twisted = ((mib->fddiPORTMy_Type != TS) && (mib->fddiPORTMy_Type != TM) && (mib->fddiPORTNeighborType == mib->fddiPORTMy_Type)) ; if (phy->twisted) { - DB_PCMN(1,"PCM %c : E102 !!! TWISTED !!!\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E102 !!! TWISTED !!!", + phy->phy_name); } break ; case 5 : @@ -1368,7 +1364,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) if (phy->t_next[7] > smc->s.pcm_lc_medium) { start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy); } - DB_PCMN(1,"LCT timer = %ld us\n", phy->t_next[7], 0) ; + DB_PCMN(1, "LCT timer = %ld us", phy->t_next[7]); phy->t_next[9] = smc->s.pcm_t_next_9 ; break ; case 7: @@ -1379,8 +1375,9 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) break ; case 8: if (phy->t_val[7] || phy->r_val[7]) { - DB_PCMN(1,"PCM %c : E103 LCT fail %s\n", - phy->phy_name,phy->t_val[7]? "local":"remote") ; + DB_PCMN(1, "PCM %c : E103 LCT fail %s", + phy->phy_name, + phy->t_val[7] ? 
"local" : "remote"); queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ; } break ; @@ -1529,8 +1526,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy phy->cf_loop = FALSE ; lem_check_lct(smc,phy) ; if (phy->pc_lem_fail) { - DB_PCMN(1,"PCM %c : E104 LCT failed\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E104 LCT failed", phy->phy_name); phy->t_val[7] = 1 ; } else @@ -1580,7 +1576,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ; break ; } - DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ; + DB_PCMN(1, "SIG snd %x %x:", bit, phy->t_val[bit]); } /* @@ -1783,13 +1779,14 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) } /*jd 05-Aug-1999 changed: Bug #10419 */ - DB_PCMN(1,"PLC %d: MDcF = %x\n", np, smc->e.DisconnectFlag); + DB_PCMN(1, "PLC %d: MDcF = %x", np, smc->e.DisconnectFlag); if (smc->e.DisconnectFlag == FALSE) { - DB_PCMN(1,"PLC %d: restart (reason %x)\n", np, reason); + DB_PCMN(1, "PLC %d: restart (reason %x)", np, reason); queue_event(smc,EVENT_PCM+np,PC_START) ; } else { - DB_PCMN(1,"PLC %d: NO!! restart (reason %x)\n", np, reason); + DB_PCMN(1, "PLC %d: NO!! restart (reason %x)", + np, reason); } return ; } @@ -1810,8 +1807,8 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) if (cmd & PL_TRACE_PROP) { /* MLS while PC8_ACTIV || PC2_TRACE */ /*PC22b*/ if (!phy->tr_flag) { - DB_PCMN(1,"PCM : irq TRACE_PROP %d %d\n", - np,smc->mib.fddiSMTECMState) ; + DB_PCMN(1, "PCM : irq TRACE_PROP %d %d", + np, smc->mib.fddiSMTECMState); phy->tr_flag = TRUE ; smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ; queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ; @@ -1824,8 +1821,9 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) { /*PC22a*/ if (smc->e.path_test == PT_PASSED) { - DB_PCMN(1,"PCM : state = %s %d\n", get_pcmstate(smc,np), - phy->mib->fddiPORTPCMState) ; + DB_PCMN(1, "PCM : state = %s %d", + get_pcmstate(smc, np), + phy->mib->fddiPORTPCMState); smc->e.path_test = PT_PENDING ; queue_event(smc,EVENT_ECM,EC_PATH_TEST) ; @@ -1835,9 +1833,10 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) /* break_required (TNE > NS_Max) */ if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) { if (!phy->tr_flag) { - DB_PCMN(1,"PCM %c : PC81 %s\n",phy->phy_name,"NSE"); - queue_event(smc,EVENT_PCM+np,PC_START) ; - return ; + DB_PCMN(1, "PCM %c : PC81 %s", + phy->phy_name, "NSE"); + queue_event(smc, EVENT_PCM + np, PC_START); + return; } } } diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c index 52fa162a31e0..eee447315e32 100644 --- a/drivers/net/fddi/skfp/pmf.c +++ b/drivers/net/fddi/skfp/pmf.c @@ -284,7 +284,7 @@ void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local) SMbuf *reply ; sm = smtod(mb,struct smt_header *) ; - DB_SMT("SMT: processing PMF frame at %p len %d\n",sm,mb->sm_len) ; + DB_SMT("SMT: processing PMF frame at %p len %d", sm, mb->sm_len); #ifdef DEBUG dump_smt(smc,sm,"PMF Received") ; #endif @@ -1585,7 +1585,7 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text) dump_hex((char *) &sm->smt_source,6) ; printf(" Class %x Type %x Version %x\n", sm->smt_class,sm->smt_type,sm->smt_version) ; - printf("TID %lx\t\tSID ",sm->smt_tid) ; + printf("TID %x\t\tSID ", sm->smt_tid); dump_hex((char *) &sm->smt_sid,8) ; printf(" LEN %x\n",sm->smt_len) ; diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c index 
ef8d5672d9e8..52b22095273a 100644 --- a/drivers/net/fddi/skfp/rmt.c +++ b/drivers/net/fddi/skfp/rmt.c @@ -70,7 +70,6 @@ static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ; #define RM6_DIRECTED 6 /* sending directed beacons */ #define RM7_TRACE 7 /* trace initiated */ -#ifdef DEBUG /* * symbolic state names */ @@ -91,7 +90,6 @@ static const char * const rmt_events[] = { "RM_TIMEOUT_ANNOUNCE","RM_TIMEOUT_T_DIRECT", "RM_TIMEOUT_D_MAX","RM_TIMEOUT_POLL","RM_TX_STATE_CHANGE" } ; -#endif /* * Globals @@ -149,10 +147,10 @@ void rmt(struct s_smc *smc, int event) int state ; do { - DB_RMT("RMT : state %s%s", - (smc->mib.m[MAC0].fddiMACRMTState & AFLAG) ? "ACTIONS " : "", - rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG]) ; - DB_RMT(" event %s\n",rmt_events[event],0) ; + DB_RMT("RMT : state %s%s event %s", + smc->mib.m[MAC0].fddiMACRMTState & AFLAG ? "ACTIONS " : "", + rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG], + rmt_events[event]); state = smc->mib.m[MAC0].fddiMACRMTState ; rmt_fsm(smc,event) ; event = 0 ; @@ -191,7 +189,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) smc->r.loop_avail = FALSE ; smc->r.sm_ma_avail = FALSE ; smc->r.no_flag = TRUE ; - DB_RMTN(1,"RMT : ISOLATED\n",0,0) ; + DB_RMTN(1, "RMT : ISOLATED"); ACTIONS_DONE() ; break ; case RM0_ISOLATED : @@ -213,7 +211,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) stop_rmt_timer1(smc) ; stop_rmt_timer2(smc) ; sm_ma_control(smc,MA_BEACON) ; - DB_RMTN(1,"RMT : RING DOWN\n",0,0) ; + DB_RMTN(1, "RMT : RING DOWN"); RS_SET(smc,RS_NORINGOP) ; smc->r.sm_ma_avail = FALSE ; rmt_indication(smc,0) ; @@ -248,7 +246,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) else smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ; } - DB_RMTN(1,"RMT : RING UP\n",0,0) ; + DB_RMTN(1, "RMT : RING UP"); RS_CLEAR(smc,RS_NORINGOP) ; RS_SET(smc,RS_RINGOPCHANGE) ; rmt_indication(smc,1) ; @@ -285,7 +283,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ; start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ; sm_mac_check_beacon_claim(smc) ; - DB_RMTN(1,"RMT : RM3_DETECT\n",0,0) ; + DB_RMTN(1, "RMT : RM3_DETECT"); ACTIONS_DONE() ; break ; case RM3_DETECT : @@ -327,7 +325,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * trace ! */ if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) { - DB_RMTN(2,"RMT : DETECT && TRT_EXPIRED && T4/T5\n",0,0); + DB_RMTN(2, "RMT : DETECT && TRT_EXPIRED && T4/T5"); smc->r.bn_flag = TRUE ; /* * If one of the upstream stations beaconed @@ -344,9 +342,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * must be cleared in order to get in this condition. */ - DB_RMTN(2, - "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n", - tx,smc->r.bn_flag) ; + DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)", + tx, smc->r.bn_flag); } /*RM34a*/ else if (cmd == RM_MY_CLAIM && smc->r.timer0_exp) { @@ -378,7 +375,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ; start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ; sm_mac_check_beacon_claim(smc) ; - DB_RMTN(1,"RMT : RM4_NON_OP_DUP\n",0,0) ; + DB_RMTN(1, "RMT : RM4_NON_OP_DUP"); ACTIONS_DONE() ; break ; case RM4_NON_OP_DUP : @@ -406,7 +403,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * trace ! 
*/ if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) { - DB_RMTN(2,"RMT : NOPDUP && TRT_EXPIRED && T4/T5\n",0,0); + DB_RMTN(2, "RMT : NOPDUP && TRT_EXPIRED && T4/T5"); smc->r.bn_flag = TRUE ; /* * If one of the upstream stations beaconed @@ -423,9 +420,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * must be cleared in order to get in this condition. */ - DB_RMTN(2, - "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n", - tx,smc->r.bn_flag) ; + DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)", + tx, smc->r.bn_flag); } /*RM44c*/ else if (cmd == RM_TIMEOUT_ANNOUNCE && !smc->r.bn_flag) { @@ -448,7 +444,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) stop_rmt_timer0(smc) ; stop_rmt_timer1(smc) ; stop_rmt_timer2(smc) ; - DB_RMTN(1,"RMT : RM5_RING_OP_DUP\n",0,0) ; + DB_RMTN(1, "RMT : RM5_RING_OP_DUP"); ACTIONS_DONE() ; break; case RM5_RING_OP_DUP : @@ -472,7 +468,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ; sm_ma_control(smc,MA_DIRECTED) ; RS_SET(smc,RS_BEACON) ; - DB_RMTN(1,"RMT : RM6_DIRECTED\n",0,0) ; + DB_RMTN(1, "RMT : RM6_DIRECTED"); ACTIONS_DONE() ; break ; case RM6_DIRECTED : @@ -515,7 +511,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) stop_rmt_timer2(smc) ; smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ; queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ; - DB_RMTN(1,"RMT : RM7_TRACE\n",0,0) ; + DB_RMTN(1, "RMT : RM7_TRACE"); ACTIONS_DONE() ; break ; case RM7_TRACE : diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index e80a08903fcf..ab939ae7e5b5 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -35,7 +35,6 @@ static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ; #define SMT_TID_MAGIC 0x1f0a7b3c -#ifdef DEBUG static const char *const smt_type_name[] = { "SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??", "SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??", @@ -47,7 +46,7 @@ static const char *const smt_class_name[] = { "UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF", "SRF","PMF_GET","PMF_SET","ESF" } ; -#endif + #define LAST_CLASS (SMT_PMF_SET) static const struct fddi_addr SMT_Unknown = { @@ -203,7 +202,7 @@ void smt_agent_task(struct s_smc *smc) { smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L, EV_TOKEN(EVENT_SMT,SM_TIMER)) ; - DB_SMT("SMT agent task\n",0,0) ; + DB_SMT("SMT agent task"); } #ifndef SMT_REAL_TOKEN_CT @@ -396,7 +395,7 @@ void smt_event(struct s_smc *smc, int event) */ if (smc->sm.smt_tvu && time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) { - DB_SMT("SMT : UNA expired\n",0,0) ; + DB_SMT("SMT : UNA expired"); smc->sm.smt_tvu = 0 ; if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr, @@ -419,7 +418,7 @@ void smt_event(struct s_smc *smc, int event) } if (smc->sm.smt_tvd && time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) { - DB_SMT("SMT : DNA expired\n",0,0) ; + DB_SMT("SMT : DNA expired"); smc->sm.smt_tvd = 0 ; if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr, &SMT_Unknown)){ @@ -504,10 +503,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) #endif smt_swap_para(sm,(int) mb->sm_len,1) ; - DB_SMT("SMT : received packet [%s] at 0x%p\n", - smt_type_name[m_fc(mb) & 0xf],sm) ; - DB_SMT("SMT : version %d, class %s\n",sm->smt_version, - smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ; + DB_SMT("SMT : received packet [%s] at 0x%p", + smt_type_name[m_fc(mb) & 0xf], sm); + DB_SMT("SMT : version %d, class %s", + sm->smt_version, + smt_class_name[sm->smt_class > LAST_CLASS ? 
0 : sm->smt_class]); #ifdef SBA /* @@ -524,8 +524,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) * ignore any packet with NSA and A-indicator set */ if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) { - DB_SMT("SMT : ignoring NSA with A-indicator set from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : ignoring NSA with A-indicator set from %s", + addr_to_string(&sm->smt_source)); smt_free_mbuf(smc,mb) ; return ; } @@ -556,15 +556,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) break ; } if (illegal) { - DB_SMT("SMT : version = %d, dest = %s\n", - sm->smt_version,addr_to_string(&sm->smt_source)) ; + DB_SMT("SMT : version = %d, dest = %s", + sm->smt_version, addr_to_string(&sm->smt_source)); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ; smt_free_mbuf(smc,mb) ; return ; } if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) || ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) { - DB_SMT("SMT: info length error, len = %d\n",sm->smt_len,0) ; + DB_SMT("SMT: info length error, len = %d", sm->smt_len); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ; smt_free_mbuf(smc,mb) ; return ; @@ -572,7 +572,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) switch (sm->smt_class) { case SMT_NIF : if (smt_check_para(smc,sm,plist_nif)) { - DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ; + DB_SMT("SMT: NIF with para problem, ignoring"); break ; } switch (sm->smt_type) { @@ -586,8 +586,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) if (!is_equal( &smc->mib.m[MAC0].fddiMACUpstreamNbr, &sm->smt_source)) { - DB_SMT("SMT : updated my UNA = %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : updated my UNA = %s", + addr_to_string(&sm->smt_source)); if (!is_equal(&smc->mib.m[MAC0]. fddiMACUpstreamNbr,&SMT_Unknown)){ /* Do not update unknown address */ @@ -616,8 +616,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) is_individual(&sm->smt_source) && ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) || (m_fc(mb) != FC_SMT_NSA))) { - DB_SMT("SMT : replying to NIF request %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : replying to NIF request %s", + addr_to_string(&sm->smt_source)); smt_send_nif(smc,&sm->smt_source, FC_SMT_INFO, sm->smt_tid, @@ -625,11 +625,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) } break ; case SMT_REPLY : - DB_SMT("SMT : received NIF response from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : received NIF response from %s", + addr_to_string(&sm->smt_source)); if (fs & A_INDICATOR) { smc->sm.pend[SMT_TID_NIF] = 0 ; - DB_SMT("SMT : duplicate address\n",0,0) ; + DB_SMT("SMT : duplicate address"); smc->mib.m[MAC0].fddiMACDupAddressTest = DA_FAILED ; smc->r.dup_addr_test = DA_FAILED ; @@ -644,7 +644,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) if (!is_equal( &smc->mib.m[MAC0].fddiMACDownstreamNbr, &sm->smt_source)) { - DB_SMT("SMT : updated my DNA\n",0,0) ; + DB_SMT("SMT : updated my DNA"); if (!is_equal(&smc->mib.m[MAC0]. 
fddiMACDownstreamNbr, &SMT_Unknown)){ /* Do not update unknown address */ @@ -671,11 +671,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) } else if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF_TEST]) { - DB_SMT("SMT : NIF test TID ok\n",0,0) ; + DB_SMT("SMT : NIF test TID ok"); } else { - DB_SMT("SMT : expected TID %lx, got %lx\n", - smc->sm.pend[SMT_TID_NIF],sm->smt_tid) ; + DB_SMT("SMT : expected TID %lx, got %x", + smc->sm.pend[SMT_TID_NIF], sm->smt_tid); } break ; default : @@ -686,53 +686,53 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) case SMT_SIF_CONFIG : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; - DB_SMT("SMT : replying to SIF Config request from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : replying to SIF Config request from %s", + addr_to_string(&sm->smt_source)); smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_SIF_OPER : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; - DB_SMT("SMT : replying to SIF Operation request from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : replying to SIF Operation request from %s", + addr_to_string(&sm->smt_source)); smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_ECF : /* echo frame */ switch (sm->smt_type) { case SMT_REPLY : smc->mib.priv.fddiPRIVECF_Reply_Rx++ ; - DB_SMT("SMT: received ECF reply from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT: received ECF reply from %s", + addr_to_string(&sm->smt_source)); if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) { - DB_SMT("SMT: ECHODATA missing\n",0,0) ; + DB_SMT("SMT: ECHODATA missing"); break ; } if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) { - DB_SMT("SMT : ECF test TID ok\n",0,0) ; + DB_SMT("SMT : ECF test TID ok"); } else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) { - DB_SMT("SMT : ECF test UNA ok\n",0,0) ; + DB_SMT("SMT : ECF test UNA ok"); } else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) { - DB_SMT("SMT : ECF test DNA ok\n",0,0) ; + DB_SMT("SMT : ECF test DNA ok"); } else { - DB_SMT("SMT : expected TID %lx, got %lx\n", - smc->sm.pend[SMT_TID_ECF], - sm->smt_tid) ; + DB_SMT("SMT : expected TID %lx, got %x", + smc->sm.pend[SMT_TID_ECF], + sm->smt_tid); } break ; case SMT_REQUEST : smc->mib.priv.fddiPRIVECF_Req_Rx++ ; { if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) { - DB_SMT("SMT: ECF with para problem,sending RDF\n",0,0) ; + DB_SMT("SMT: ECF with para problem,sending RDF"); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH, local) ; break ; } - DB_SMT("SMT - sending ECF reply to %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT - sending ECF reply to %s", + addr_to_string(&sm->smt_source)); /* set destination addr. 
& reply */ sm->smt_dest = sm->smt_source ; @@ -750,7 +750,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) #ifndef BOOT case SMT_RAF : /* resource allocation */ #ifdef ESS - DB_ESSN(2,"ESS: RAF frame received\n",0,0) ; + DB_ESSN(2, "ESS: RAF frame received"); fs = ess_raf_received_pack(smc,mb,sm,fs) ; #endif @@ -764,7 +764,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) break ; case SMT_ESF : /* extended service - not supported */ if (sm->smt_type == SMT_REQUEST) { - DB_SMT("SMT - received ESF, sending RDF\n",0,0) ; + DB_SMT("SMT - received ESF, sending RDF"); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ; } break ; @@ -782,7 +782,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) */ if ((sm->smt_class == SMT_PMF_SET) && !is_individual(&sm->smt_dest)) { - DB_SMT("SMT: ignoring PMF-SET with I/G set\n",0,0) ; + DB_SMT("SMT: ignoring PMF-SET with I/G set"); break ; } smt_pmf_received_pack(smc,mb, local) ; @@ -798,16 +798,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) * we need to send a RDF frame according to 8.1.3.1.1, * only if it is a REQUEST. */ - DB_SMT("SMT : class = %d, send RDF to %s\n", - sm->smt_class, addr_to_string(&sm->smt_source)) ; + DB_SMT("SMT : class = %d, send RDF to %s", + sm->smt_class, addr_to_string(&sm->smt_source)); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ; break ; #endif } if (illegal) { - DB_SMT("SMT: discarding invalid frame, reason = %d\n", - illegal,0) ; + DB_SMT("SMT: discarding invalid frame, reason = %d", illegal); } smt_free_mbuf(smc,mb) ; } @@ -869,8 +868,8 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason, if (sm->smt_type != SMT_REQUEST) return ; - DB_SMT("SMT: sending RDF to %s,reason = 0x%x\n", - addr_to_string(&sm->smt_source),reason) ; + DB_SMT("SMT: sending RDF to %s,reason = 0x%x", + addr_to_string(&sm->smt_source), reason); /* @@ -1653,7 +1652,7 @@ int smt_check_para(struct s_smc *smc, struct smt_header *sm, const u_short *p = list ; while (*p) { if (!sm_to_para(smc,sm,(int) *p)) { - DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0); + DB_SMT("SMT: smt_check_para - missing para %hx", *p); return -1; } p++ ; @@ -1679,11 +1678,11 @@ void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para) p += plen ; len -= plen ; if (len < 0) { - DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ; + DB_SMT("SMT : sm_to_para - length error %d", plen); return NULL; } if ((plen & 3) && (para != SMT_P_ECHODATA)) { - DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ; + DB_SMT("SMT : sm_to_para - odd length %d", plen); return NULL; } if (found) @@ -1937,7 +1936,7 @@ int smt_action(struct s_smc *smc, int class, int code, int index) { int event ; int port ; - DB_SMT("SMT: action %d code %d\n",class,code) ; + DB_SMT("SMT: action %d code %d", class, code); switch(class) { case SMT_STATION_ACTION : switch(code) { diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c index 9956680402de..4e286c1ba9cd 100644 --- a/drivers/net/fddi/skfp/srf.c +++ b/drivers/net/fddi/skfp/srf.c @@ -173,7 +173,6 @@ static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index) #define THRESHOLD_2 (2*TICKS_PER_SECOND) #define THRESHOLD_32 (32*TICKS_PER_SECOND) -#ifdef DEBUG static const char * const srf_names[] = { "None","MACPathChangeEvent", "MACNeighborChangeEvent", "PORTPathChangeEvent", "PORTUndesiredConnectionAttemptEvent", @@ -182,7 +181,6 @@ static const char * const srf_names[] = { "MACNotCopiedCondition", 
"PORTEBErrorCondition", "PORTLerCondition" } ; -#endif void smt_srf_event(struct s_smc *smc, int code, int index, int cond) { @@ -198,10 +196,10 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond) } if (code) { - DB_SMT("SRF: %s index %d\n",srf_names[code],index) ; + DB_SMT("SRF: %s index %d", srf_names[code], index); if (!(evc = smt_get_evc(smc,code,index))) { - DB_SMT("SRF : smt_get_evc() failed\n",0,0) ; + DB_SMT("SRF : smt_get_evc() failed"); return ; } /* @@ -217,7 +215,7 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond) */ smt_set_timestamp(smc,smc->mib.fddiSMTTransitionTimeStamp) ; if (SMT_IS_CONDITION(code)) { - DB_SMT("SRF: condition is %s\n",cond ? "ON":"OFF",0) ; + DB_SMT("SRF: condition is %s", cond ? "ON" : "OFF"); if (cond) { *evc->evc_cond_state = TRUE ; evc->evc_rep_required = TRUE ; @@ -414,9 +412,9 @@ static void smt_send_srf(struct s_smc *smc) smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ; mb->sm_len = smt->smt_len + sizeof(struct smt_header) ; - DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ; - DB_SMT("SRF: state SR%d Threshold %d\n", - smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ; + DB_SMT("SRF: sending SRF at %p, len %d", smt, mb->sm_len); + DB_SMT("SRF: state SR%d Threshold %lu", + smc->srf.sr_state, smc->srf.SRThreshold / TICKS_PER_SECOND); #ifdef DEBUG dump_smt(smc,smt,"SRF Send") ; #endif -- cgit v1.2.3 From 801822d1beea4f11a38df991b420ca917f6a917b Mon Sep 17 00:00:00 2001 From: Shyam Saini Date: Sat, 24 Dec 2016 00:44:58 +0530 Subject: net: Use kmemdup instead of kmalloc and memcpy when some other buffer is immediately copied into allocated region. Replace calls to kmalloc followed by a memcpy with a direct call to kmemdup. Signed-off-by: Shyam Saini Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4a105006ca63..2cea022e6e6e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1236,10 +1236,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p) struct sockaddr *addr; struct scatterlist sg; - addr = kmalloc(sizeof(*addr), GFP_KERNEL); + addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); if (!addr) return -ENOMEM; - memcpy(addr, p, sizeof(*addr)); ret = eth_prepare_mac_addr_change(dev, addr); if (ret) -- cgit v1.2.3 From 1946e672c173559155a3e210fe95dbf8b7b8ddf7 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Wed, 28 Dec 2016 17:52:32 +0800 Subject: ipv4: Namespaceify tcp_tw_recycle and tcp_max_tw_buckets knob Different namespace application might require fast recycling TIME-WAIT sockets independently of the host. Signed-off-by: Haishuang Yan Signed-off-by: David S. 
Miller --- include/net/inet_timewait_sock.h | 13 +------------ include/net/netns/ipv4.h | 11 +++++++++++ include/net/tcp.h | 1 - net/ipv4/af_inet.c | 2 -- net/ipv4/inet_timewait_sock.c | 3 +-- net/ipv4/proc.c | 2 +- net/ipv4/sysctl_net_ipv4.c | 28 ++++++++++++++-------------- net/ipv4/tcp.c | 3 ++- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_ipv4.c | 12 ++++++++---- net/ipv4/tcp_minisocks.c | 14 +++++--------- net/ipv6/tcp_ipv6.c | 7 ++++--- 12 files changed, 48 insertions(+), 50 deletions(-) diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index c9b3eb70f340..6a75d67a30fd 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -29,16 +29,6 @@ #include -struct inet_hashinfo; - -struct inet_timewait_death_row { - atomic_t tw_count; - - struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; - int sysctl_tw_recycle; - int sysctl_max_tw_buckets; -}; - struct inet_bind_bucket; /* @@ -125,8 +115,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); -void inet_twsk_purge(struct inet_hashinfo *hashinfo, - struct inet_timewait_death_row *twdr, int family); +void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family); static inline struct net *twsk_net(const struct inet_timewait_sock *twsk) diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 0378e88f6fd3..fffd38453985 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -27,6 +27,16 @@ struct ping_group_range { kgid_t range[2]; }; +struct inet_hashinfo; + +struct inet_timewait_death_row { + atomic_t tw_count; + + struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; + int sysctl_tw_recycle; + int sysctl_max_tw_buckets; +}; + struct netns_ipv4 { #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; @@ -111,6 +121,7 @@ struct netns_ipv4 { int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_tcp_tw_reuse; + struct inet_timewait_death_row tcp_death_row; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; diff --git a/include/net/tcp.h b/include/net/tcp.h index 6061963cca98..1da0aa724929 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -231,7 +231,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); */ #define TFO_SERVER_WO_SOCKOPT1 0x400 -extern struct inet_timewait_death_row tcp_death_row; /* sysctl variables for tcp */ extern int sysctl_tcp_timestamps; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f75069883f2b..aae410bb655a 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1831,8 +1831,6 @@ static int __init inet_init(void) ip_init(); - tcp_v4_init(); - /* Setup TCP slab cache for open requests. 
*/ tcp_init(); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index ddcd56c08d14..f8aff2c71cde 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -257,8 +257,7 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm) } EXPORT_SYMBOL_GPL(__inet_twsk_schedule); -void inet_twsk_purge(struct inet_hashinfo *hashinfo, - struct inet_timewait_death_row *twdr, int family) +void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family) { struct inet_timewait_sock *tw; struct sock *sk; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 7143ca1a6af9..0247ca032232 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -65,7 +65,7 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) socket_seq_show(seq); seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", sock_prot_inuse_get(net, &tcp_prot), orphans, - atomic_read(&tcp_death_row.tw_count), sockets, + atomic_read(&net->ipv4.tcp_death_row.tw_count), sockets, proto_memory_allocated(&tcp_prot)); seq_printf(seq, "UDP: inuse %d mem %ld\n", sock_prot_inuse_get(net, &udp_prot), diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 22cbd61079b5..66f8f1b1dc78 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -289,13 +289,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, - { - .procname = "tcp_max_tw_buckets", - .data = &tcp_death_row.sysctl_max_tw_buckets, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "tcp_fastopen", .data = &sysctl_tcp_fastopen, @@ -309,13 +302,6 @@ static struct ctl_table ipv4_table[] = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10), .proc_handler = proc_tcp_fastopen_key, }, - { - .procname = "tcp_tw_recycle", - .data = &tcp_death_row.sysctl_tw_recycle, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "tcp_abort_on_overflow", .data = &sysctl_tcp_abort_on_overflow, @@ -960,6 +946,20 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_max_tw_buckets", + .data = &init_net.ipv4.tcp_death_row.sysctl_max_tw_buckets, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { + .procname = "tcp_tw_recycle", + .data = &init_net.ipv4.tcp_death_row.sysctl_tw_recycle, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, #ifdef CONFIG_IP_ROUTE_MULTIPATH { .procname = "fib_multipath_use_neigh", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4a044964da66..7f0d81c090ce 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3334,6 +3334,7 @@ void __init tcp_init(void) percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); + inet_hashinfo_init(&tcp_hashinfo); tcp_hashinfo.bind_bucket_cachep = kmem_cache_create("tcp_bind_bucket", sizeof(struct inet_bind_bucket), 0, @@ -3378,7 +3379,6 @@ void __init tcp_init(void) cnt = tcp_hashinfo.ehash_mask + 1; - tcp_death_row.sysctl_max_tw_buckets = cnt / 2; sysctl_tcp_max_orphans = cnt / 2; sysctl_max_syn_backlog = max(128, cnt / 256); @@ -3399,6 +3399,7 @@ void __init tcp_init(void) pr_info("Hash tables configured (established %u bind %u)\n", tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); + tcp_v4_init(); tcp_metrics_init(); BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); tcp_tasklet_init(); diff --git a/net/ipv4/tcp_input.c 
b/net/ipv4/tcp_input.c index 6c790754ae3e..c61480249835 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6363,7 +6363,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, * timewait bucket, so that all the necessary checks * are made in the function processing timewait state. */ - if (tcp_death_row.sysctl_tw_recycle) { + if (net->ipv4.tcp_death_row.sysctl_tw_recycle) { bool strict; dst = af_ops->route_req(sk, &fl, req, &strict); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fe9da4fb96bf..56b5f49e3f97 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -146,6 +146,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) struct rtable *rt; int err; struct ip_options_rcu *inet_opt; + struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; @@ -196,7 +197,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) tp->write_seq = 0; } - if (tcp_death_row.sysctl_tw_recycle && + if (tcp_death_row->sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) tcp_fetch_timewait_stamp(sk, &rt->dst); @@ -215,7 +216,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * complete initialization after this. */ tcp_set_state(sk, TCP_SYN_SENT); - err = inet_hash_connect(&tcp_death_row, sk); + err = inet_hash_connect(tcp_death_row, sk); if (err) goto failure; @@ -2457,6 +2458,10 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX; net->ipv4.sysctl_tcp_tw_reuse = 0; + net->ipv4.tcp_death_row.sysctl_tw_recycle = 0; + net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (tcp_hashinfo.ehash_mask + 1) / 2; + net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo; + return 0; fail: tcp_sk_exit(net); @@ -2466,7 +2471,7 @@ fail: static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) { - inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET); + inet_twsk_purge(&tcp_hashinfo, AF_INET); } static struct pernet_operations __net_initdata tcp_sk_ops = { @@ -2477,7 +2482,6 @@ static struct pernet_operations __net_initdata tcp_sk_ops = { void __init tcp_v4_init(void) { - inet_hashinfo_init(&tcp_hashinfo); if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 28ce5ee831f5..06fde26a82b7 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -29,12 +29,6 @@ int sysctl_tcp_abort_on_overflow __read_mostly; -struct inet_timewait_death_row tcp_death_row = { - .sysctl_max_tw_buckets = NR_FILE * 2, - .hashinfo = &tcp_hashinfo, -}; -EXPORT_SYMBOL_GPL(tcp_death_row); - static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) { if (seq == s_win) @@ -100,6 +94,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_options_received tmp_opt; struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); bool paws_reject = false; + struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock*)tw)->ipv4.tcp_death_row; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { @@ -153,7 +148,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, tcptw->tw_ts_recent = tmp_opt.rcv_tsval; } - if (tcp_death_row.sysctl_tw_recycle && + if (tcp_death_row->sysctl_tw_recycle && tcptw->tw_ts_recent_stamp && tcp_tw_remember_stamp(tw)) 
inet_twsk_reschedule(tw, tw->tw_timeout); @@ -264,11 +259,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) const struct tcp_sock *tp = tcp_sk(sk); struct inet_timewait_sock *tw; bool recycle_ok = false; + struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; - if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) + if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) recycle_ok = tcp_remember_stamp(sk); - tw = inet_twsk_alloc(sk, &tcp_death_row, state); + tw = inet_twsk_alloc(sk, tcp_death_row, state); if (tw) { struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 73bc8fc68acd..a4cdf6a34c30 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -123,6 +123,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, struct dst_entry *dst; int addr_type; int err; + struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; @@ -258,7 +259,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, sk->sk_gso_type = SKB_GSO_TCPV6; ip6_dst_store(sk, dst, NULL, NULL); - if (tcp_death_row.sysctl_tw_recycle && + if (tcp_death_row->sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr)) tcp_fetch_timewait_stamp(sk, dst); @@ -273,7 +274,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, inet->inet_dport = usin->sin6_port; tcp_set_state(sk, TCP_SYN_SENT); - err = inet6_hash_connect(&tcp_death_row, sk); + err = inet6_hash_connect(tcp_death_row, sk); if (err) goto late_failure; @@ -1948,7 +1949,7 @@ static void __net_exit tcpv6_net_exit(struct net *net) static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list) { - inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); + inet_twsk_purge(&tcp_hashinfo, AF_INET6); } static struct pernet_operations tcpv6_net_ops = { -- cgit v1.2.3 From fee83d097b1620530f23bf6063f4ea251ba9c8c7 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Wed, 28 Dec 2016 17:52:33 +0800 Subject: ipv4: Namespaceify tcp_max_syn_backlog knob Different namespace application might require different maximal number of remembered connection requests. Signed-off-by: Haishuang Yan Signed-off-by: David S. Miller --- include/net/netns/ipv4.h | 1 + include/net/request_sock.h | 4 +--- net/core/request_sock.c | 2 -- net/ipv4/sysctl_net_ipv4.c | 14 +++++++------- net/ipv4/tcp.c | 2 -- net/ipv4/tcp_input.c | 4 ++-- net/ipv4/tcp_ipv4.c | 7 +++++-- 7 files changed, 16 insertions(+), 18 deletions(-) diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index fffd38453985..8e3f5b6f26d5 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -122,6 +122,7 @@ struct netns_ipv4 { unsigned int sysctl_tcp_notsent_lowat; int sysctl_tcp_tw_reuse; struct inet_timewait_death_row tcp_death_row; + int sysctl_max_syn_backlog; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 6ebe13eb1c4c..a12a5d25b27e 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -1,7 +1,7 @@ /* * NET Generic infrastructure for Network protocols. 
* - * Definitions for request_sock + * Definitions for request_sock * * Authors: Arnaldo Carvalho de Melo * @@ -123,8 +123,6 @@ static inline void reqsk_put(struct request_sock *req) reqsk_free(req); } -extern int sysctl_max_syn_backlog; - /* * For a TCP Fast Open listener - * lock - protects the access to all the reqsk, which is co-owned by diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 5d26056b6d8f..9b8727c67b58 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -34,8 +34,6 @@ * and it will increase in proportion to the memory of machine. * Note : Dont forget somaxconn that may limit backlog too. */ -int sysctl_max_syn_backlog = 256; -EXPORT_SYMBOL(sysctl_max_syn_backlog); void reqsk_queue_alloc(struct request_sock_queue *queue) { diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 66f8f1b1dc78..134d8e191366 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -323,13 +323,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, - { - .procname = "tcp_max_syn_backlog", - .data = &sysctl_max_syn_backlog, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "inet_peer_threshold", .data = &inet_peer_threshold, @@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_max_syn_backlog", + .data = &init_net.ipv4.sysctl_max_syn_backlog, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, #ifdef CONFIG_IP_ROUTE_MULTIPATH { .procname = "fib_multipath_use_neigh", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7f0d81c090ce..2e3807d8eba8 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3378,9 +3378,7 @@ void __init tcp_init(void) cnt = tcp_hashinfo.ehash_mask + 1; - sysctl_tcp_max_orphans = cnt / 2; - sysctl_max_syn_backlog = max(128, cnt / 256); tcp_init_mem(); /* Set per-socket limits to no more than 1/128 the pressure threshold */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c61480249835..ec6d84363024 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6377,8 +6377,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, } /* Kill the following clause, if you dislike this way. 
*/ else if (!net->ipv4.sysctl_tcp_syncookies && - (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < - (sysctl_max_syn_backlog >> 2)) && + (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < + (net->ipv4.sysctl_max_syn_backlog >> 2)) && !tcp_peer_is_proven(req, dst, false, tmp_opt.saw_tstamp)) { /* Without syncookies last quarter of diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 56b5f49e3f97..7e4be4f361f3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2419,7 +2419,7 @@ static void __net_exit tcp_sk_exit(struct net *net) static int __net_init tcp_sk_init(struct net *net) { - int res, cpu; + int res, cpu, cnt; net->ipv4.tcp_sk = alloc_percpu(struct sock *); if (!net->ipv4.tcp_sk) @@ -2458,10 +2458,13 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX; net->ipv4.sysctl_tcp_tw_reuse = 0; + cnt = tcp_hashinfo.ehash_mask + 1; net->ipv4.tcp_death_row.sysctl_tw_recycle = 0; - net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (tcp_hashinfo.ehash_mask + 1) / 2; + net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2; net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo; + net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256); + return 0; fail: tcp_sk_exit(net); -- cgit v1.2.3 From 9eb12474787d32a968b5b01ecbf83acd2d230b25 Mon Sep 17 00:00:00 2001 From: jpinto Date: Wed, 28 Dec 2016 12:57:48 +0000 Subject: stmmac: enable rx queues When the hardware is synthesized with multiple queues, all queues are disabled for default. This patch adds the rx queues configuration. This patch was successfully tested in a Synopsys QoS Reference design. Signed-off-by: Joao Pinto Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 5 +++++ drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 8 +++++++ drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 12 +++++++++++ drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 5 +++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26 +++++++++++++++++++++++ 5 files changed, 56 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b13a144f72ad..6c9629138462 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -323,6 +323,9 @@ struct dma_features { /* TX and RX number of channels */ unsigned int number_rx_channel; unsigned int number_tx_channel; + /* TX and RX number of queues */ + unsigned int number_rx_queues; + unsigned int number_tx_queues; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; }; @@ -454,6 +457,8 @@ struct stmmac_ops { void (*core_init)(struct mac_device_info *hw, int mtu); /* Enable and verify that the IPC module is supported */ int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ + void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); /* Dump MAC registers */ void (*dump_regs)(struct mac_device_info *hw); /* Handle extra events on specific interrupts hw dependent */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 3e8d4fefa5e0..b524598b1c9e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -22,6 +22,7 @@ #define GMAC_HASH_TAB_32_63 0x00000014 #define GMAC_RX_FLOW_CTRL 0x00000090 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_RXQ_CTRL0 0x000000a0 #define GMAC_INT_STATUS 0x000000b0 #define GMAC_INT_EN 0x000000b4 #define GMAC_PCS_BASE 
0x000000e0 @@ -44,6 +45,11 @@ #define GMAC_MAX_PERFECT_ADDRESSES 128 +/* MAC RX Queue Enable */ +#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) +#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) +#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1) + /* MAC Flow Control RX */ #define GMAC_RX_FLOW_CTRL_RFE BIT(0) @@ -133,6 +139,8 @@ enum power_event { /* MAC HW features2 bitmap */ #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) +#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) +#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0) /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index eaed7cb21867..ecfbf577e26f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -59,6 +59,17 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu) writel(value, ioaddr + GMAC_INT_EN); } +static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); + + value &= GMAC_RX_QUEUE_CLEAR(queue); + value |= GMAC_RX_AV_QUEUE_ENABLE(queue); + + writel(value, ioaddr + GMAC_RXQ_CTRL0); +} + static void dwmac4_dump_regs(struct mac_device_info *hw) { void __iomem *ioaddr = hw->pcsr; @@ -392,6 +403,7 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) static const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, .dump_regs = dwmac4_dump_regs, .host_irq_status = dwmac4_irq_status, .flow_ctrl = dwmac4_flow_ctrl, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 8196ab5fc33c..377d1b44d4f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -303,6 +303,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1; dma_cap->number_tx_channel = ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1; + /* TX and RX number of queues */ + dma_cap->number_rx_queues = + ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; + dma_cap->number_tx_queues = + ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index bb40382e205d..c97870f880e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1270,6 +1270,28 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) kfree(priv->tx_skbuff); } +/** + * stmmac_mac_enable_rx_queues - Enable MAC rx queues + * @priv: driver private structure + * Description: It is used for enabling the rx queues in the MAC + */ +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) +{ + int rx_count = priv->dma_cap.number_rx_queues; + int queue = 0; + + /* If GMAC does not have multiple queues, then this is not necessary*/ + if (rx_count == 1) + return; + + /** + * If the core is synthesized with multiple rx queues / multiple + * dma channels, then rx queues will be disabled by default. + * For now only rx queue 0 is enabled. 
+ */ + priv->hw->mac->rx_queue_enable(priv->hw, queue); +} + /** * stmmac_dma_operation_mode - HW DMA operation mode * @priv: driver private structure @@ -1691,6 +1713,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); + /* Initialize MAC RX Queues */ + if (priv->hw->mac->rx_queue_enable) + stmmac_mac_enable_rx_queues(priv); + ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); -- cgit v1.2.3 From de8499cee59465cfa6135591d665a065539d456b Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 28 Dec 2016 11:53:18 -0500 Subject: ipv6: remove unnecessary inet6_sk check np is already assigned in the variable declaration of ping_v6_sendmsg. At this point, we have already dereferenced np several times, so the NULL check is also redundant. Suggested-by: Eric Dumazet Signed-off-by: Dave Jones Signed-off-by: David S. Miller --- net/ipv6/ping.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index e1f8b34d7a2e..9b522fa90e6d 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -126,12 +126,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) return PTR_ERR(dst); rt = (struct rt6_info *) dst; - np = inet6_sk(sk); - if (!np) { - err = -EBADF; - goto dst_err_out; - } - if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; else if (!fl6.flowi6_oif) @@ -166,7 +160,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } release_sock(sk); -dst_err_out: dst_release(dst); if (err) -- cgit v1.2.3 From ae7cd93e20a135cf6e2021de92b37f2988aec1de Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 28 Dec 2016 17:31:20 +0000 Subject: drivers: atm: eni: rename macro DAUGTHER_ID to fix spelling mistake Rename DAUGTHER_ID to DAUGHTER_ID to fix spelling mistake Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/atm/eni.c | 2 +- drivers/atm/midway.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index c53a9dd1353f..623359e407aa 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1779,7 +1779,7 @@ static int eni_do_init(struct atm_dev *dev) printk(")\n"); printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number, eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA", - media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]); + media_name[eni_in(MID_RES_ID_MCON) & DAUGHTER_ID]); error = suni_init(dev); if (error) diff --git a/drivers/atm/midway.h b/drivers/atm/midway.h index 432525ad5e46..d8bec0f2a71c 100644 --- a/drivers/atm/midway.h +++ b/drivers/atm/midway.h @@ -56,7 +56,7 @@ #define MID_CON_SUNI 0x00000040 /* 0: UTOPIA; 1: SUNI */ #define MID_CON_V6 0x00000020 /* 0: non-pipel UTOPIA (required iff !CON_SUNI; 1: UTOPIA */ -#define DAUGTHER_ID 0x0000001f /* daugther board id */ +#define DAUGHTER_ID 0x0000001f /* daughter board id */ /* * Interrupt Status Acknowledge, Interrupt Status & Interrupt Enable -- cgit v1.2.3 From b356a2e729cec145a648d22ba5686357c009da25 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:31 -0500 Subject: bnxt_en: Remove busy poll logic in the driver. Use native NAPI polling instead. The next patch will complete the work by switching to use napi_complete_done() Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 53 +---------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 99 ------------------------------- 2 files changed, 3 insertions(+), 149 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9608cb49a11c..b53f958ffcb8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -39,9 +39,6 @@ #include #include #include -#ifdef CONFIG_NET_RX_BUSY_POLL -#include -#endif #include #include #include @@ -1356,11 +1353,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = -ENOMEM; if (likely(skb)) { skb_record_rx_queue(skb, bnapi->index); - skb_mark_napi_id(skb, &bnapi->napi); - if (bnxt_busy_polling(bnapi)) - netif_receive_skb(skb); - else - napi_gro_receive(&bnapi->napi, skb); + napi_gro_receive(&bnapi->napi, skb); rc = 1; } goto next_rx_no_prod; @@ -1460,11 +1453,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } skb_record_rx_queue(skb, bnapi->index); - skb_mark_napi_id(skb, &bnapi->napi); - if (bnxt_busy_polling(bnapi)) - netif_receive_skb(skb); - else - napi_gro_receive(&bnapi->napi, skb); + napi_gro_receive(&bnapi->napi, skb); rc = 1; next_rx: @@ -1782,9 +1771,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int work_done = 0; - if (!bnxt_lock_napi(bnapi)) - return budget; - while (1) { work_done += bnxt_poll_work(bp, bnapi, budget - work_done); @@ -1798,36 +1784,9 @@ static int bnxt_poll(struct napi_struct *napi, int budget) } } mmiowb(); - bnxt_unlock_napi(bnapi); return work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int bnxt_busy_poll(struct napi_struct *napi) -{ - struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); - struct bnxt *bp = bnapi->bp; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - int rx_work, budget = 4; - - if (atomic_read(&bp->intr_sem) != 0) - return LL_FLUSH_FAILED; - - if (!bp->link_info.link_up) - return LL_FLUSH_FAILED; - - if (!bnxt_lock_poll(bnapi)) - return LL_FLUSH_BUSY; - - rx_work = bnxt_poll_work(bp, bnapi, budget); - - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); - - bnxt_unlock_poll(bnapi); - return rx_work; -} -#endif - static void bnxt_free_tx_skbs(struct bnxt *bp) { int i, max_idx; @@ -5094,10 +5053,8 @@ static void bnxt_disable_napi(struct bnxt *bp) if (!bp->bnapi) return; - for (i = 0; i < bp->cp_nr_rings; i++) { + for (i = 0; i < bp->cp_nr_rings; i++) napi_disable(&bp->bnapi[i]->napi); - bnxt_disable_poll(bp->bnapi[i]); - } } static void bnxt_enable_napi(struct bnxt *bp) @@ -5106,7 +5063,6 @@ static void bnxt_enable_napi(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { bp->bnapi[i]->in_reset = false; - bnxt_enable_poll(bp->bnapi[i]); napi_enable(&bp->bnapi[i]->napi); } } @@ -6765,9 +6721,6 @@ static const struct net_device_ops bnxt_netdev_ops = { #endif .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = bnxt_busy_poll, -#endif }; static void bnxt_remove_one(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 16defe9ececc..fddc316170b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -654,21 +654,9 @@ struct bnxt_napi { struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; -#ifdef 
CONFIG_NET_RX_BUSY_POLL - atomic_t poll_state; -#endif bool in_reset; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum bnxt_poll_state_t { - BNXT_STATE_IDLE = 0, - BNXT_STATE_NAPI, - BNXT_STATE_POLL, - BNXT_STATE_DISABLE, -}; -#endif - struct bnxt_irq { irq_handler_t handler; unsigned int vector; @@ -1141,93 +1129,6 @@ struct bnxt { ((offsetof(struct tx_port_stats, counter) + \ sizeof(struct rx_port_stats) + 512) / 8) -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -/* called from the NAPI poll routine to get ownership of a bnapi */ -static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) -{ - int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_NAPI); - - return rc == BNXT_STATE_IDLE; -} - -static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -/* called from the busy poll routine to get ownership of a bnapi */ -static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) -{ - int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_POLL); - - return rc == BNXT_STATE_IDLE; -} - -static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) -{ - return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL; -} - -static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) -{ - int old; - - while (1) { - old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_DISABLE); - if (old == BNXT_STATE_IDLE) - break; - usleep_range(500, 5000); - } -} - -#else - -static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) -{ - return true; -} - -static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) -{ - return false; -} - -static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) -{ - return false; -} - -static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) -{ -} - -#endif - #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e -- cgit v1.2.3 From e7b9569102995ebc26821789628eef45bd9840d8 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:32 -0500 Subject: bnxt_en: Use napi_complete_done() For better busy polling and GRO support. Do not re-arm IRQ if napi_complete_done() returns false. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b53f958ffcb8..3fbc8427be2e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1778,8 +1778,9 @@ static int bnxt_poll(struct napi_struct *napi, int budget) break; if (!bnxt_has_work(bp, cpr)) { - napi_complete(napi); - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + if (napi_complete_done(napi, work_done)) + BNXT_CP_DB_REARM(cpr->cp_doorbell, + cpr->cp_raw_cons); break; } } -- cgit v1.2.3 From 9d8bc09766f1a229b2d204c713a1cfc6c7fa1bb1 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:33 -0500 Subject: bnxt_en: Improve the IRQ disable sequence during shutdown. The IRQ is disabled by writing to the completion ring doorbell. This should be done before the hardware completion ring is freed for correctness. The current code disables IRQs after all the completion rings are freed. Fix it by calling bnxt_disable_int_sync() before freeing the completion rings. Rearrange the code to avoid forward declaration. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 89 ++++++++++++++++--------------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 3fbc8427be2e..277573b3d261 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2953,6 +2953,45 @@ alloc_mem_err: return rc; } +static void bnxt_disable_int(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + +static void bnxt_disable_int_sync(struct bnxt *bp) +{ + int i; + + atomic_inc(&bp->intr_sem); + + bnxt_disable_int(bp); + for (i = 0; i < bp->cp_nr_rings; i++) + synchronize_irq(bp->irq_tbl[i].vector); +} + +static void bnxt_enable_int(struct bnxt *bp) +{ + int i; + + atomic_set(&bp->intr_sem, 0); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, u16 cmpl_ring, u16 target_id) { @@ -3937,6 +3976,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } + /* The completion rings are about to be freed. After that the + * IRQ doorbell will not work anymore. So we need to disable + * IRQ here. 
+ */ + bnxt_disable_int_sync(bp); + for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -4658,34 +4703,6 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) return bnxt_init_chip(bp, irq_re_init); } -static void bnxt_disable_int(struct bnxt *bp) -{ - int i; - - if (!bp->bnapi) - return; - - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); - } -} - -static void bnxt_enable_int(struct bnxt *bp) -{ - int i; - - atomic_set(&bp->intr_sem, 0); - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); - } -} - static int bnxt_set_real_num_queues(struct bnxt *bp) { int rc; @@ -5640,19 +5657,6 @@ static int bnxt_open(struct net_device *dev) return __bnxt_open_nic(bp, true, true); } -static void bnxt_disable_int_sync(struct bnxt *bp) -{ - int i; - - atomic_inc(&bp->intr_sem); - if (!netif_running(bp->dev)) - return; - - bnxt_disable_int(bp); - for (i = 0; i < bp->cp_nr_rings; i++) - synchronize_irq(bp->irq_tbl[i].vector); -} - int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -5674,13 +5678,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) msleep(20); - /* Flush rings before disabling interrupts */ + /* Flush rings and and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ bnxt_disable_napi(bp); - bnxt_disable_int_sync(bp); del_timer_sync(&bp->timer); bnxt_free_skbs(bp); -- cgit v1.2.3 From 68515a186cf8a8f97956eaea5829277752399f58 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:34 -0500 Subject: bnxt_en: Fix and clarify link_info->advertising. The advertising field is closely related to the auto_link_speeds field. The former is the user setting while the latter is the firmware setting. Both should be u16. We should use the advertising field in bnxt_get_link_ksettings because the auto_link_speeds field may not be updated with the latest from the firmware yet. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 ++-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5 +++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 277573b3d261..4a8059fcc858 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5363,7 +5363,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, { u8 autoneg = bp->link_info.autoneg; u16 fw_link_speed = bp->link_info.req_link_speed; - u32 advertising = bp->link_info.advertising; + u16 advertising = bp->link_info.advertising; if (autoneg & BNXT_AUTONEG_SPEED) { req->auto_mode |= diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fddc316170b2..0eb64013fa5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -828,7 +828,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB #define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB u16 support_speeds; - u16 auto_link_speeds; + u16 auto_link_speeds; /* fw adv setting */ #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB #define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB @@ -851,7 +851,7 @@ struct bnxt_link_info { u8 req_duplex; u8 req_flow_ctrl; u16 req_link_speed; - u32 advertising; + u16 advertising; /* user adv setting */ bool force_link_chng; /* a copy of phy_qcfg output used to report link diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 784aa77610bc..1cfa7a67cadb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -893,7 +893,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, struct ethtool_link_ksettings *lk_ksettings) { - u16 fw_speeds = link_info->auto_link_speeds; + u16 fw_speeds = link_info->advertising; u8 fw_pause = 0; if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) @@ -1090,8 +1090,9 @@ static int bnxt_set_link_ksettings(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; const struct ethtool_link_settings *base = &lk_ksettings->base; - u32 speed, fw_advertising = 0; bool set_pause = false; + u16 fw_advertising = 0; + u32 speed; int rc = 0; if (!BNXT_SINGLE_PF(bp)) -- cgit v1.2.3 From 5910906ca9ee32943f67db24917f78a9ad1087db Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:35 -0500 Subject: bnxt_en: Refactor TPA code path. Call tcp_gro_complete() in the common code path instead of the chip- specific method. The newer 5731x method is missing the call. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4a8059fcc858..0654c3f32cea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1127,7 +1127,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, dev_kfree_skb_any(skb); return NULL; } - tcp_gro_complete(skb); if (nw_off) { /* tunnel */ struct udphdr *uh = NULL; @@ -1177,6 +1176,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); + if (likely(skb)) + tcp_gro_complete(skb); #endif return skb; } -- cgit v1.2.3 From 8fdefd63c203d9b2955d679704f4ed92bf40752c Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:36 -0500 Subject: bnxt_en: Add function to get vnic capability. The new vnic RSS capability will enhance NTUPLE support, to be added in subsequent patches. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 22 +++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 34 +++++++++++++++++++++++++++ 3 files changed, 57 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0654c3f32cea..916832684ad5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3665,6 +3665,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, return rc; } +static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) +{ + struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_vnic_qcaps_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10600) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + if (resp->flags & + cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) + bp->flags |= BNXT_FLAG_NEW_RSS_CAP; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) { u16 i; @@ -7070,6 +7091,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; } + bnxt_hwrm_vnic_qcaps(bp); if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 0eb64013fa5e..80bf1ab7df8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -944,6 +944,7 @@ struct bnxt { #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_UDP_RSS_CAP 0x800 #define BNXT_FLAG_EEE_CAP 0x1000 + #define BNXT_FLAG_NEW_RSS_CAP 0x2000 #define BNXT_FLAG_ROCEV1_CAP 0x8000 #define BNXT_FLAG_ROCEV2_CAP 0x10000 #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 2ddfa51519a1..d0d49ed71ce4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2797,6 +2797,40 @@ struct hwrm_vnic_cfg_output { u8 valid; }; +/* hwrm_vnic_qcaps */ +/* 
Input (24 bytes) */ +struct hwrm_vnic_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + __le32 unused_0; +}; + +/* Output (24 bytes) */ +struct hwrm_vnic_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0; + u8 unused_1; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + __le32 unused_2; + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + /* hwrm_vnic_tpa_cfg */ /* Input (40 bytes) */ struct hwrm_vnic_tpa_cfg_input { -- cgit v1.2.3 From 8079e8f107bf02e1e5ece89239dd2fb475a4735f Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:37 -0500 Subject: bnxt_en: Refactor code that determines RFS capability. Add function bnxt_rfs_supported() that determines if the chip supports RFS. Refactor the existing function bnxt_rfs_capable() that determines if run-time conditions support RFS. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 38 +++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 916832684ad5..f7ea99ff6fa3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4835,6 +4835,24 @@ static int bnxt_setup_int_mode(struct bnxt *bp) return rc; } +static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + if (BNXT_VF(bp)) + return bp->vf.max_rsscos_ctxs; +#endif + return bp->pf.max_rsscos_ctxs; +} + +static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + if (BNXT_VF(bp)) + return bp->vf.max_vnics; +#endif + return bp->pf.max_vnics; +} + unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) { #if defined(CONFIG_BNXT_SRIOV) @@ -5962,20 +5980,30 @@ skip_uc: return rc; } +/* If the chip and firmware supports RFS */ +static bool bnxt_rfs_supported(struct bnxt *bp) +{ + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) + return true; + return false; +} + +/* If runtime conditions support RFS */ static bool bnxt_rfs_capable(struct bnxt *bp) { #ifdef CONFIG_RFS_ACCEL - struct bnxt_pf_info *pf = &bp->pf; - int vnics; + int vnics, max_vnics, max_rss_ctxs; if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP)) return false; vnics = 1 + bp->rx_nr_rings; - if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) { + max_vnics = bnxt_get_max_func_vnics(bp); + max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); + if (vnics > max_vnics || vnics > max_rss_ctxs) { netdev_warn(bp->dev, "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", - min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1)); + min(max_rss_ctxs - 1, max_vnics - 1)); return false; } @@ -7092,7 +7120,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_hwrm_vnic_qcaps(bp); - if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { + if (bnxt_rfs_supported(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { bp->flags |= BNXT_FLAG_RFS; -- cgit v1.2.3 From ae10ae740ad2befd92b6f5b2ab39220bce6e5da2 Mon Sep 17 00:00:00 2001 From: Michael Chan 
Date: Thu, 29 Dec 2016 12:13:38 -0500 Subject: bnxt_en: Add new hardware RFS mode. The existing hardware RFS mode uses one hardware RSS context block per ring just to calculate the RSS hash. This is very wasteful and prevents VF functions from using it. The new hardware mode shares the same hardware RSS context for RSS placement and RFS steering. This allows VFs to enable RFS. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27 ++++++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f7ea99ff6fa3..0ca530e9f73e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2630,6 +2630,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) goto out; } + if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && + !(vnic->flags & BNXT_VNIC_RSS_FLAG)) + continue; + /* Allocate rss table and hash key */ vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &vnic->rss_table_dma_addr, @@ -3562,6 +3566,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); + } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { + req.rss_rule = + cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); + req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); } else { req.rss_rule = cpu_to_le16(0xffff); } @@ -4490,8 +4500,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc; + if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) + goto skip_rss_ctx; + /* allocate context for vnic */ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); if (rc) { @@ -4511,6 +4525,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) bp->rsscos_nr_ctxs++; } +skip_rss_ctx: /* configure default vnic, ring grp */ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); if (rc) { @@ -4545,13 +4560,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) int i, rc = 0; for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_vnic_info *vnic; u16 vnic_id = i + 1; u16 ring_id = i; if (vnic_id >= bp->nr_vnics) break; - bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; + vnic = &bp->vnic_info[vnic_id]; + vnic->flags |= BNXT_VNIC_RFS_FLAG; + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", @@ -5985,6 +6004,8 @@ static bool bnxt_rfs_supported(struct bnxt *bp) { if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) return true; + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + return true; return false; } @@ -6000,6 +6021,10 @@ static bool bnxt_rfs_capable(struct bnxt *bp) vnics = 1 + bp->rx_nr_rings; max_vnics = bnxt_get_max_func_vnics(bp); max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); + + /* RSS contexts not a limiting factor */ + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + max_rss_ctxs = max_vnics; if (vnics > max_vnics || vnics > max_rss_ctxs) { netdev_warn(bp->dev, "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 80bf1ab7df8c..75803e5f3bf5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -708,6 +708,7 @@ struct bnxt_vnic_info { #define BNXT_VNIC_RFS_FLAG 2 #define BNXT_VNIC_MCAST_FLAG 4 #define BNXT_VNIC_UCAST_FLAG 8 +#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 }; #if defined(CONFIG_BNXT_SRIOV) -- cgit v1.2.3 From 8427af811a2fcbbf0c71a4b1f904f2442abdcf39 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:39 -0500 Subject: bnxt_en: Assign additional vnics to VFs. Assign additional vnics to VFs whenever possible so that NTUPLE can be supported on the VFs. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c69602508666..0c9f6c1db546 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -429,6 +429,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs; vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs; + vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs; + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | FUNC_CFG_REQ_ENABLES_MRU | @@ -451,7 +453,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) req.num_rx_rings = cpu_to_le16(vf_rx_rings); req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); req.num_l2_ctxs = cpu_to_le16(4); - vf_vnics = 1; req.num_vnics = cpu_to_le16(vf_vnics); /* FIXME spec currently uses 1 bit for stats ctx */ @@ -506,6 +507,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) min_rx_rings) rx_ok = 1; } + if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) + rx_ok = 0; if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) tx_ok = 1; -- cgit v1.2.3 From dda0e7465f040ed814d4a5c98c6bf042e59cba69 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:40 -0500 Subject: bnxt_en: Add IPV6 hardware RFS support. Accept ipv6 flows in .ndo_rx_flow_steer() and support ETHTOOL_GRXCLSRULE ipv6 flows. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 32 +++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 53 +++++++++++++++++------ 2 files changed, 66 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0ca530e9f73e..4d478e79d424 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3316,10 +3316,26 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; req.ip_protocol = keys->basic.ip_proto; - req.src_ipaddr[0] = keys->addrs.v4addrs.src; - req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + if (keys->basic.n_proto == htons(ETH_P_IPV6)) { + int i; + + req.ethertype = htons(ETH_P_IPV6); + req.ip_addr_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; + *(struct in6_addr *)&req.src_ipaddr[0] = + keys->addrs.v6addrs.src; + *(struct in6_addr *)&req.dst_ipaddr[0] = + keys->addrs.v6addrs.dst; + for (i = 0; i < 4; i++) { + req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + } + } else { + req.src_ipaddr[0] = keys->addrs.v4addrs.src; + req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + } req.src_port = keys->ports.src; req.src_port_mask = cpu_to_be16(0xffff); @@ -6588,12 +6604,18 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, goto err_free; } - if ((fkeys->basic.n_proto != htons(ETH_P_IP)) || + if ((fkeys->basic.n_proto != htons(ETH_P_IP) && + fkeys->basic.n_proto != htons(ETH_P_IPV6)) || ((fkeys->basic.ip_proto != IPPROTO_TCP) && (fkeys->basic.ip_proto != IPPROTO_UDP))) { rc = -EPROTONOSUPPORT; goto err_free; } + if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && + bp->hwrm_spec_code < 0x10601) { + rc = -EPROTONOSUPPORT; + goto err_free; + } memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1cfa7a67cadb..e6b11969c9a0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -524,24 +524,49 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) fltr_found: fkeys = &fltr->fkeys; - if (fkeys->basic.ip_proto == IPPROTO_TCP) - fs->flow_type = TCP_V4_FLOW; - else if (fkeys->basic.ip_proto == IPPROTO_UDP) - fs->flow_type = UDP_V4_FLOW; - else - goto fltr_err; + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V4_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V4_FLOW; + else + goto fltr_err; + + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); - fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; - fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); - fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; - fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + fs->h_u.tcp_ip4_spec.pdst 
= fkeys->ports.dst; + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + } else { + int i; - fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; - fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V6_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V6_FLOW; + else + goto fltr_err; + + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = + fkeys->addrs.v6addrs.src; + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = + fkeys->addrs.v6addrs.dst; + for (i = 0; i < 4; i++) { + fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0); + fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0); + } + fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); - fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; - fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); + } fs->ring_cookie = fltr->rxq; rc = 0; -- cgit v1.2.3 From 391be5c2736456f032fe0265031ecfe17aee84a0 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:41 -0500 Subject: bnxt_en: Implement new scheme to reserve tx rings. In order to properly support TX rate limiting in SRIOV VF functions or NPAR functions, firmware needs better control over tx ring allocations. The new scheme requires the driver to reserve the number of tx rings and to query to see if the requested number of tx rings is reserved. The driver will use the new scheme when the firmware interface spec is 1.6.1 or newer. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 59 ++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 15 ++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 10 +++- 4 files changed, 83 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4d478e79d424..338dbd03ff58 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4045,6 +4045,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } +/* Caller must hold bp->hwrm_cmd_lock */ +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) +{ + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(fid); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + + return rc; +} + +int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + if (BNXT_VF(bp)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); + req.num_tx_rings = cpu_to_le16(*tx_rings); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, u32 buf_tmrs, u16 flags, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 
@@ -6509,10 +6553,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) sh = true; if (tc) { - int max_rx_rings, max_tx_rings, rc; + int max_rx_rings, max_tx_rings, req_tx_rings, rsv_tx_rings, rc; + req_tx_rings = bp->tx_nr_rings_per_tc * tc; rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); - if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) + if (rc || req_tx_rings > max_tx_rings) + return -ENOMEM; + + rsv_tx_rings = req_tx_rings; + if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings) || + rsv_tx_rings < req_tx_rings) return -ENOMEM; } @@ -7000,6 +7050,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp) return rc; bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + + rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc); + if (rc) + netdev_warn(bp->dev, "Unable to reserve tx rings\n"); + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 75803e5f3bf5..d174729de761 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1149,6 +1149,8 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, int bmap_size); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); +int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings); int bnxt_hwrm_set_coal(struct bnxt *); unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index e6b11969c9a0..dd21be4a5fdd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -388,6 +388,7 @@ static int bnxt_set_channels(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; + int req_tx_rings, rsv_tx_rings; u32 rc = 0; bool sh = false; @@ -423,6 +424,20 @@ static int bnxt_set_channels(struct net_device *dev, channel->tx_count > max_tx_rings)) return -ENOMEM; + req_tx_rings = sh ? 
channel->combined_count : channel->tx_count; + req_tx_rings = min_t(int, req_tx_rings, max_tx_rings); + if (tcs > 1) + req_tx_rings *= tcs; + + rsv_tx_rings = req_tx_rings; + if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings)) + return -ENOMEM; + + if (rsv_tx_rings < req_tx_rings) { + netdev_warn(dev, "Unable to allocate the requested tx rings\n"); + return -ENOMEM; + } + if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 0c9f6c1db546..64ef0e5dad8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -416,6 +416,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) u16 vf_ring_grps; struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + int total_vf_tx_rings = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); @@ -460,6 +461,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < num_vfs; i++) { + int vf_tx_rsvd = vf_tx_rings; + req.fid = cpu_to_le16(pf->first_vf_id + i); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -467,10 +470,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) break; pf->active_vfs = i + 1; pf->vf[i].fw_fid = le16_to_cpu(req.fid); + rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, + &vf_tx_rsvd); + if (rc) + break; + total_vf_tx_rings += vf_tx_rsvd; } mutex_unlock(&bp->hwrm_cmd_lock); if (!rc) { - pf->max_tx_rings -= vf_tx_rings * num_vfs; + pf->max_tx_rings -= total_vf_tx_rings; pf->max_rx_rings -= vf_rx_rings * num_vfs; pf->max_hw_ring_grps -= vf_ring_grps * num_vfs; pf->max_cp_rings -= vf_cp_rings * num_vfs; -- cgit v1.2.3 From 486b5c22ea1d35e00e90dd79a32a9ee530b18915 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:42 -0500 Subject: bnxt_en: Set default completion ring for async events. With the added support for the bnxt_re RDMA driver, both drivers can be allocating completion rings in any order. The firmware does not know which completion ring should be receiving async events. Add an extra step to tell firmware the completion ring number for receiving async events after bnxt_en allocates the completion rings. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 338dbd03ff58..1f54a7acc3f7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3858,6 +3858,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, return rc; } +static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) +{ + int rc; + + if (BNXT_PF(bp)) { + struct hwrm_func_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = cpu_to_le16(idx); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + } else { + struct hwrm_func_vf_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + req.enables = + cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = cpu_to_le16(idx); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + } + return rc; +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { int i, rc = 0; @@ -3874,6 +3898,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) goto err_out; BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; + + if (!i) { + rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); + if (rc) + netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); + } } for (i = 0; i < bp->tx_nr_rings; i++) { -- cgit v1.2.3 From bdbd1eb59c565c56a74d21076e2ae8706de00ecd Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:43 -0500 Subject: bnxt_en: Handle no aggregation ring gracefully. The current code assumes that we will always have at least 2 rx rings, 1 will be used as an aggregation ring for TPA and jumbo page placements. However, it is possible, especially on a VF, that there is only 1 rx ring available. In this scenario, the current code will fail to initialize. To handle it, we need to properly set up only 1 ring without aggregation. Set a new flag BNXT_FLAG_NO_AGG_RINGS for this condition and add logic to set up the chip to place RX data linearly into a single buffer per packet. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25 +++++++++++++++++++++---- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1f54a7acc3f7..98e948489700 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2496,7 +2496,7 @@ void bnxt_set_ring_params(struct bnxt *bp) agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); bp->flags &= ~BNXT_FLAG_JUMBO; - if (rx_space > PAGE_SIZE) { + if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { u32 jumbo_factor; bp->flags |= BNXT_FLAG_JUMBO; @@ -6174,6 +6174,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (features & NETIF_F_LRO) flags |= BNXT_FLAG_LRO; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + flags &= ~BNXT_FLAG_TPA; + if (features & NETIF_F_HW_VLAN_CTAG_RX) flags |= BNXT_FLAG_STRIP_VLAN; @@ -7040,8 +7043,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, int rc; rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); - if (rc) - return rc; + if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { + /* Not enough rings, try disabling agg rings. */ + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); + if (rc) + return rc; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + bnxt_set_ring_params(bp); + } if (bp->flags & BNXT_FLAG_ROCE_CAP) { int max_cp, max_stat, max_irq; @@ -7236,7 +7248,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); bnxt_set_max_func_irqs(bp, max_irqs); - bnxt_set_dflt_rings(bp); + rc = bnxt_set_dflt_rings(bp); + if (rc) { + netdev_err(bp->dev, "Not enough rings available.\n"); + rc = -ENOMEM; + goto init_err; + } /* Default RSS hash cfg. */ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d174729de761..f6b9b1c530fe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -950,6 +950,7 @@ struct bnxt { #define BNXT_FLAG_ROCEV2_CAP 0x10000 #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \ BNXT_FLAG_ROCEV2_CAP) + #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ -- cgit v1.2.3 From 3f0d80b6d228f11117244a16d2e17ea684b540f5 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 29 Dec 2016 12:13:44 -0500 Subject: MAINTAINERS: Add bnxt_en maintainer info. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index cfff2c9e3d94..11904a95672a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2605,6 +2605,12 @@ L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ +BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER +M: Michael Chan +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/ethernet/broadcom/bnxt/ + BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE M: Florian Fainelli M: Ray Jui -- cgit v1.2.3 From bfd2e4b8734d34632f00391994b89c558dcb7d4e Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Thu, 29 Dec 2016 15:53:28 -0200 Subject: sctp: refactor sctp_datamsg_from_user This patch refactors sctp_datamsg_from_user() in an attempt to make it better to read and avoid code duplication for handling the last fragment. It also avoids doing division and remaining operations. Even though, it should still operate similarly as before this patch. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/chunk.c | 107 +++++++++++++++++-------------------------------------- 1 file changed, 32 insertions(+), 75 deletions(-) diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 615f0ddd41df..e3621cb4827f 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -165,14 +165,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, struct sctp_sndrcvinfo *sinfo, struct iov_iter *from) { - int max, whole, i, offset, over, err; - int len, first_len; - int max_data; + size_t len, first_len, max_data, remaining; + size_t msg_len = iov_iter_count(from); + struct list_head *pos, *temp; struct sctp_chunk *chunk; struct sctp_datamsg *msg; - struct list_head *pos, *temp; - size_t msg_len = iov_iter_count(from); - __u8 frag; + int err; msg = sctp_datamsg_new(GFP_KERNEL); if (!msg) @@ -185,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags) || !SCTP_PR_POLICY(sinfo->sinfo_flags))) msg->expires_at = jiffies + - msecs_to_jiffies(sinfo->sinfo_timetolive); + msecs_to_jiffies(sinfo->sinfo_timetolive); /* This is the biggest possible DATA chunk that can fit into * the packet @@ -195,7 +193,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk); max_data = SCTP_TRUNC4(max_data); - max = asoc->frag_point; /* If the the peer requested that we authenticate DATA chunks * we need to account for bundling of the AUTH chunks along with * DATA. @@ -208,12 +205,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, hmac_desc->hmac_len); } - /* Now, check if we need to reduce our max */ - if (max > max_data) - max = max_data; + /* Check what's our max considering the above */ + max_data = min_t(size_t, max_data, asoc->frag_point); - whole = 0; - first_len = max; + /* Set first_len and then account for possible bundles on first frag */ + first_len = max_data; /* Check to see if we have a pending SACK and try to let it be bundled * with this message. Do this if we don't have any data queued already. 
@@ -224,40 +220,38 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) && asoc->outqueue.out_qlen == 0 && list_empty(&asoc->outqueue.retransmit) && - msg_len > max) - max_data -= SCTP_PAD4(sizeof(sctp_sack_chunk_t)); + msg_len > max_data) + first_len -= SCTP_PAD4(sizeof(sctp_sack_chunk_t)); /* Encourage Cookie-ECHO bundling. */ if (asoc->state < SCTP_STATE_COOKIE_ECHOED) - max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN; - - /* Now that we adjusted completely, reset first_len */ - if (first_len > max_data) - first_len = max_data; + first_len -= SCTP_ARBITRARY_COOKIE_ECHO_LEN; /* Account for a different sized first fragment */ if (msg_len >= first_len) { - msg_len -= first_len; - whole = 1; msg->can_delay = 0; - } - - /* How many full sized? How many bytes leftover? */ - whole += msg_len / max; - over = msg_len % max; - offset = 0; - - if ((whole > 1) || (whole && over)) SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS); + } else { + /* Which may be the only one... */ + first_len = msg_len; + } - /* Create chunks for all the full sized DATA chunks. */ - for (i = 0, len = first_len; i < whole; i++) { - frag = SCTP_DATA_MIDDLE_FRAG; + /* Create chunks for all DATA chunks. */ + for (remaining = msg_len; remaining; remaining -= len) { + u8 frag = SCTP_DATA_MIDDLE_FRAG; - if (0 == i) + if (remaining == msg_len) { + /* First frag, which may also be the last */ frag |= SCTP_DATA_FIRST_FRAG; + len = first_len; + } else { + /* Middle frags */ + len = max_data; + } - if ((i == (whole - 1)) && !over) { + if (len >= remaining) { + /* Last frag, which may also be the first */ + len = remaining; frag |= SCTP_DATA_LAST_FRAG; /* The application requests to set the I-bit of the @@ -271,7 +265,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0, GFP_KERNEL); - if (!chunk) { err = -ENOMEM; goto errout; @@ -282,45 +275,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, goto errout_chunk_free; /* Put the chunk->skb back into the form expected by send. */ - __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - - (__u8 *)chunk->skb->data); - - sctp_datamsg_assign(msg, chunk); - list_add_tail(&chunk->frag_list, &msg->chunks); - - /* The first chunk, the first chunk was likely short - * to allow bundling, so reset to full size. - */ - if (0 == i) - len = max; - } - - /* .. now the leftover bytes. */ - if (over) { - if (!whole) - frag = SCTP_DATA_NOT_FRAG; - else - frag = SCTP_DATA_LAST_FRAG; - - if ((sinfo->sinfo_flags & SCTP_EOF) || - (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY)) - frag |= SCTP_DATA_SACK_IMM; - - chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, - 0, GFP_KERNEL); - - if (!chunk) { - err = -ENOMEM; - goto errout; - } - - err = sctp_user_addto_chunk(chunk, over, from); - - /* Put the chunk->skb back into the form expected by send. 
*/ - __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - - (__u8 *)chunk->skb->data); - if (err < 0) - goto errout_chunk_free; + __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - + chunk->skb->data); sctp_datamsg_assign(msg, chunk); list_add_tail(&chunk->frag_list, &msg->chunks); @@ -338,6 +294,7 @@ errout: sctp_chunk_free(chunk); } sctp_datamsg_put(msg); + return ERR_PTR(err); } -- cgit v1.2.3 From afbb167415843ef23546f5bfaf2a06c86750c2f0 Mon Sep 17 00:00:00 2001 From: jpinto Date: Thu, 29 Dec 2016 17:10:27 +0000 Subject: stmmac: adding EEE to GMAC4 This patch adds Energy Efficiency Ethernet to GMAC4. Signed-off-by: Joao Pinto Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 12 +++++ drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 59 +++++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index b524598b1c9e..73d1dabcdba3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -90,6 +90,18 @@ enum power_event { power_down = 0x00000001, }; +/* Energy Efficient Ethernet (EEE) for GMAC4 + * + * LPI status, timer and control register offset + */ +#define GMAC4_LPI_CTRL_STATUS 0xd0 +#define GMAC4_LPI_TIMER_CTRL 0xd4 + +/* LPI control and status defines */ +#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ +#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ +#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ + /* MAC Debug bitmap */ #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) #define GMAC_DEBUG_TFCSTS_SHIFT 17 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index ecfbf577e26f..02eab798050d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -137,6 +137,61 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw, GMAC_ADDR_LOW(reg_n)); } +static void dwmac4_set_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. + */ + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (link) + value |= GMAC4_LPI_CTRL_STATUS_PLS; + else + value &= ~GMAC4_LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. 
+ */ + writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); +} + static void dwmac4_set_filter(struct mac_device_info *hw, struct net_device *dev) { @@ -410,6 +465,10 @@ static const struct stmmac_ops dwmac4_ops = { .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, -- cgit v1.2.3 From 3d48b53fb2ae37158e700ffef3f45461ff15c965 Mon Sep 17 00:00:00 2001 From: Matthias Tafelmeier Date: Thu, 29 Dec 2016 21:37:21 +0100 Subject: net: dev_weight: TX/RX orthogonality Oftenly, introducing side effects on packet processing on the other half of the stack by adjusting one of TX/RX via sysctl is not desirable. There are cases of demand for asymmetric, orthogonal configurability. This holds true especially for nodes where RPS for RFS usage on top is configured and therefore use the 'old dev_weight'. This is quite a common base configuration setup nowadays, even with NICs of superior processing support (e.g. aRFS). A good example use case are nodes acting as noSQL data bases with a large number of tiny requests and rather fewer but large packets as responses. It's affordable to have large budget and rx dev_weights for the requests. But as a side effect having this large a number on TX processed in one run can overwhelm drivers. This patch therefore introduces an independent configurability via sysctl to userland. Signed-off-by: Matthias Tafelmeier Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 21 +++++++++++++++++++++ include/linux/netdevice.h | 4 ++++ net/core/dev.c | 8 ++++++-- net/core/sysctl_net_core.c | 31 ++++++++++++++++++++++++++++++- net/sched/sch_generic.c | 2 +- 5 files changed, 62 insertions(+), 4 deletions(-) diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index f0480f7ea740..b80fbd4e5575 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -61,6 +61,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt, it's a Per-CPU variable. Default: 64 +dev_weight_rx_bias +-------------- + +RPS (e.g. RFS, aRFS) processing is competing with the registered NAPI poll function +of the driver for the per softirq cycle netdev_budget. This parameter influences +the proportion of the configured netdev_budget that is spent on RPS based packet +processing during RX softirq cycles. It is further meant for making current +dev_weight adaptable for asymmetric CPU needs on RX/TX side of the network stack. +(see dev_weight_tx_bias) It is effective on a per CPU basis. Determination is based +on dev_weight and is calculated multiplicative (dev_weight * dev_weight_rx_bias). +Default: 1 + +dev_weight_tx_bias +-------------- + +Scales the maximum number of packets that can be processed during a TX softirq cycle. +Effective on a per CPU basis. Allows scaling of current dev_weight for asymmetric +net stack processing needs. Be careful to avoid making TX softirq processing a CPU hog. +Calculation is based on dev_weight (dev_weight * dev_weight_tx_bias). 
+Default: 1 + default_qdisc -------------- diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 994f7423a74b..ecd78b3c9aba 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3795,6 +3795,10 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; extern int weight_p; +extern int dev_weight_rx_bias; +extern int dev_weight_tx_bias; +extern int dev_rx_weight; +extern int dev_tx_weight; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, diff --git a/net/core/dev.c b/net/core/dev.c index 8db5a0b4b520..56818f7eab2b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3427,7 +3427,11 @@ EXPORT_SYMBOL(netdev_max_backlog); int netdev_tstamp_prequeue __read_mostly = 1; int netdev_budget __read_mostly = 300; -int weight_p __read_mostly = 64; /* old backlog weight */ +int weight_p __read_mostly = 64; /* old backlog weight */ +int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ +int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ +int dev_rx_weight __read_mostly = 64; +int dev_tx_weight __read_mostly = 64; /* Called with irq disabled */ static inline void ____napi_schedule(struct softnet_data *sd, @@ -4833,7 +4837,7 @@ static int process_backlog(struct napi_struct *napi, int quota) net_rps_action_and_irq_enable(sd); } - napi->weight = weight_p; + napi->weight = dev_rx_weight; while (again) { struct sk_buff *skb; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 2a46e4009f62..eaa72eb0399c 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -222,6 +222,21 @@ static int set_default_qdisc(struct ctl_table *table, int write, } #endif +static int proc_do_dev_weight(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (ret != 0) + return ret; + + dev_rx_weight = weight_p * dev_weight_rx_bias; + dev_tx_weight = weight_p * dev_weight_tx_bias; + + return ret; +} + static int proc_do_rss_key(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -273,7 +288,21 @@ static struct ctl_table net_core_table[] = { .data = &weight_p, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_do_dev_weight, + }, + { + .procname = "dev_weight_rx_bias", + .data = &dev_weight_rx_bias, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_do_dev_weight, + }, + { + .procname = "dev_weight_tx_bias", + .data = &dev_weight_tx_bias, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_do_dev_weight, }, { .procname = "netdev_max_backlog", diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 6eb9c8e88519..b052b27a984e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -247,7 +247,7 @@ static inline int qdisc_restart(struct Qdisc *q, int *packets) void __qdisc_run(struct Qdisc *q) { - int quota = weight_p; + int quota = dev_tx_weight; int packets; while (qdisc_restart(q, &packets)) { -- cgit v1.2.3 From 3a543ef479868e36c95935de320608a7e41466ca Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 29 Dec 2016 14:20:56 -0800 Subject: net: dsa: Implement ndo_get_phys_port_id Implement ndo_get_phys_port_id() by returning the physical port number of the switch this per-port DSA created 
network interface corresponds to. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/slave.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 68c9eea00518..ffd91969b830 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -984,6 +984,17 @@ static void dsa_slave_poll_controller(struct net_device *dev) } #endif +static int dsa_slave_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + + ppid->id_len = sizeof(p->port); + memcpy(ppid->id, &p->port, ppid->id_len); + + return 0; +} + void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops) { ops->get_sset_count = dsa_cpu_port_get_sset_count; @@ -1031,6 +1042,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, + .ndo_get_phys_port_id = dsa_slave_get_phys_port_id, }; static const struct switchdev_ops dsa_slave_switchdev_ops = { -- cgit v1.2.3 From 15d3afcc051f74d04a285c08594629172a1a9131 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Thu, 29 Dec 2016 17:04:47 -0800 Subject: liquidio: optimize reads from Octeon PCI console Reads from Octeon PCI console are inefficient because before each read operation, a dynamic mapping to Octeon DRAM is set up. This patch replaces the repeated setup of a dynamic mapping with a one-time setup of a static mapping. Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: Derek Chickles Signed-off-by: Satanand Burla Signed-off-by: David S. Miller --- .../net/ethernet/cavium/liquidio/octeon_config.h | 10 +++------- .../net/ethernet/cavium/liquidio/octeon_console.c | 10 ++++++++++ .../net/ethernet/cavium/liquidio/octeon_device.h | 6 ++++++ .../net/ethernet/cavium/liquidio/octeon_mem_ops.c | 21 ++++++++++++++++++++- 4 files changed, 39 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index 1cb3514fc949..b3dc2e9651a8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -429,15 +429,11 @@ struct octeon_config { /* The following config values are fixed and should not be modified. */ -/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */ -#define MAX_BAR1_MAP_INDEX 2 +#define BAR1_INDEX_DYNAMIC_MAP 2 +#define BAR1_INDEX_STATIC_MAP 15 #define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024) -/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access. - * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access. - */ -#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \ - OCTEON_BAR1_ENTRY_SIZE) +#define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE) /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking * NoResponse Lists are now maintained with each IQ. (Dec' 2007). 
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 3265e0b7923e..42b673dce533 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -549,6 +549,16 @@ int octeon_init_consoles(struct octeon_device *oct) return ret; } + /* Dedicate one of Octeon's BAR1 index registers to create a static + * mapping to a region of Octeon DRAM that contains the PCI console + * named block. + */ + oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP; + oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index, + true); + oct->console_nb_info.dram_region_base = addr + & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL); + /* num_consoles > 0, is an indication that the consoles * are accessible */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 18f6836250a6..c301a3852482 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -477,6 +477,12 @@ struct octeon_device { /* Console caches */ struct octeon_console console[MAX_OCTEON_MAPS]; + /* Console named block info */ + struct { + u64 dram_region_base; + int bar1_index; + } console_nb_info; + /* Coprocessor clock rate. */ u64 coproc_clock_rate; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c index 13a18c9a7a51..5cd96e7d426c 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -23,7 +23,7 @@ #include "response_manager.h" #include "octeon_device.h" -#define MEMOPS_IDX MAX_BAR1_MAP_INDEX +#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP #ifdef __BIG_ENDIAN_BITFIELD static inline void @@ -96,6 +96,25 @@ __octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr, u32 copy_len = 0, index_reg_val = 0; unsigned long flags; u8 __iomem *mapped_addr; + u64 static_mapping_base; + + static_mapping_base = oct->console_nb_info.dram_region_base; + + if (static_mapping_base && + static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) { + int bar1_index = oct->console_nb_info.bar1_index; + + mapped_addr = oct->mmio[1].hw_addr + + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE)) + + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL)); + + if (op) + octeon_pci_fastread(oct, mapped_addr, hostbuf, len); + else + octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len); + + return; + } spin_lock_irqsave(&oct->mem_access_lock, flags); -- cgit v1.2.3 From 097e46d2ae90265d1afe141ba6208ba598b79e01 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 29 Dec 2016 16:12:09 +0200 Subject: ath10k: fix potential memory leak in ath10k_wmi_tlv_op_pull_fw_stats() ath10k_wmi_tlv_op_pull_fw_stats() uses tb = ath10k_wmi_tlv_parse_alloc(...) function, which allocates memory. If any of the three error-paths are taken, this tb needs to be freed. 
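The pattern at issue is a buffer allocated once at the top of a function that must then be released on every early return, not only on the success path. A minimal stand-alone C sketch of that pattern follows (generic names, not the ath10k code itself; the kernel code in the diff below does the same with kfree() on each error path):

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the parse buffer ("tb") allocated up front:
 * every return taken after the allocation must release it.
 */
static int parse_stats(const unsigned char *data, size_t data_len)
{
	unsigned char *tb = malloc(64);

	if (!tb)
		return -1;		/* nothing allocated yet, plain return is fine */

	if (data_len < 16) {		/* error path: free before bailing out */
		free(tb);
		return -2;
	}

	memcpy(tb, data, 16);		/* normal path consumes the data ... */
	free(tb);			/* ... and frees the buffer exactly once */
	return 0;
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	return parse_stats(buf, sizeof(buf));
}

In kernel style the same thing is often expressed with a single cleanup label (goto free_tb;) so that newly added error paths cannot forget the free.
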
Signed-off-by: Christian Lamparter Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index f304f6632c4f..1f6bb9e8bb01 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_pdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_vdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_peer *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); -- cgit v1.2.3 From 88407beb1b1462f706a1950a355fd086e1c450b6 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Tue, 13 Dec 2016 14:55:19 -0800 Subject: ath10k: fix incorrect txpower set by P2P_DEVICE interface Ath10k reports the phy capability that supports P2P_DEVICE interface. When we use the P2P supported wpa_supplicant to start connection, it'll create two interfaces, one is wlan0 (vdev_id=0) and one is P2P_DEVICE p2p-dev-wlan0 which is for p2p control channel (vdev_id=1). ath10k_pci mac vdev create 0 (add interface) type 2 subtype 0 ath10k_add_interface: vdev_id: 0, txpower: 0, bss_power: 0 ... ath10k_pci mac vdev create 1 (add interface) type 2 subtype 1 ath10k_add_interface: vdev_id: 1, txpower: 0, bss_power: 0 And the txpower in per vif bss_conf will only be set to valid tx power when the interface is assigned with channel_ctx. But this P2P_DEVICE interface will never be used for any connection, so that the uninitialized bss_conf.txpower=0 is assinged to the arvif->txpower when interface created. Since the txpower configuration is firmware per physical interface. So the smallest txpower of all vifs will be the one limit the tx power of the physical device, that causing the low txpower issue on other active interfaces. wlan0: Limiting TX power to 21 (24 - 3) dBm ath10k_pci mac vdev_id 0 txpower 21 ath10k_mac_txpower_recalc: vdev_id: 1, txpower: 0 ath10k_mac_txpower_recalc: vdev_id: 0, txpower: 21 ath10k_pci mac txpower 0 This issue only happens when we use the wpa_supplicant that supports P2P or if we use the iw tool to create the control P2P_DEVICE interface. 
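The underlying rule is simple: the firmware takes one power limit per physical device, so the driver programs the minimum across all vifs, and an interface that never received a real power assignment (the P2P_DEVICE control vdev, still at 0) must not drag that minimum down. A small stand-alone sketch of that selection logic, with made-up names and values taken from the log above:

#include <stdio.h>

/* Return the lowest valid per-vif txpower, or -1 if no vif has one yet. */
static int recalc_txpower(const int *vif_txpower, int nvifs)
{
	int txpower = -1;
	int i;

	for (i = 0; i < nvifs; i++) {
		if (vif_txpower[i] <= 0)	/* unassigned, e.g. a bare P2P_DEVICE */
			continue;
		if (txpower == -1 || vif_txpower[i] < txpower)
			txpower = vif_txpower[i];
	}
	return txpower;
}

int main(void)
{
	int powers[] = { 23, 0 };	/* wlan0 at 23 dBm, p2p-dev-wlan0 never set */

	printf("effective txpower: %d dBm\n", recalc_txpower(powers, 2));
	return 0;
}

With the old WARN_ON(arvif->txpower < 0) check the zero entry was accepted and became the device-wide limit; with the change in the diff below it is skipped, and a result of -1 simply means there is nothing to program yet.
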
Signed-off-by: Ryan Hsu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 12a5fccf04cf..1dbed8c271cd 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4702,7 +4702,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - WARN_ON(arvif->txpower < 0); + if (arvif->txpower <= 0) + continue; if (txpower == -1) txpower = arvif->txpower; @@ -4710,8 +4711,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) txpower = min(txpower, arvif->txpower); } - if (WARN_ON(txpower == -1)) - return -EINVAL; + if (txpower == -1) + return 0; ret = ath10k_mac_txpower_setup(ar, txpower); if (ret) { -- cgit v1.2.3 From 727000e6af34748552d13069ce52de2ad3d6c271 Mon Sep 17 00:00:00 2001 From: Arun Khandavalli Date: Wed, 21 Dec 2016 14:19:21 +0200 Subject: ath10k: support dev_coredump for crash dump Whenever firmware crashes, and both CONFIG_ATH10K_DEBUGFS and CONFIG_ALLOW_DEV_COREDUMP are enabled, dump information about the crash via a devcoredump device. Dump can be read from userspace for further analysis from: /sys/class/devcoredump/devcd*/data As until now we have provided the firmware crash dump file via fw_crash_dump debugfs keep it still available but deprecate and a warning print that the user should switch to using dev_coredump. Future improvement would be not to depend on CONFIG_ATH10K_DEBUGFS, as there might be systems which want to get the firmware crash dump but not enable debugfs. How to handle memory consumption is also something which needs to be taken into account. 
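The dump itself is consumed from userspace through the sysfs path quoted above. As a rough illustration (plain POSIX C, output file name is ours, not part of this patch), copying the most recent device coredump out for offline analysis could look like this:

#include <glob.h>
#include <stdio.h>

int main(void)
{
	glob_t g;
	char buf[4096];
	size_t n;
	FILE *in, *out;

	/* Path pattern comes from the commit message above. */
	if (glob("/sys/class/devcoredump/devcd*/data", 0, NULL, &g) != 0) {
		fprintf(stderr, "no device coredump present\n");
		return 1;
	}

	in = fopen(g.gl_pathv[0], "rb");
	out = fopen("fw_crash.dump", "wb");
	if (in && out) {
		while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
			fwrite(buf, 1, n, out);
	}
	if (in)
		fclose(in);
	if (out)
		fclose(out);
	globfree(&g);
	return 0;
}
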
Signed-off-by: Arun Khandavalli [kvalo@qca.qualcomm.com: rebase, fixes, improve commit log] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 6 +++++ drivers/net/wireless/ath/ath10k/debug.c | 43 ++++++++++++++++++++++++++++++--- drivers/net/wireless/ath/ath10k/debug.h | 8 ++++++ 3 files changed, 54 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 749e381edd38..3319db178c2e 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1510,6 +1510,7 @@ static int ath10k_init_hw_params(struct ath10k *ar) static void ath10k_core_restart(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, restart_work); + int ret; set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); @@ -1561,6 +1562,11 @@ static void ath10k_core_restart(struct work_struct *work) } mutex_unlock(&ar->conf_mutex); + + ret = ath10k_debug_fw_devcoredump(ar); + if (ret) + ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d", + ret); } static void ath10k_core_set_coverage_class_work(struct work_struct *work) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 82a4c67f3672..e1a70dffc52a 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "core.h" #include "debug.h" @@ -721,7 +722,8 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) } EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data); -static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) +static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, + bool mark_read) { struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; struct ath10k_dump_file_data *dump_data; @@ -790,19 +792,54 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) sizeof(crash_data->registers)); sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); - ar->debug.fw_crash_data->crashed_since_read = false; + ar->debug.fw_crash_data->crashed_since_read = !mark_read; spin_unlock_bh(&ar->data_lock); return dump_data; } +int ath10k_debug_fw_devcoredump(struct ath10k *ar) +{ + struct ath10k_dump_file_data *dump; + void *dump_ptr; + u32 dump_len; + + /* To keep the dump file available also for debugfs don't mark the + * file read, only debugfs should do that. + */ + dump = ath10k_build_dump_file(ar, false); + if (!dump) { + ath10k_warn(ar, "no crash dump data found for devcoredump"); + return -ENODATA; + } + + /* Make a copy of the dump file for dev_coredumpv() as during the + * transition period we need to own the original file. Once + * fw_crash_dump debugfs file is removed no need to have a copy + * anymore. 
+ */ + dump_len = le32_to_cpu(dump->len); + dump_ptr = vzalloc(dump_len); + + if (!dump_ptr) + return -ENOMEM; + + memcpy(dump_ptr, dump, dump_len); + + dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL); + + return 0; +} + static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; struct ath10k_dump_file_data *dump; - dump = ath10k_build_dump_file(ar); + ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead."); + + dump = ath10k_build_dump_file(ar, true); if (!dump) return -ENODATA; diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 335512b11ca2..2368f47314ae 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -84,6 +84,9 @@ struct ath10k_fw_crash_data * ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); + +int ath10k_debug_fw_devcoredump(struct ath10k *ar); + #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, @@ -166,6 +169,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar) return 0; } +static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar) +{ + return 0; +} + #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) #define ath10k_debug_get_et_strings NULL -- cgit v1.2.3 From d679fa1b3c8997e3a371da3cca08c44a38054c34 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 22 Dec 2016 14:31:46 -0800 Subject: ath10k: recal the txpower when removing interface The txpower is being recalculated when adding interface to make sure txpower won't overshoot the spec, and when removing the interface, the txpower should again to be recalculated to restore the correct value from the active interface list. Following is one of the scenario vdev0 is created as STA and connected: txpower:23 vdev1 is created as P2P_DEVICE for control interface: txpower:0 vdev2 is created as p2p go/gc interface: txpower is 21 So the vdev2@txpower:21 will be set to firmware when vdev2 is created. When we tear down the vdev2, the txpower needs to be recalculated to re-set it to vdev0@txpower:23 as vdev0/vdev1 are the active interface. ath10k_pci mac vdev 0 peer create 8c:fd:f0:01:62:98 ath10k_pci mac vdev_id 0 txpower 23 ... (adding interface) ath10k_pci mac vdev create 2 (add interface) type 1 subtype 3 ath10k_pci mac vdev_id 2 txpower 21 ath10k_pci mac txpower 21 ... 
(removing interface) ath10k_pci mac vdev 2 delete (remove interface) ath10k_pci vdev 1 txpower 0 ath10k_pci vdev 0 txpower 23 ath10k_pci mac txpower 23 Signed-off-by: Ryan Hsu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 1dbed8c271cd..590d7667c861 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5228,6 +5228,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); } + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + spin_lock_bh(&ar->htt.tx_lock); ath10k_mac_vif_tx_unlock_all(arvif); spin_unlock_bh(&ar->htt.tx_lock); -- cgit v1.2.3 From d2e202c06ca42d353d95df12437740921a6d05b5 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 22 Dec 2016 15:02:37 -0800 Subject: ath10k: ignore configuring the incorrect board_id With command to get board_id from otp, in the case of following boot get otp board id result 0x00000000 board_id 0 chip_id 0 boot using board name 'bus=pci,bmi-chip-id=0,bmi-board-id=0" ... failed to fetch board data for bus=pci,bmi-chip-id=0,bmi-board-id=0 from ath10k/QCA6174/hw3.0/board-2.bin The invalid board_id=0 will be used as index to search in the board-2.bin. Ignore the case with board_id=0, as it means the otp is not carrying the board id information. Signed-off-by: Ryan Hsu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 3319db178c2e..874c2a755c66 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -694,8 +694,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) "boot get otp board id result 0x%08x board_id %d chip_id %d\n", result, board_id, chip_id); - if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0) + if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || + (board_id == 0)) { + ath10k_warn(ar, "board id is not exist in otp, ignore it\n"); return -EOPNOTSUPP; + } ar->id.bmi_ids_valid = true; ar->id.bmi_board_id = board_id; -- cgit v1.2.3 From a7773b5db155fb5ccdbd381d778c08bea35dab31 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Tue, 27 Dec 2016 19:02:35 +0530 Subject: ath10k: Remove passing unused argument for ath10k_mac_tx 'ath10k_mac_tx' does not seems to use the per station table entry pointer 'sta' (struct ieee80211_sta), hence remove passing this unused argument Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 590d7667c861..1b34e2f12110 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3528,7 +3528,6 @@ static int ath10k_mac_tx_submit(struct ath10k *ar, */ static int ath10k_mac_tx(struct ath10k *ar, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, enum ath10k_hw_txrx_mode txmode, enum ath10k_mac_tx_path txpath, struct sk_buff *skb) @@ -3670,7 +3669,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, 
txmode); - ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); if (ret) { ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", ret); @@ -3857,7 +3856,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); if (unlikely(ret)) { ath10k_warn(ar, "failed to push frame: %d\n", ret); @@ -4138,7 +4137,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); if (ret) { ath10k_warn(ar, "failed to transmit frame: %d\n", ret); if (is_htt) { -- cgit v1.2.3 From 34c30b0a5e97e3f9e1e166f56654a1261b722611 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Tue, 27 Dec 2016 20:53:35 +0530 Subject: ath10k: enable advertising support for channel 169, 5Ghz Enable advertising support for channel 169, 5Ghz so that based on the regulatory domain(country code) this channel shall be active for use. For example in countries like India this channel shall be available for use with latest regulatory updates Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 2 +- drivers/net/wireless/ath/ath10k/mac.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 09ff8b8a6441..c7664d6569fa 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -46,7 +46,7 @@ #define WMI_READY_TIMEOUT (5 * HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) -#define ATH10K_NUM_CHANS 39 +#define ATH10K_NUM_CHANS 40 /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 1b34e2f12110..d1b7edba5e49 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7590,6 +7590,7 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = { CHAN5G(157, 5785, 0), CHAN5G(161, 5805, 0), CHAN5G(165, 5825, 0), + CHAN5G(169, 5845, 0), }; struct ath10k *ath10k_mac_create(size_t priv_size) -- cgit v1.2.3 From 74c8719b8ee0922593a5cbec0bd6127d86d8a2f4 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Thu, 1 Dec 2016 19:23:31 +0530 Subject: mwifiex: sdio: fix use after free issue for save_adapter If we have sdio work requests received when sdio card reset is happening, we may end up accessing older save_adapter pointer later which is already freed during card reset. This patch solves the problem by cancelling those pending requests. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 740d79cd91fa..43facba641bf 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2237,6 +2237,12 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card) mmc_hw_reset(func->card->host); sdio_release_host(func); + /* Previous save_adapter won't be valid after this. We will cancel + * pending work requests. 
+ */ + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags); + mwifiex_sdio_probe(func, device_id); } -- cgit v1.2.3 From b82dd3bdf1a18bcd8da9291f71754670a0199e9f Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Fri, 16 Dec 2016 12:55:44 +0530 Subject: mwifiex: change width of MAC control variable Firmware has started making use of reserved field. Accordingly change curr_pkt_filter from u16 to u32. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/fw.h | 18 ++++++++---------- drivers/net/wireless/marvell/mwifiex/main.h | 2 +- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 4 ++-- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index ea455948a68a..f48247cfc548 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -434,14 +434,13 @@ enum mwifiex_channel_flags { #define HostCmd_ACT_BITWISE_SET 0x0002 #define HostCmd_ACT_BITWISE_CLR 0x0003 #define HostCmd_RESULT_OK 0x0000 - -#define HostCmd_ACT_MAC_RX_ON 0x0001 -#define HostCmd_ACT_MAC_TX_ON 0x0002 -#define HostCmd_ACT_MAC_WEP_ENABLE 0x0008 -#define HostCmd_ACT_MAC_ETHERNETII_ENABLE 0x0010 -#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 -#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 -#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON 0x2000 +#define HostCmd_ACT_MAC_RX_ON BIT(0) +#define HostCmd_ACT_MAC_TX_ON BIT(1) +#define HostCmd_ACT_MAC_WEP_ENABLE BIT(3) +#define HostCmd_ACT_MAC_ETHERNETII_ENABLE BIT(4) +#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE BIT(7) +#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE BIT(8) +#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON BIT(13) #define HostCmd_BSS_MODE_IBSS 0x0002 #define HostCmd_BSS_MODE_ANY 0x0003 @@ -1084,8 +1083,7 @@ struct host_cmd_ds_802_11_mac_address { }; struct host_cmd_ds_mac_control { - __le16 action; - __le16 reserved; + __le32 action; }; struct host_cmd_ds_mac_multicast_adr { diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 5c9bd944b6ea..d552a9d104d0 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -530,7 +530,7 @@ struct mwifiex_private { u8 tx_timeout_cnt; struct net_device *netdev; struct net_device_stats stats; - u16 curr_pkt_filter; + u32 curr_pkt_filter; u32 bss_mode; u32 pkt_tx_ctrl; u16 tx_power_level; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index ad2dfd8135b8..2f1f4d190b28 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -76,7 +76,7 @@ mwifiex_cmd_802_11_rssi_info(struct mwifiex_private *priv, */ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, - u16 cmd_action, u16 *action) + u16 cmd_action, u32 *action) { struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl; @@ -89,7 +89,7 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv, cmd->command = cpu_to_le16(HostCmd_CMD_MAC_CONTROL); cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mac_control) + S_DS_GEN); - mac_ctrl->action = cpu_to_le16(*action); + mac_ctrl->action = cpu_to_le32(*action); return 0; } -- cgit v1.2.3 From d7864cf2123503b83880d4b8d5786cb5641d64d8 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Fri, 16 Dec 2016 
12:55:45 +0530 Subject: mwifiex: Enable dynamic bandwidth signalling Enable dynamic bandwidth signalling by setting the corresponding bit in MAC control register. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/fw.h | 1 + drivers/net/wireless/marvell/mwifiex/init.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index f48247cfc548..55db158fd156 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -441,6 +441,7 @@ enum mwifiex_channel_flags { #define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE BIT(7) #define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE BIT(8) #define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON BIT(13) +#define HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE BIT(16) #define HostCmd_BSS_MODE_IBSS 0x0002 #define HostCmd_BSS_MODE_ANY 0x0003 diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index b36cb3fef358..0e89ccfa244e 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -92,7 +92,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv) for (i = 0; i < ARRAY_SIZE(priv->wep_key); i++) memset(&priv->wep_key[i], 0, sizeof(struct mwifiex_wep_key)); priv->wep_key_curr_index = 0; - priv->curr_pkt_filter = HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON | + priv->curr_pkt_filter = HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE | + HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON | HostCmd_ACT_MAC_ETHERNETII_ENABLE; priv->beacon_period = 100; /* beacon interval */ -- cgit v1.2.3 From ae3cf476450674dcba4239f144d310c472a08b5d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 7 Dec 2016 07:36:46 +0100 Subject: iwlegacy: make il3945_mac_ops __ro_after_init There's no need for this to be only __read_mostly, since it's only written in a single way depending on the module parameter, so that can be moved into the module's __init function, and the ops can be __ro_after_init. This is a little bit safer since it means the ops can't be overwritten (accidentally or otherwise), which would otherwise cause an arbitrary function or bad pointer to be called. Signed-off-by: Johannes Berg Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlegacy/3945-mac.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 466912eb2d87..e8e65115feba 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3469,7 +3469,7 @@ static struct attribute_group il3945_attribute_group = { .attrs = il3945_sysfs_entries, }; -static struct ieee80211_ops il3945_mac_ops __read_mostly = { +static struct ieee80211_ops il3945_mac_ops __ro_after_init = { .tx = il3945_mac_tx, .start = il3945_mac_start, .stop = il3945_mac_stop, @@ -3627,15 +3627,6 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) il->cmd_queue = IL39_CMD_QUEUE_NUM; - /* - * Disabling hardware scan means that mac80211 will perform scans - * "the hard way", rather than using device's scan. 
- */ - if (il3945_mod_params.disable_hw_scan) { - D_INFO("Disabling hw_scan\n"); - il3945_mac_ops.hw_scan = NULL; - } - D_INFO("*** LOAD DRIVER ***\n"); il->cfg = cfg; il->ops = &il3945_ops; @@ -3913,6 +3904,15 @@ il3945_init(void) pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); pr_info(DRV_COPYRIGHT "\n"); + /* + * Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. + */ + if (il3945_mod_params.disable_hw_scan) { + pr_info("hw_scan is disabled\n"); + il3945_mac_ops.hw_scan = NULL; + } + ret = il3945_rate_control_register(); if (ret) { pr_err("Unable to register rate control algorithm: %d\n", ret); -- cgit v1.2.3 From c705a6b3aa7804d7bc6660183f51e510c61dc807 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 7 Dec 2016 14:21:22 +0300 Subject: adm80211: return an error if adm8211_alloc_rings() fails We accidentally return success when adm8211_alloc_rings() fails but we should preserve the error code. Fixes: cc0b88cf5ecf ("[PATCH] Add adm8211 802.11b wireless driver") Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/admtek/adm8211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index 2b4a3eb38dfa..098c814e22c8 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -1863,7 +1863,8 @@ static int adm8211_probe(struct pci_dev *pdev, priv->rx_ring_size = rx_ring_size; priv->tx_ring_size = tx_ring_size; - if (adm8211_alloc_rings(dev)) { + err = adm8211_alloc_rings(dev); + if (err) { printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n", pci_name(pdev)); goto err_iounmap; -- cgit v1.2.3 From 1fef293b8a9850cfa124a53c1d8878d355010403 Mon Sep 17 00:00:00 2001 From: Andrew Lutomirski Date: Mon, 12 Dec 2016 12:55:55 -0800 Subject: orinoco: Use shash instead of ahash for MIC calculations Eric Biggers pointed out that the orinoco driver pointed scatterlists at the stack. Fix it by switching from ahash to shash. The result should be simpler, faster, and more correct. 
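For reference, the synchronous-hash flow the driver moves to boils down to the sequence below. This is a kernel-context sketch assembled from the calls in the diff that follows, not a stand-alone program; the function name, parameters and key length handling are illustrative rather than the driver's own.

#include <linux/types.h>
#include <crypto/hash.h>

/* One-shot MIC over a header and a payload using the shash API; tfm is a
 * "michael_mic" transform allocated with crypto_alloc_shash().
 */
static int mic_digest(struct crypto_shash *tfm, const u8 *key, unsigned int key_len,
		      const u8 *hdr, unsigned int hdr_len,
		      const u8 *data, unsigned int data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	err = crypto_shash_setkey(tfm, key, key_len);
	if (err)
		return err;

	desc->tfm = tfm;
	desc->flags = 0;		/* request flags, as in the patch below */

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, hdr, hdr_len);
	if (!err)
		err = crypto_shash_update(desc, data, data_len);
	if (!err)
		err = crypto_shash_final(desc, mic);

	shash_desc_zero(desc);		/* wipe the on-stack descriptor */
	return err;
}

Because the shash descriptor lives on the stack by design, nothing here hands stack memory to a scatterlist, which was the problem with the ahash-based version.
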
Cc: stable@vger.kernel.org # 4.9 only Reported-by: Eric Biggers Signed-off-by: Andy Lutomirski Signed-off-by: Kalle Valo --- drivers/net/wireless/intersil/orinoco/mic.c | 44 +++++++++++++++---------- drivers/net/wireless/intersil/orinoco/mic.h | 3 +- drivers/net/wireless/intersil/orinoco/orinoco.h | 4 +-- 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c index bc7397d709d3..08bc7822f820 100644 --- a/drivers/net/wireless/intersil/orinoco/mic.c +++ b/drivers/net/wireless/intersil/orinoco/mic.c @@ -16,7 +16,7 @@ /********************************************************************/ int orinoco_mic_init(struct orinoco_private *priv) { - priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, + priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " @@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv) return -ENOMEM; } - priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, + priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " @@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv) void orinoco_mic_free(struct orinoco_private *priv) { if (priv->tx_tfm_mic) - crypto_free_ahash(priv->tx_tfm_mic); + crypto_free_shash(priv->tx_tfm_mic); if (priv->rx_tfm_mic) - crypto_free_ahash(priv->rx_tfm_mic); + crypto_free_shash(priv->rx_tfm_mic); } -int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, +int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic) { - AHASH_REQUEST_ON_STACK(req, tfm_michael); - struct scatterlist sg[2]; + SHASH_DESC_ON_STACK(desc, tfm_michael); u8 hdr[ETH_HLEN + 2]; /* size of header + padding */ int err; @@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, hdr[ETH_ALEN * 2 + 2] = 0; hdr[ETH_ALEN * 2 + 3] = 0; - /* Use scatter gather to MIC header and data in one go */ - sg_init_table(sg, 2); - sg_set_buf(&sg[0], hdr, sizeof(hdr)); - sg_set_buf(&sg[1], data, data_len); + desc->tfm = tfm_michael; + desc->flags = 0; - if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN)) - return -1; + err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN); + if (err) + return err; + + err = crypto_shash_init(desc); + if (err) + return err; + + err = crypto_shash_update(desc, hdr, sizeof(hdr)); + if (err) + return err; + + err = crypto_shash_update(desc, data, data_len); + if (err) + return err; + + err = crypto_shash_final(desc, mic); + shash_desc_zero(desc); - ahash_request_set_tfm(req, tfm_michael); - ahash_request_set_callback(req, 0, NULL, NULL); - ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr)); - err = crypto_ahash_digest(req); - ahash_request_zero(req); return err; } diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h index ce731d05cc98..e8724e889219 100644 --- a/drivers/net/wireless/intersil/orinoco/mic.h +++ b/drivers/net/wireless/intersil/orinoco/mic.h @@ -6,6 +6,7 @@ #define _ORINOCO_MIC_H_ #include +#include #define MICHAEL_MIC_LEN 8 @@ -15,7 +16,7 @@ struct crypto_ahash; int orinoco_mic_init(struct orinoco_private *priv); void orinoco_mic_free(struct orinoco_private *priv); -int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, +int orinoco_mic(struct crypto_shash 
*tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h index 2f0c84b1c440..5fa1c3e3713f 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco.h @@ -152,8 +152,8 @@ struct orinoco_private { u8 *wpa_ie; int wpa_ie_len; - struct crypto_ahash *rx_tfm_mic; - struct crypto_ahash *tx_tfm_mic; + struct crypto_shash *rx_tfm_mic; + struct crypto_shash *tx_tfm_mic; unsigned int wpa_enabled:1; unsigned int tkip_cm_active:1; -- cgit v1.2.3 From a08b98196a36469700e74ec571c9fc27602de379 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:47 +0100 Subject: rt2800: make rx ampdu_factor depend on number of rx chains Initalize max ampdu_factor supported by us based on rx chains, vendor driver do the same. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 4fb79e05078f..03b7f8eb62bd 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -7593,7 +7593,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->ht.cap |= rx_chains << IEEE80211_HT_CAP_RX_STBC_SHIFT; - spec->ht.ampdu_factor = 3; + spec->ht.ampdu_factor = (rx_chains > 1) ? 3 : 2; spec->ht.ampdu_density = 4; spec->ht.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (tx_chains != rx_chains) { -- cgit v1.2.3 From e49abb19d1bf6c8f062f4fd824075e4fcdc7fb3d Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:48 +0100 Subject: rt2800: don't set ht parameters for non-aggregated frames Do not set ampdu_density and ba_size for frames without AMPDU bit i.e. frames that will not be aggregated to AMPDU. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 68b620b2462f..b2364d378774 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -306,13 +306,12 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rt2x00_sta *sta_priv = NULL; + u8 density = 0; if (sta) { - txdesc->u.ht.mpdu_density = - sta->ht_cap.ampdu_density; - sta_priv = sta_to_rt2x00_sta(sta); txdesc->u.ht.wcid = sta_priv->wcid; + density = sta->ht_cap.ampdu_density; } /* @@ -345,8 +344,6 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, return; } - txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ - /* * Only one STBC stream is supported for now. */ @@ -358,8 +355,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, * frames that are intended to probe a specific tx rate. 
*/ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU && - !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) + !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); + txdesc->u.ht.mpdu_density = density; + txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ + } /* * Set 40Mhz mode if necessary (for legacy rates this will -- cgit v1.2.3 From a51b89698ccc93c7e274eb71377fae49c4593ab2 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:49 +0100 Subject: rt2800: set minimum MPDU and PSDU lengths to sane values Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 03b7f8eb62bd..4c7ac4d78d1b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4708,8 +4708,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 2); else rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 1); - rt2x00_set_field32(®, MAX_LEN_CFG_MIN_PSDU, 0); - rt2x00_set_field32(®, MAX_LEN_CFG_MIN_MPDU, 0); + rt2x00_set_field32(®, MAX_LEN_CFG_MIN_PSDU, 10); + rt2x00_set_field32(®, MAX_LEN_CFG_MIN_MPDU, 10); rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); rt2800_register_read(rt2x00dev, LED_CFG, ®); -- cgit v1.2.3 From 8f03a7c6e7f959edd22e35158fbb9a4087962fae Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:50 +0100 Subject: rt2800: set MAX_PSDU len according to remote STAs capabilities MAX_LEN_CFG_MAX_PSDU specify maximum transmitted by HW AMPDU length (0 - 8kB, 1 - 16kB, 2 - 32kB, 3 - 64kB). Set this option according to remote stations capabilities (based on HT ampdu_factor). However limit the value based our hardware TX capabilities as some chips can not send more than 16kB (factor 1). Limit for all chips is currently 32kB (factor 2), but perhaps for some chips this could be increased to 64kB by setting drv_data->max_psdu to 3. Since MAX_LEN_CFG_MAX_PSDU is global setting, on multi stations modes (AP, IBSS, mesh) we limit according to less capable remote STA. We can not set bigger value to speed up communication with some stations and do not break communication with slow stations. 
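The clamping described above boils down to taking the smallest A-MPDU factor advertised by any connected HT station and capping it at what the hardware can transmit; a minimal sketch of that selection, mirroring the ampdu_factor_cnt[] bookkeeping added in the diff below (helper name is hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch only: factor 0..3 maps to 8/16/32/64 kB maximum A-MPDU length. */
static u8 example_pick_max_psdu(const unsigned int factor_cnt[4],
				u8 hw_max_psdu)
{
	u8 i;

	/* Lowest factor advertised by any associated HT station wins. */
	for (i = 0; i < 3; i++)
		if (factor_cnt[i] > 0)
			break;

	return min(hw_max_psdu, i);
}
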
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800.h | 2 ++ drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 45 ++++++++++++++++++++++++-- drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 2 +- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 2 +- drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 3 +- 5 files changed, 47 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 95c1d7c0a2f3..ec622a08a486 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -2979,7 +2979,9 @@ struct rt2800_drv_data { u8 bbp26; u8 txmixer_gain_24g; u8 txmixer_gain_5g; + u8 max_psdu; unsigned int tbtt_tick; + unsigned int ampdu_factor_cnt[4]; DECLARE_BITMAP(sta_ids, STA_IDS_SIZE); }; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 4c7ac4d78d1b..4c777f133edf 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1418,6 +1418,23 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, } EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key); +static void rt2800_set_max_psdu_len(struct rt2x00_dev *rt2x00dev) +{ + u8 i, max_psdu; + u32 reg; + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + + for (i = 0; i < 3; i++) + if (drv_data->ampdu_factor_cnt[i] > 0) + break; + + max_psdu = min(drv_data->max_psdu, i); + + rt2800_register_read(rt2x00dev, MAX_LEN_CFG, ®); + rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, max_psdu); + rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); +} + int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { @@ -1425,6 +1442,17 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + /* + * Limit global maximum TX AMPDU length to smallest value of all + * connected stations. In AP mode this can be suboptimal, but we + * do not have a choice if some connected STA is not capable to + * receive the same amount of data like the others. + */ + if (sta->ht_cap.ht_supported) { + drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]++; + rt2800_set_max_psdu_len(rt2x00dev); + } + /* * Search for the first free WCID entry and return the corresponding * index. 
@@ -1457,9 +1485,16 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(rt2800_sta_add); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid) +int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); + int wcid = sta_priv->wcid; + + if (sta->ht_cap.ht_supported) { + drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]--; + rt2800_set_max_psdu_len(rt2x00dev); + } if (wcid > WCID_END) return 0; @@ -4536,6 +4571,7 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner); */ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) { + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; u16 eeprom; unsigned int i; @@ -4704,10 +4740,13 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) || rt2x00_rt(rt2x00dev, RT2883) || - rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) + rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) { + drv_data->max_psdu = 2; rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 2); - else + } else { + drv_data->max_psdu = 1; rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 1); + } rt2x00_set_field32(®, MAX_LEN_CFG_MIN_PSDU, 10); rt2x00_set_field32(®, MAX_LEN_CFG_MIN_MPDU, 10); rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 83f1a44fb9b4..0a8b4df665fe 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -183,7 +183,7 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct ieee80211_key_conf *key); int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid); +int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta); void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags); void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index aa3d4ceef4ad..92bb8be9f319 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -627,7 +627,7 @@ struct rt2x00lib_ops { struct ieee80211_vif *vif, struct ieee80211_sta *sta); int (*sta_remove) (struct rt2x00_dev *rt2x00dev, - int wcid); + struct ieee80211_sta *sta); }; /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 13da95a24cf7..d4b50fb948ee 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -539,9 +539,8 @@ int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct rt2x00_dev *rt2x00dev = hw->priv; - struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); - return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta_priv->wcid); + return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta); } EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove); -- cgit v1.2.3 From 8845254112ac139445bc039952ddcc43b2b122da Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:51 +0100 Subject: rt2800: rename adjust_freq_offset 
function We have different modes of adjusting freq offset on different chips. Call current adjustment similarly like vendor driver - mode1 . Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 4c777f133edf..81da77161730 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1939,7 +1939,7 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, #define FREQ_OFFSET_BOUND 0x5f -static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) +static void rt2800_freq_cal_mode1(struct rt2x00_dev *rt2x00dev) { u8 freq_offset, prev_freq_offset; u8 rfcsr, prev_rfcsr; @@ -2415,7 +2415,7 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, } rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); if (conf_is_ht40(conf)) { txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40, @@ -2605,7 +2605,7 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); if (rf->channel <= 14) { if (rf->channel == 6) @@ -2646,7 +2646,7 @@ static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev, else rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); @@ -2711,7 +2711,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); if (rf->channel <= 14) { int idx = rf->channel-1; @@ -3006,7 +3006,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev, } /* TODO proper frequency adjustment */ - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); /* TODO merge with others */ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); @@ -6454,7 +6454,7 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1); @@ -6680,7 +6680,7 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 2, 0x80); msleep(1); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); /* Enable DC filter */ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) -- cgit v1.2.3 From bc0077053948cdc4705a7b190459904c7c0fe086 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:52 +0100 Subject: rt2800: warn if doing VCO recalibration for unknow RF chip Since we reset TX_PIN_CFG register, we have to finish recalibration. Warn if this is not possible. 
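The warning itself is the usual WARN_ONCE() pattern in the switch default: it logs the message together with a stack dump the first time an unhandled RF chip reaches the calibration path and stays silent afterwards. A hedged sketch with hypothetical names (the driver's actual change is in the diff below):

#include <linux/bug.h>

/* Sketch only -- not the driver function. */
static void example_vco_calibration(unsigned int rf_chip)
{
	switch (rf_chip) {
	case 0x5390:
		/* per-chip VCO calibration sequence would run here */
		break;
	default:
		/* Loud, one-time report (message + backtrace), then bail out. */
		WARN_ONCE(1, "VCO recalibration not implemented for RF 0x%04x\n",
			  rf_chip);
		return;
	}
}
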
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 81da77161730..017cbbd16a07 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4346,6 +4346,8 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); break; default: + WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration", + rt2x00dev->chip.rf); return; } -- cgit v1.2.3 From 24d42ef3b152b83831a2d876eea4f1d5ae1d6d85 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:53 +0100 Subject: rt2800: perform VCO recalibration for RF5592 chip Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 017cbbd16a07..50f39bf115b6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4341,6 +4341,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) case RF5372: case RF5390: case RF5392: + case RF5592: rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); @@ -7716,6 +7717,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF5372: case RF5390: case RF5392: + case RF5592: __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags); break; } -- cgit v1.2.3 From d96324703ffa0d690c49f8eed1d48ad2a97762c8 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:54 +0100 Subject: rt2x00: merge agc and vco works with link tuner We need to perform different actions (AGC and VCO calibrations and VGC tuning) periodically at different intervals. We don't need separate works for those, we can use link tuner work and just check for proper interval on it. This fixes performing AGC and VCO calibration when scanning on STA mode. We need to be on-channel to perform those calibrations. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 6 -- drivers/net/wireless/ralink/rt2x00/rt2x00lib.h | 31 +----- drivers/net/wireless/ralink/rt2x00/rt2x00link.c | 127 ++++++------------------ 3 files changed, 36 insertions(+), 128 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index eb7b71443657..6afd21542c7d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -87,9 +87,6 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) */ rt2x00queue_start_queues(rt2x00dev); rt2x00link_start_tuner(rt2x00dev); - rt2x00link_start_agc(rt2x00dev); - if (rt2x00_has_cap_vco_recalibration(rt2x00dev)) - rt2x00link_start_vcocal(rt2x00dev); /* * Start watchdog monitoring. 
@@ -112,9 +109,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) /* * Stop all queues */ - rt2x00link_stop_agc(rt2x00dev); - if (rt2x00_has_cap_vco_recalibration(rt2x00dev)) - rt2x00link_stop_vcocal(rt2x00dev); rt2x00link_stop_tuner(rt2x00dev); rt2x00queue_stop_queues(rt2x00dev); rt2x00queue_flush_queues(rt2x00dev, true); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h index fb7c349ccc9c..9ddc1681b86a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h @@ -29,9 +29,10 @@ * Interval defines */ #define WATCHDOG_INTERVAL round_jiffies_relative(HZ) -#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ) -#define AGC_INTERVAL round_jiffies_relative(4 * HZ) -#define VCO_INTERVAL round_jiffies_relative(10 * HZ) /* 10 sec */ +#define LINK_TUNE_SECONDS 1 +#define LINK_TUNE_INTERVAL round_jiffies_relative(LINK_TUNE_SECONDS * HZ) +#define AGC_SECONDS 4 +#define VCO_SECONDS 10 /* * rt2x00_rate: Per rate device information @@ -270,30 +271,6 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev); */ void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev); -/** - * rt2x00link_start_agc - Start periodic gain calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. - */ -void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00link_start_vcocal - Start periodic VCO calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. - */ -void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00link_stop_agc - Stop periodic gain calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. - */ -void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00link_stop_vcocal - Stop periodic VCO calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. - */ -void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev); - /** * rt2x00link_register - Initialize link tuning & watchdog functionality * @rt2x00dev: Pointer to &struct rt2x00_dev. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c index 017188e5a736..73cbf23b17ef 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c @@ -233,15 +233,13 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) struct link *link = &rt2x00dev->link; /* - * Link tuning should only be performed when - * an active sta interface exists. AP interfaces - * don't need link tuning and monitor mode interfaces - * should never have to work with link tuners. + * Single monitor mode interfaces should never have + * work with link tuners. */ - if (!rt2x00dev->intf_sta_count) + if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count) return; - /** + /* * While scanning, link tuning is disabled. By default * the most sensitive settings will be used to make sure * that all beacons and probe responses will be received @@ -308,21 +306,10 @@ static void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev) qual->tx_failed = 0; } -static void rt2x00link_tuner(struct work_struct *work) +static void rt2x00link_tuner_sta(struct rt2x00_dev *rt2x00dev, struct link *link) { - struct rt2x00_dev *rt2x00dev = - container_of(work, struct rt2x00_dev, link.work.work); - struct link *link = &rt2x00dev->link; struct link_qual *qual = &rt2x00dev->link.qual; - /* - * When the radio is shutting down we should - * immediately cease all link tuning. 
- */ - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || - test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) - return; - /* * Update statistics. */ @@ -360,6 +347,33 @@ static void rt2x00link_tuner(struct work_struct *work) */ if (rt2x00lib_antenna_diversity(rt2x00dev)) rt2x00link_reset_qual(rt2x00dev); +} + +static void rt2x00link_tuner(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, link.work.work); + struct link *link = &rt2x00dev->link; + + /* + * When the radio is shutting down we should + * immediately cease all link tuning. + */ + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || + test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) + return; + + if (rt2x00dev->intf_sta_count) + rt2x00link_tuner_sta(rt2x00dev, link); + + if (rt2x00dev->ops->lib->gain_calibration && + (link->count % (AGC_SECONDS / LINK_TUNE_SECONDS)) == 0) + rt2x00dev->ops->lib->gain_calibration(rt2x00dev); + + if (rt2x00dev->ops->lib->vco_calibration && + rt2x00_has_cap_vco_recalibration(rt2x00dev) && + (link->count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0) + rt2x00dev->ops->lib->vco_calibration(rt2x00dev); /* * Increase tuner counter, and reschedule the next link tuner run. @@ -408,85 +422,8 @@ static void rt2x00link_watchdog(struct work_struct *work) WATCHDOG_INTERVAL); } -void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev) -{ - struct link *link = &rt2x00dev->link; - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && - rt2x00dev->ops->lib->gain_calibration) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->agc_work, - AGC_INTERVAL); -} - -void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev) -{ - struct link *link = &rt2x00dev->link; - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && - rt2x00dev->ops->lib->vco_calibration) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->vco_work, - VCO_INTERVAL); -} - -void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev) -{ - cancel_delayed_work_sync(&rt2x00dev->link.agc_work); -} - -void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev) -{ - cancel_delayed_work_sync(&rt2x00dev->link.vco_work); -} - -static void rt2x00link_agc(struct work_struct *work) -{ - struct rt2x00_dev *rt2x00dev = - container_of(work, struct rt2x00_dev, link.agc_work.work); - struct link *link = &rt2x00dev->link; - - /* - * When the radio is shutting down we should - * immediately cease the watchdog monitoring. - */ - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) - return; - - rt2x00dev->ops->lib->gain_calibration(rt2x00dev); - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->agc_work, - AGC_INTERVAL); -} - -static void rt2x00link_vcocal(struct work_struct *work) -{ - struct rt2x00_dev *rt2x00dev = - container_of(work, struct rt2x00_dev, link.vco_work.work); - struct link *link = &rt2x00dev->link; - - /* - * When the radio is shutting down we should - * immediately cease the VCO calibration. 
- */ - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) - return; - - rt2x00dev->ops->lib->vco_calibration(rt2x00dev); - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->vco_work, - VCO_INTERVAL); -} - void rt2x00link_register(struct rt2x00_dev *rt2x00dev) { - INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc); - if (rt2x00_has_cap_vco_recalibration(rt2x00dev)) - INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal); INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog); INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); } -- cgit v1.2.3 From eb79a8fe94c8a16687112e0a3abcce8225447f6f Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:55 +0100 Subject: rt2800: replace mdelay by usleep on vco calibration. This procedure can sleep, hence mdelay is not needed. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 50f39bf115b6..13211c8854cc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4352,7 +4352,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) return; } - mdelay(1); + usleep_range(1000, 1500); rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin); if (rt2x00dev->rf_channel <= 14) { -- cgit v1.2.3 From 31369c323ba0dc839d8a4e5a5727adc76814959c Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:56 +0100 Subject: rt2800: replace msleep() with usleep_range() on channel switch msleep(1) can sleep much more time then requested 1ms, this is not good on channel switch, which we want to be performed fast (i.e. to make scan faster). Replace msleep() with usleep_range(), which has much smaller maximum sleeping time boundary. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 13211c8854cc..5436cdb07937 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -2110,7 +2110,9 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); - msleep(1); + + usleep_range(1000, 1500); + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); } @@ -3442,7 +3444,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, } } - msleep(1); + usleep_range(1000, 1500); /* * Clear channel statistic counters -- cgit v1.2.3 From c7d1c77781f468c639867d324d4e490139cd4c7f Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 19 Dec 2016 11:52:57 +0100 Subject: rt2x00: add mutex to synchronize config and link tuner Do not perform mac80211 config and link_tuner at the same time, this can possibly result in wrong RF or BBP configuration. 
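The serialization is the plain "one mutex shared by the config path and the periodic work" pattern; a minimal sketch under hypothetical names (the actual hook points are in the diff below):

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Sketch only -- not the rt2x00 structures. */
struct example_dev {
	struct mutex conf_mutex;	/* serializes config vs. link tuning */
	struct delayed_work tuner_work;
};

static void example_tuner_work(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(to_delayed_work(work), struct example_dev,
			     tuner_work);

	mutex_lock(&dev->conf_mutex);
	/* periodic RF/BBP tuning and calibration run here */
	mutex_unlock(&dev->conf_mutex);
}

static void example_config(struct example_dev *dev)
{
	mutex_lock(&dev->conf_mutex);
	/* channel/antenna reprogramming runs here */
	mutex_unlock(&dev->conf_mutex);
}
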
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 4 ++++ drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 1 + drivers/net/wireless/ralink/rt2x00/rt2x00link.c | 5 +++++ drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 5 +++++ 4 files changed, 15 insertions(+) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 92bb8be9f319..034a07273038 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -833,6 +833,10 @@ struct rt2x00_dev { */ struct mutex csr_mutex; + /* + * Mutex to synchronize config and link tuner. + */ + struct mutex conf_mutex; /* * Current packet filter configuration for the device. * This contains all currently active FIF_* flags send diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 6afd21542c7d..8fcbc8dc94c1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -1313,6 +1313,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) spin_lock_init(&rt2x00dev->irqmask_lock); mutex_init(&rt2x00dev->csr_mutex); + mutex_init(&rt2x00dev->conf_mutex); INIT_LIST_HEAD(&rt2x00dev->bar_list); spin_lock_init(&rt2x00dev->bar_list_lock); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c index 73cbf23b17ef..2010a7715f21 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c @@ -363,6 +363,9 @@ static void rt2x00link_tuner(struct work_struct *work) test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) return; + /* Do not race with rt2x00mac_config(). */ + mutex_lock(&rt2x00dev->conf_mutex); + if (rt2x00dev->intf_sta_count) rt2x00link_tuner_sta(rt2x00dev, link); @@ -375,6 +378,8 @@ static void rt2x00link_tuner(struct work_struct *work) (link->count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0) rt2x00dev->ops->lib->vco_calibration(rt2x00dev); + mutex_unlock(&rt2x00dev->conf_mutex); + /* * Increase tuner counter, and reschedule the next link tuner run. */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index d4b50fb948ee..3cc1384ed2fc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -320,6 +320,9 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) */ rt2x00queue_stop_queue(rt2x00dev->rx); + /* Do not race with with link tuner. */ + mutex_lock(&rt2x00dev->conf_mutex); + /* * When we've just turned on the radio, we want to reprogram * everything to ensure a consistent state @@ -335,6 +338,8 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) */ rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); + mutex_unlock(&rt2x00dev->conf_mutex); + /* Turn RX back on */ rt2x00queue_start_queue(rt2x00dev->rx); -- cgit v1.2.3 From 531940f9644da798f04382aeb5e8929295dfde61 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:22:57 -0600 Subject: rtlwifi: Replace local debug macro RT_ASSERT This macro can be replaced with WARN_ONCE. In addition to using a standard debugging macro for these critical errors, we also get a stack dump. In rtl8821ae/hw.c, a senseless comment was removed, and an incorrect indentation was fixed. 
This patch also fixes two places in each of rtl8192ee, rtl8723be, and rtl8821ae where the logical condition was incorrect. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/core.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/debug.h | 14 -------------- drivers/net/wireless/realtek/rtlwifi/pci.c | 12 ++++++------ drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 10 +++++----- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 8 ++++---- .../wireless/realtek/rtlwifi/rtl8192c/fw_common.c | 6 +++--- .../wireless/realtek/rtlwifi/rtl8192c/phy_common.c | 14 +++++++------- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 10 +++++----- drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 14 +++++++------- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 15 +++++++++------ drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c | 6 +++--- drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c | 10 +++++----- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 12 ++++++------ drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 14 +++++++------- .../wireless/realtek/rtlwifi/rtl8723com/phy_common.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 16 ++++++++-------- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 20 ++++++++++++-------- drivers/net/wireless/realtek/rtlwifi/usb.c | 2 +- 38 files changed, 133 insertions(+), 140 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 2caa4ad04dba..7ae774da24bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1613,8 +1613,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set pairwise key\n"); if (!sta) { - RT_ASSERT(false, - "pairwise key without mac_addr\n"); + WARN_ONCE(true, + "rtlwifi: pairwise key without mac_addr\n"); err = -EOPNOTSUPP; goto out_unlock; @@ -1804,8 +1804,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"); return true; default: - RT_ASSERT(false, - "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); + WARN_ONCE(true, + "rtlwifi: rtl_hal_pwrseqcmdparsing(): Unknown 
CMD!!\n"); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index 6156a79328c1..6f2e2b943c3d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -166,15 +166,6 @@ enum dbgp_flag_e { #ifdef CONFIG_RTLWIFI_DEBUG -#define RT_ASSERT(_exp, fmt, ...) \ -do { \ - if (!(_exp)) { \ - printk(KERN_DEBUG KBUILD_MODNAME ":%s(): " fmt, \ - __func__, ##__VA_ARGS__); \ - } \ -} while (0) - - struct rtl_priv; __printf(5, 6) @@ -210,11 +201,6 @@ do { \ struct rtl_priv; -__printf(2, 3) -static inline void RT_ASSERT(int exp, const char *fmt, ...) -{ -} - __printf(4, 5) static inline void RT_TRACE(struct rtl_priv *rtlpriv, int comp, int level, diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 8bfe020edd3a..bb767186609e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2174,15 +2174,15 @@ int rtl_pci_probe(struct pci_dev *pdev, err = pci_enable_device(pdev); if (err) { - RT_ASSERT(false, "%s : Cannot enable new PCI device\n", + WARN_ONCE(true, "%s : Cannot enable new PCI device\n", pci_name(pdev)); return err; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { - RT_ASSERT(false, - "Unable to obtain 32bit DMA for consistent allocations\n"); + WARN_ONCE(true, + "rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n"); err = -ENOMEM; goto fail1; } @@ -2193,7 +2193,7 @@ int rtl_pci_probe(struct pci_dev *pdev, hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) + sizeof(struct rtl_priv), &rtl_ops); if (!hw) { - RT_ASSERT(false, + WARN_ONCE(true, "%s : ieee80211 alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto fail1; @@ -2232,7 +2232,7 @@ int rtl_pci_probe(struct pci_dev *pdev, /* MEM map */ err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { - RT_ASSERT(false, "Can't obtain PCI resources\n"); + WARN_ONCE(true, "rtlwifi: Can't obtain PCI resources\n"); goto fail1; } @@ -2245,7 +2245,7 @@ int rtl_pci_probe(struct pci_dev *pdev, (unsigned long)pci_iomap(pdev, rtlpriv->cfg->bar_id, pmem_len); if (rtlpriv->io.pci_mem_start == 0) { - RT_ASSERT(false, "Can't map PCI mem\n"); + WARN_ONCE(true, "rtlwifi: Can't map PCI mem\n"); err = -ENOMEM; goto fail2; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 5360d5332359..49da81abaf82 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -434,8 +434,8 @@ void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8188ee: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 37d6efc3d240..1a17dfe3edf6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -1352,7 +1352,7 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8188ee: invalid aci: %d !\n", aci); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index fffaa92eda81..ece8691ce3c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -1231,8 +1231,8 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8188ee: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -1280,8 +1280,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8188ee: illegal channel for Zebra: %d\n", channel); _rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, @@ -1367,7 +1367,7 @@ static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8188ee: cmdtable cannot be NULL.\n"); return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 3e3b88664883..09c908d4cf91 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -760,7 +760,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n", desc_name); break; } @@ -779,7 +779,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -799,7 +799,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n", desc_name); break; } @@ -815,7 +815,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index 7d152466152b..9c3392ad01bc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -485,8 +485,8 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8192c-common: return H2C cmd because of Fw download fail!!!\n"); return; } @@ -510,7 +510,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) while (u1b_tmp & BIT(2)) { delay--; if (delay == 0) { - RT_ASSERT(false, "8051 reset fail.\n"); + WARN_ONCE(true, "rtl8192c-common: 8051 reset fail.\n"); break; } udelay(50); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 94dd25cf1ca8..32fc4b60bc86 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c @@ -77,7 +77,7 @@ EXPORT_SYMBOL(rtl92c_phy_set_bb_reg); u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset) { - RT_ASSERT(false, "deprecated!\n"); + WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_read deprecated!\n"); return 0; } EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read); @@ -86,7 +86,7 @@ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { - RT_ASSERT(false, "deprecated!\n"); + WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_write deprecated!\n"); } EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write); @@ -745,8 +745,8 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8192c-common: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -792,7 +792,7 @@ static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8192c-common: cmdtable cannot be NULL.\n"); return false; } @@ -837,8 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8192c-common: illegal channel for Zebra: %d\n", channel); _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 4483d40ecad1..b757a74339c1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -1292,7 +1292,7 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8192ce: invalid aci: %d !\n", aci); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 2ab4a00246cc..3616ba21959d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -670,7 +670,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n", desc_name); break; } @@ -690,7 +690,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -710,7 +710,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not 
processed\n", desc_name); break; } @@ -726,7 +726,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(p_desc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 5c7da0cfc684..ddfcf9949d9e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1790,7 +1790,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u4b_ac_param); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", + WARN_ONCE(true, "rtl8192cu: invalid aci: %d !\n", e_aci); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 1ea878fa7901..6da6e2acfbec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -241,7 +241,7 @@ u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index) break; default: hw_queue_index = RTL_TXQ_BE; - RT_ASSERT(false, "QSLT_BE queue, skb_queue:%d\n", + WARN_ONCE(true, "rtl8192cu: QSLT_BE queue, skb_queue:%d\n", mac80211_queue_index); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 17f6903c14bb..fc1d4f2ba82d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -182,7 +182,7 @@ void rtl92d_firmware_selfreset(struct ieee80211_hw *hw) udelay(50); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); } - RT_ASSERT((delay > 0), "8051 reset failed!\n"); + WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n"); RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "=====> 8051 reset success (%d)\n", delay); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 424f54babd03..b0dc189e5579 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -2700,7 +2700,7 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL\n"); + WARN_ONCE(true, "rtl8192de: cmdtable cannot be NULL\n"); return false; } if (cmdtableidx >= cmdtablesz) @@ -2893,17 +2893,17 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw) * 5G and 2.4G band. */ if (channel <= 14) return 0; - RT_ASSERT((channel > 14), "5G but channel<=14\n"); + WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n"); break; case BAND_ON_2_4G: /* Get first channel error when change between * 5G and 2.4G band. 
*/ if (channel > 14) return 0; - RT_ASSERT((channel <= 14), "2G but channel>14\n"); + WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n"); break; default: - RT_ASSERT(false, "Invalid WirelessMode(%#x)!!\n", + WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n", rtlpriv->mac80211.mode); break; } @@ -3336,7 +3336,7 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw) } } if (i == 200) - RT_ASSERT(false, "Another mac power off over time\n"); + WARN_ONCE(true, "rtl8192de: Another mac power off over time\n"); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 2d65e4095292..0d98b34eff0a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -402,7 +402,7 @@ static int __init rtl92de_module_init(void) ret = pci_register_driver(&rtl92de_driver); if (ret) - RT_ASSERT(false, "No device found\n"); + WARN_ONCE(true, "rtl8192de: No device found\n"); return ret; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 5fb37564957c..5c9c8741134f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -794,7 +794,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", desc_name); break; } @@ -814,7 +814,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -834,7 +834,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", desc_name); break; } @@ -848,7 +848,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_RX_DESC_PKT_LEN(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index b3f6a9ed15d4..7a5cc0c746cd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -462,8 +462,8 @@ void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8192ee: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index ebf663e1a81a..00b211b3fc8f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -1582,7 +1582,7 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8192ee: invalid aci: %d !\n", aci); break; } } diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 5ad7e753c357..c35ba764a6c5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -1176,7 +1176,7 @@ static u8 _rtl92ee_phy_get_ratesection_intxpower_byrate(enum radio_path path, rate_section = 7; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n"); break; } return rate_section; @@ -1239,7 +1239,7 @@ static u8 _rtl92ee_get_txpower_by_rate(struct ieee80211_hw *hw, shift = 24; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n"); break; } @@ -1811,8 +1811,8 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8192ee: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -1860,8 +1860,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8192ee: illegal channel for Zebra: %d\n", channel); _rtl92ee_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, @@ -1948,7 +1948,7 @@ static bool _rtl92ee_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8192ee: cmdtable cannot be NULL.\n"); return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 2d48ccd02ac8..07440e9a8ca2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -991,8 +991,9 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8192ee: ERR rxdesc :%d not processed\n", + desc_name); break; } } @@ -1011,8 +1012,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TXBUFFER_DESC_ADDR_LOW(pdesc, 1); break; default: - RT_ASSERT(false, - "ERR txdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8192ee: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -1027,8 +1029,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8192ee: ERR rxdesc :%d not processed\n", + desc_name); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 26e06b2837c3..f4ab72101d04 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -1251,7 +1251,7 @@ void rtl92se_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, EDCAPARA_VO, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8192se: invalid aci: %d !\n", aci); break; } } diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index fcb9216af82d..6299dc1c8eac 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c @@ -330,7 +330,7 @@ static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL\n"); + WARN_ONCE(true, "rtl8192se: cmdtable cannot be NULL\n"); return false; } @@ -374,8 +374,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "invalid channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8192se: invalid channel for Zebra: %d\n", channel); _rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index 9a5a11399221..12cef01e593b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -583,7 +583,7 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n", desc_name); break; } @@ -603,7 +603,7 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_STATUS_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -623,7 +623,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n", desc_name); break; } @@ -639,7 +639,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name) ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index 1186755e55b8..164a27e72537 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -259,8 +259,8 @@ void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8723ae: error H2C cmd because of Fw download fail!!!\n"); return; } memset(tmp_cmdbuf, 0, 8); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index f8be0bd7e326..e0da8dceff2c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1256,7 +1256,7 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8723ae: invalid aci: %d !\n", aci); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index 17b58cb32d55..8001dce9f277 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -133,7 +133,7 @@ static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { - RT_ASSERT(false, "deprecated!\n"); + WARN_ONCE(true, "rtl8723ae: _rtl8723e_phy_fw_rf_serial_write deprecated!\n"); } static void _rtl8723e_phy_bb_config_1t(struct ieee80211_hw *hw) @@ -885,8 +885,8 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8723ae: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -954,8 +954,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8723ae: illegal channel for Zebra: %d\n", channel); rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index e93125ebed81..c9838f52a7ea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -617,7 +617,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n", desc_name); break; } @@ -636,7 +636,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -656,7 +656,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n", desc_name); break; } @@ -672,7 +672,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index 8c5c27ce8e05..6b03c7fe8a04 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -224,8 +224,8 @@ void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8723be: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index aba60c3145c5..56a1ae438fba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1631,7 +1631,7 @@ 
void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8723be: invalid aci: %d !\n", aci); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 3cc2232f25ca..b4e0a58f1c1f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -939,7 +939,7 @@ static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path, break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n"); break; } @@ -1004,7 +1004,7 @@ static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw, shift = 24; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n"); break; } tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num] @@ -1387,8 +1387,8 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8723be: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -1438,8 +1438,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8723be: illegal channel for Zebra: %d\n", channel); rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 2175aecbb8f4..6f65003a895a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -666,8 +666,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", - desc_name); + WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -685,8 +685,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", - desc_name); + WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not process\n", + desc_name); break; } } @@ -705,8 +705,8 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", - desc_name); + WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not process\n", + desc_name); break; } } else { @@ -721,7 +721,7 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c index 75cbd1509b52..6b80ddc9e5c9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c @@ -283,7 +283,7 @@ bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8723-common: cmdtable cannot be NULL.\n"); return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index b665446351a4..9e87b96626c6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -510,8 +510,8 @@ void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8821ae: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 1281ebe0c30a..b045dd065cb6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2249,7 +2249,7 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8821ae: invalid aci: %d !\n", aci); break; } } @@ -2601,11 +2601,10 @@ static u8 _rtl8821ae_get_chnl_group(u8 chnl) group = 12; else if (173 <= chnl && chnl <= 177) group = 13; - else - /*RT_TRACE(rtlpriv, COMP_EFUSE,DBG_LOUD, - "5G, Channel %d in Group not found\n",chnl);*/ - RT_ASSERT(!COMP_EFUSE, - "5G, Channel %d in Group not found\n", chnl); + else + WARN_ONCE(true, + "rtl8821ae: 5G, Channel %d in Group not found\n", + chnl); } return group; } @@ -4135,8 +4134,9 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw, count++; } while (tmp && count < 100); - RT_ASSERT((count < 100), - "Write wake up frame mask FAIL %d value!\n", tmp); + WARN_ONCE((count >= 100), + "rtl8821ae: Write wake up frame mask FAIL %d value!\n", + tmp); } /* Disable Rx packet buffer access. 
*/ rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 5dad402171c2..7e2f7b0ae446 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -1870,8 +1870,8 @@ static u8 _rtl8821ae_get_rate_section_index(u32 regaddr) else if (regaddr >= 0xE20 && regaddr <= 0xE4C) index = (u8)((regaddr - 0xE20) / 4); else - RT_ASSERT(!COMP_INIT, - "Invalid RegAddr 0x%x\n", regaddr); + WARN_ONCE(true, + "rtl8821ae: Invalid RegAddr 0x%x\n", regaddr); return index; } @@ -2322,7 +2322,7 @@ static s8 _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) rate_section = 11; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n"); break; } @@ -2588,7 +2588,7 @@ static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw, shift = 24; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n"); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 27727186ba5f..108098152cf3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -904,8 +904,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: - RT_ASSERT(false, - "ERR txdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -923,8 +924,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR rxdesc :%d not processed\n", + desc_name); break; } } @@ -943,8 +945,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, - "ERR txdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -959,8 +962,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR rxdesc :%d not processed\n", + desc_name); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 0a508649903d..1558f541ab15 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1059,7 +1059,7 @@ int rtl_usb_probe(struct usb_interface *intf, hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) + sizeof(struct rtl_usb_priv), &rtl_ops); if (!hw) { - RT_ASSERT(false, "ieee80211 alloc failed\n"); + WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n"); return -ENOMEM; } rtlpriv = hw->priv; -- cgit v1.2.3 From b03d968b66440652f8cfd73b163cc9ccd50afd7d Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:22:58 -0600 Subject: rtlwifi: Remove RT_TRACE messages that use DBG_EMERG These messages are always logged and represent error conditions, thus we can use pr_err(). 
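As a rough illustration of the conversion applied here and in the per-driver follow-ups (a minimal sketch, not a hunk taken from the series), a DBG_EMERG trace site simply becomes an unconditional pr_err(); the rtlpriv pointer and component mask drop out, which is why several hunks below also delete local rtlpriv variables that the conversion leaves unused:

    /* before: routed through the driver's private trace macro */
    RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
             "Can't allocate sw for mac80211\n");

    /* after: always logged through the normal kernel log */
    pr_err("Can't allocate sw for mac80211\n");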
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 15 ++++----- drivers/net/wireless/realtek/rtlwifi/cam.c | 14 +++------ drivers/net/wireless/realtek/rtlwifi/core.c | 21 +++++-------- drivers/net/wireless/realtek/rtlwifi/efuse.c | 3 +- drivers/net/wireless/realtek/rtlwifi/pci.c | 36 ++++++++-------------- drivers/net/wireless/realtek/rtlwifi/ps.c | 3 +- drivers/net/wireless/realtek/rtlwifi/rc.c | 3 +- drivers/net/wireless/realtek/rtlwifi/usb.c | 46 +++++++++------------------- 8 files changed, 49 insertions(+), 92 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 4ac928bf1f8e..13325e15929e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -207,8 +207,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, *highest supported RX rate */ if (rtlpriv->dm.supp_phymode_switch) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Support phy mode switch\n"); + pr_info("Support phy mode switch\n"); ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0xFF; @@ -389,8 +388,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n", - rtlhal->current_bandtype); + pr_err("Err BAND %d\n", + rtlhal->current_bandtype); } } /* <5> set hw caps */ @@ -544,7 +543,7 @@ int rtl_init_core(struct ieee80211_hw *hw) * mac80211 hw in _rtl_init_mac80211. */ if (rtl_regd_init(hw, rtl_reg_notifier)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "REGD init failed\n"); + pr_err("REGD init failed\n"); return 1; } @@ -1694,8 +1693,7 @@ void rtl_watchdog_wq_callback(void *data) * we should reconnect this AP */ if (rtlpriv->link_info.roam_times >= 5) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "AP off, try to reconnect now\n"); + pr_err("AP off, try to reconnect now\n"); rtlpriv->link_info.roam_times = 0; ieee80211_connection_loss( rtlpriv->mac80211.vif); @@ -1886,8 +1884,7 @@ void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c index 8fe8b4cfae6c..a0605d8e9970 100644 --- a/drivers/net/wireless/realtek/rtlwifi/cam.c +++ b/drivers/net/wireless/realtek/rtlwifi/cam.c @@ -285,8 +285,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr) u8 i, *addr; if (NULL == sta_addr) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "sta_addr is NULL.\n"); + pr_err("sta_addr is NULL.\n"); return TOTAL_CAM_ENTRY; } /* Does STA already exist? */ @@ -298,9 +297,8 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr) /* Get a free CAM entry. 
*/ for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) { if ((bitmap & BIT(0)) == 0) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n", - rtlpriv->sec.hwsec_cam_bitmap, entry_idx); + pr_err("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n", + rtlpriv->sec.hwsec_cam_bitmap, entry_idx); rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx; memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx], sta_addr, ETH_ALEN); @@ -319,14 +317,12 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr) u8 i, *addr; if (NULL == sta_addr) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "sta_addr is NULL.\n"); + pr_err("sta_addr is NULL.\n"); return; } if (is_zero_ether_addr(sta_addr)) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "sta_addr is %pM\n", sta_addr); + pr_err("sta_addr is %pM\n", sta_addr); return; } /* Does STA already exist? */ diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 7ae774da24bd..3cfdbea40ffa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -117,8 +117,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context, } found_alt: if (firmware->size > rtlpriv->max_fw_size) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is too big!\n"); + pr_err("Firmware is too big!\n"); release_firmware(firmware); return; } @@ -303,8 +302,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, (u8 *)(&mac->basic_rates)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "operation mode %d is not support!\n", vif->type); + pr_err("operation mode %d is not supported!\n", + vif->type); err = -EOPNOTSUPP; goto out; } @@ -764,9 +763,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) default: mac->bw_40 = false; mac->bw_80 = false; - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - channel_type); + pr_err("switch case %#x not processed\n", + channel_type); break; } } @@ -1399,8 +1397,7 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw, "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid); return rtl_rx_agg_stop(hw, sta, tid); default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "IEEE80211_AMPDU_ERR!!!!:\n"); + pr_err("IEEE80211_AMPDU_ERR!!!!:\n"); return -EOPNOTSUPP; } return 0; @@ -1536,8 +1533,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, err = -EOPNOTSUPP; goto out_unlock; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "alg_err:%x!!!!:\n", key->cipher); + pr_err("alg_err:%x!!!!:\n", key->cipher); goto out_unlock; } if (key_type == WEP40_ENCRYPTION || @@ -1662,8 +1658,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, rtl_cam_delete_one_entry(hw, mac_addr, key_idx); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "cmd_err:%x!!!!:\n", cmd); + pr_err("cmd_err:%x!!!!:\n", cmd); } out_unlock: mutex_unlock(&rtlpriv->locks.conf_mutex); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 7becfef6cd5c..afc7550e8e41 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -1259,8 +1259,7 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, break; case EEPROM_93C46: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL8XXX did not boot from eeprom, check it !!\n"); + pr_err("RTL8XXX did not boot from eeprom, check it !!\n"); return 1; default: diff --git 
a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index bb767186609e..81ac0b393304 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -174,9 +174,8 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - rtlpci->const_support_pciaspm); + pr_err("switch case %#x not processed\n", + rtlpci->const_support_pciaspm); break; } @@ -1247,9 +1246,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, &buffer_desc_dma); if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate TX ring (prio = %d)\n", - prio); + pr_err("Cannot allocate TX ring (prio = %d)\n", + prio); return -ENOMEM; } @@ -1266,8 +1264,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, sizeof(*desc) * entries, &desc_dma); if (!desc || (unsigned long)desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate TX ring (prio = %d)\n", prio); + pr_err("Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } @@ -1314,8 +1311,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].buffer_desc || (ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate RX ring\n"); + pr_err("Cannot allocate RX ring\n"); return -ENOMEM; } @@ -1338,8 +1334,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].desc || (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate RX ring\n"); + pr_err("Cannot allocate RX ring\n"); return -ENOMEM; } @@ -1799,15 +1794,13 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw) static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) { - struct rtl_priv *rtlpriv = rtl_priv(hw); int err; _rtl_pci_init_struct(hw, pdev); err = _rtl_pci_init_trx_ring(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "tx ring initialization failed\n"); + pr_err("tx ring initialization failed\n"); return err; } @@ -2275,7 +2268,7 @@ int rtl_pci_probe(struct pci_dev *pdev, rtlpriv->cfg->ops->read_eeprom_info(hw); if (rtlpriv->cfg->ops->init_sw_vars(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); + pr_err("Can't init_sw_vars\n"); err = -ENODEV; goto fail3; } @@ -2287,22 +2280,20 @@ int rtl_pci_probe(struct pci_dev *pdev, /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't allocate sw for mac80211\n"); + pr_err("Can't allocate sw for mac80211\n"); goto fail3; } /* Init PCI sw */ err = rtl_pci_init(hw, pdev); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n"); + pr_err("Failed to init PCI\n"); goto fail3; } err = ieee80211_register_hw(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't register mac80211 hw.\n"); + pr_err("Can't register mac80211 hw.\n"); err = -ENODEV; goto fail3; } @@ -2310,8 +2301,7 @@ int rtl_pci_probe(struct pci_dev *pdev, err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "failed to create sysfs device attributes\n"); + pr_err("failed to create sysfs device attributes\n"); goto fail3; } diff --git 
a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index d0ffc4d508cf..4f8327097086 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -150,8 +150,7 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", state_toset); + pr_err("switch case %#x not processed\n", state_toset); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index ce8621a0f7aa..951d257cd4c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -267,8 +267,7 @@ static void *rtl_rate_alloc_sta(void *ppriv, rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp); if (!rate_priv) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unable to allocate private rc structure\n"); + pr_err("Unable to allocate private rc structure\n"); return NULL; } diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 1558f541ab15..b2b62ef8b07d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -421,14 +421,12 @@ static void _rtl_rx_completed(struct urb *urb); static int _rtl_prep_rx_urb(struct ieee80211_hw *hw, struct rtl_usb *rtlusb, struct urb *urb, gfp_t gfp_mask) { - struct rtl_priv *rtlpriv = rtl_priv(hw); void *buf; buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask, &urb->transfer_dma); if (!buf) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Failed to usb_alloc_coherent!!\n"); + pr_err("Failed to usb_alloc_coherent!!\n"); return -ENOMEM; } @@ -613,8 +611,6 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr, static void _rtl_rx_completed(struct urb *_urb) { struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context; - struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); - struct rtl_priv *rtlpriv = rtl_priv(hw); int err = 0; if (unlikely(IS_USB_STOP(rtlusb))) @@ -628,17 +624,15 @@ static void _rtl_rx_completed(struct urb *_urb) struct ieee80211_hdr *hdr; if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Too short packet from bulk IN! (len: %d)\n", - size); + pr_err("Too short packet from bulk IN! (len: %d)\n", + size); goto resubmit; } qlen = skb_queue_len(&rtlusb->rx_queue); if (qlen >= __RX_SKB_MAX_QUEUED) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Pending RX skbuff queue full! (qlen: %d)\n", - qlen); + pr_err("Pending RX skbuff queue full! 
(qlen: %d)\n", + qlen); goto resubmit; } @@ -647,8 +641,7 @@ static void _rtl_rx_completed(struct urb *_urb) skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding); if (!skb) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Can't allocate skb for bulk IN!\n"); + pr_err("Can't allocate skb for bulk IN!\n"); goto resubmit; } @@ -725,7 +718,6 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) struct urb *urb; int err; int i; - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); WARN_ON(0 == rtlusb->rx_urb_num); @@ -740,8 +732,7 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); if (err < 0) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Failed to prep_rx_urb!!\n"); + pr_err("Failed to prep_rx_urb!!\n"); usb_free_urb(urb); goto err_out; } @@ -839,7 +830,6 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) { int err; - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); usb_anchor_urb(_urb, &rtlusb->tx_submitted); @@ -847,8 +837,7 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) if (err < 0) { struct sk_buff *skb; - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Failed to submit urb\n"); + pr_err("Failed to submit urb\n"); usb_unanchor_urb(_urb); skb = (struct sk_buff *)_urb->context; kfree_skb(skb); @@ -859,7 +848,6 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; @@ -870,8 +858,7 @@ static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, txinfo->flags |= IEEE80211_TX_STAT_ACK; if (urb->status) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Urb has error status 0x%X\n", urb->status); + pr_err("Urb has error status 0x%X\n", urb->status); goto out; } /* TODO: statistics */ @@ -919,7 +906,6 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, enum rtl_txq qnum) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; @@ -927,8 +913,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "USB device is stopping...\n"); + pr_err("USB device is stopping...\n"); kfree_skb(skb); return; } @@ -936,8 +921,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, _skb = skb; _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); if (unlikely(!_urb)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't allocate urb. Drop skb!\n"); + pr_err("Can't allocate urb. 
Drop skb!\n"); kfree_skb(skb); return; } @@ -1102,20 +1086,18 @@ int rtl_usb_probe(struct usb_interface *intf, /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't allocate sw for mac80211\n"); + pr_err("Can't allocate sw for mac80211\n"); goto error_out; } if (rtlpriv->cfg->ops->init_sw_vars(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); + pr_err("Can't init_sw_vars\n"); goto error_out; } rtlpriv->cfg->ops->init_sw_leds(hw); err = ieee80211_register_hw(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't register mac80211 hw.\n"); + pr_err("Can't register mac80211 hw.\n"); err = -ENODEV; goto error_out; } -- cgit v1.2.3 From 004a1e167905bf664c81dfdb195f1b8917cc3649 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:22:59 -0600 Subject: rtlwifi: rtl8821ae: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 12 +++--- .../net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 15 +++---- .../net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 46 +++++++++------------- .../net/wireless/realtek/rtlwifi/rtl8821ae/rf.c | 5 +-- .../net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 14 +++---- 6 files changed, 37 insertions(+), 58 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index bdfd444955d2..32900c51f024 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -604,8 +604,7 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw) if ((mac->link_state < MAC80211_LINKED) && (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { rtl_dm_dig->min_undec_pwdb_for_dm = 0; - RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, - "Not connected to any\n"); + pr_debug("rtl8821ae: Not connected to any AP\n"); } if (mac->link_state >= MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_AP || diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 9e87b96626c6..814136cdfe2b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -125,8 +125,7 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, remainsize = size % FW_8821AE_PAGE_SIZE; if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); + pr_err("Page numbers should not greater then 8\n"); } for (page = 0; page < pagenums; page++) { @@ -162,8 +161,8 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) goto exit; } - RT_TRACE(rtlpriv, COMP_FW, DBG_EMERG, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); + pr_err("Checksum report OK! REG_MCUFWDL:0x%08x\n", + value32); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; @@ -186,9 +185,8 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) udelay(FW_8821AE_POLLING_DELAY); } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("Polling FW ready fail!! 
REG_MCUFWDL:0x%08x .\n", + value32); exit: return err; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index b045dd065cb6..a4e37e764563 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -822,9 +822,8 @@ static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -1927,7 +1926,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw) rtstatus = _rtl8821ae_init_mac(hw); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; return err; } @@ -2174,8 +2173,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; } @@ -3275,7 +3273,7 @@ void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl8821ae_read_adapter_info(hw, false); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } /*hal_ReadRFType_8812A()*/ /* _rtl8821ae_hal_customized_behavior(hw); */ @@ -3950,8 +3948,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index, if (mac->opmode == NL80211_IFTYPE_AP) { entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "Can not find free hwsecurity cam entry\n"); + pr_err("an not find free hwsecurity cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 7e2f7b0ae446..c60f07aa4acf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -215,7 +215,6 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw, static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); bool is_pi_mode = false; u32 retvalue = 0; @@ -223,7 +222,7 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, /* 2009/06/17 MH We can not execute IO for power save or other accident mode.*/ if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } /* <20120809, Kordan> CCA OFF(when entering), @@ -284,7 +283,7 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, u32 newoffset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; @@ -1665,7 +1664,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } _rtl8821ae_phy_init_tx_power_by_rate(hw); @@ -1674,7 +1673,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) 
BASEBAND_CONFIG_PHY_REG); } if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } @@ -1688,7 +1687,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_AGC_TAB); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, @@ -2064,8 +2063,7 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_err("switch case %#x not processed\n", rfpath); break; } return true; @@ -2132,8 +2130,7 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, case RF90_PATH_B: case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_err("switch case %#x not processed\n", rfpath); break; } return true; @@ -3336,8 +3333,7 @@ void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -3378,8 +3374,7 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv) else if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER) sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ; else - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "SCMapping: Not Correct Primary40MHz Setting\n"); + pr_err("SCMapping: Not Correct Primary40MHz Setting\n"); if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) && (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) @@ -3394,16 +3389,14 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv) (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)) sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ; else - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "SCMapping: Not Correct Primary40MHz Setting\n"); + pr_err("SCMapping: Not Correct Primary40MHz Setting\n"); } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ; else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ; else - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "SCMapping: Not Correct Primary40MHz Setting\n"); + pr_err("SCMapping: Not Correct Primary40MHz Setting\n"); } return (sc_set_40 << 4) | sc_set_20; } @@ -3479,8 +3472,8 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -4660,8 +4653,8 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) postprocessing = true; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", iotype); + pr_err("switch case %#x not processed\n", + iotype); break; } } while (false); @@ -4704,9 +4697,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw) case IO_CMD_PAUSE_BAND1_DM_BY_SCAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - rtlphy->current_io_type); + pr_err("switch case %#x not processed\n", + rtlphy->current_io_type); break; } rtlphy->set_io_inprogress = false; @@ 
-4811,8 +4803,8 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c index c6ab957023e6..95489f41f8a0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c @@ -34,8 +34,6 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw); void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { - struct rtl_priv *rtlpriv = rtl_priv(hw); - switch (bandwidth) { case HT_CHANNEL_WIDTH_20: rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3); @@ -50,8 +48,7 @@ void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 297938e0effd..220de5f89dbc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -192,14 +192,12 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_err("Can't alloc buffer for fw.\n"); return 1; } rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.wowlan_firmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for wowlan fw.\n"); + pr_err("Can't alloc buffer for wowlan fw.\n"); return 1; } @@ -218,8 +216,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request normal firmware!\n"); + pr_err("Failed to request normal firmware!\n"); return 1; } /*load wowlan firmware*/ @@ -229,8 +226,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_wowlan_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request wowlan firmware!\n"); + pr_err("Failed to request wowlan firmware!\n"); return 1; } return 0; @@ -313,7 +309,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = { .fwctrl_lps = true, .msi_support = true, .int_clear = true, - .debug = DBG_EMERG, + .debug = 0, .disable_watchdog = 0, }; -- cgit v1.2.3 From 4e2b4378f9d79737b0694df1e6cbd0c6f26c2a27 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:00 -0600 Subject: rtlwifi: rtl8723be: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). 
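For reference, the RT_ASSERT() to WARN_ONCE() hunks near the top of this series invert their conditions because the two macros test opposite senses: the driver-private RT_ASSERT() printed when the asserted condition was false, whereas WARN_ONCE() fires when its condition is true, prints a backtrace only the first time, and returns the condition so it can gate an error path. A sketch reusing a check from those hunks (the early return is hypothetical and not part of any patch here):

    /* old style: assert what is expected to be true */
    RT_ASSERT((channel >= 1 && channel <= 14),
              "illegal channel for Zebra: %d\n", channel);

    /* new style: warn once when the bad case is actually hit */
    if (WARN_ONCE(channel < 1 || channel > 14,
                  "rtl8192ee: illegal channel for Zebra: %d\n", channel))
            return false; /* hypothetical error handling, not in the driver */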
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 11 +++++------ drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 16 ++++++---------- .../net/wireless/realtek/rtlwifi/rtl8723be/led.c | 8 ++++---- .../net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 21 ++++++++++----------- drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c | 3 +-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 8 +++----- 6 files changed, 29 insertions(+), 38 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index 6b03c7fe8a04..fbf396143985 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -97,8 +97,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } @@ -121,8 +120,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", boxnum); + pr_err("switch case %#x not processed\n", + boxnum); break; } @@ -194,8 +193,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", cmd_len); + pr_err("switch case %#x not processed\n", + cmd_len); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 56a1ae438fba..ae2a38ed4ba5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -747,9 +747,8 @@ static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -1383,7 +1382,7 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw) } rtstatus = _rtl8723be_init_mac(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; goto exit; } @@ -1532,8 +1531,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; } @@ -2247,7 +2245,7 @@ void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl8723be_read_adapter_info(hw, false); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl8723be_hal_customized_behavior(hw); } @@ -2584,9 +2582,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c index 497913eb3b37..8232e010d090 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c @@ -57,8 +57,8 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -99,8 +99,8 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index b4e0a58f1c1f..ab0f39e46e1b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -467,7 +467,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } _rtl8723be_phy_init_tx_power_by_rate(hw); @@ -478,13 +478,13 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) } phy_txpower_by_rate_config(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, @@ -1249,8 +1249,7 @@ void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -1291,8 +1290,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -1316,8 +1315,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -1462,8 +1461,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c index 78f4f18d87b5..48491454b878 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c @@ -51,8 +51,7 @@ void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 847644d1f5f5..dd42c1a6d986 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -179,8 +179,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_err("Can't alloc buffer for fw.\n"); return 1; } @@ -190,8 +189,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } return 0; @@ -273,7 +271,7 @@ static struct rtl_mod_params rtl8723be_mod_params = { .fwctrl_lps = true, .msi_support = false, .disable_watchdog = false, - .debug = DBG_EMERG, + .debug = 0, .ant_sel = 0, }; -- cgit v1.2.3 From a67005bc46d93f40e00d54a0c14c6a4e50e245ca Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:01 -0600 Subject: rtlwifi: rtl8723ae: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). 
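A side effect of calling pr_err() directly is that any module prefix the old trace macro used to add is lost, which is presumably why the WARN_ONCE() conversions above hard-code prefixes such as "rtl8723ae:" into each format string. For the pr_*() family the usual kernel idiom is to define pr_fmt() before the first include so the prefix is added automatically; a sketch of that idiom, not something this series introduces:

    /* give every pr_err()/pr_info() in this file a "modname: " prefix */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/module.h>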
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c | 11 +++++------ drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 19 +++++++------------ .../net/wireless/realtek/rtlwifi/rtl8723ae/led.c | 8 ++++---- .../net/wireless/realtek/rtlwifi/rtl8723ae/phy.c | 21 ++++++++++----------- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c | 3 +-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 8 +++----- 6 files changed, 30 insertions(+), 40 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index 164a27e72537..bef3d94228c8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -99,8 +99,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } @@ -123,8 +122,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", boxnum); + pr_err("switch case %#x not processed\n", + boxnum); break; } @@ -229,8 +228,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", cmd_len); + pr_err("switch case %#x not processed\n", + cmd_len); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index e0da8dceff2c..bb9de2f6e695 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -570,9 +570,8 @@ static bool _rtl8723e_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -961,7 +960,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw) rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl8712e_init_mac(hw); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; goto exit; } @@ -1107,8 +1106,7 @@ static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw) "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Chip Version ID: Unknown. Bug?\n"); + pr_err("Chip Version ID: Unknown. 
Bug?\n"); break; } @@ -1157,8 +1155,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; break; } @@ -1852,7 +1849,7 @@ void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw) } else { rtlefuse->autoload_failflag = true; _rtl8723e_read_adapter_info(hw, false); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl8723e_hal_customized_behavior(hw); } @@ -2245,9 +2242,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c index 77c10047cb20..e1e6d24f1daa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c @@ -58,8 +58,8 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -100,8 +100,8 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index 8001dce9f277..5cf29f5a4b54 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -213,7 +213,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -227,13 +227,13 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, @@ -749,8 +749,7 @@ void rtl8723e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -791,8 +790,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); 
break; } @@ -816,8 +815,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl8723e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -977,8 +976,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c index 422771778e03..89958b64b52d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c @@ -51,8 +51,7 @@ void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index c51a9e8234e9..401f54266f15 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -172,8 +172,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x6000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_err("Can't alloc buffer for fw.\n"); return 1; } @@ -186,8 +185,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } return 0; @@ -270,7 +268,7 @@ static struct rtl_mod_params rtl8723e_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, - .debug = DBG_EMERG, + .debug = 0, .msi_support = false, .disable_watchdog = false, }; -- cgit v1.2.3 From a44f59d60365a7236601911b529566f1428227d4 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:02 -0600 Subject: rtlwifi: rtl8192ee: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). 
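The conversion is behaviour-preserving because DBG_EMERG sits at the bottom of the level scale, so RT_TRACE never filtered those calls in the first place. The stand-alone userspace model below only illustrates that point; rt_trace(), debug_level and the level values loosely mirror the driver's RT_TRACE machinery and are not the real definitions.

/* Userspace model only -- not rtlwifi code. A message logged at the
 * lowest level (DBG_EMERG == 0 is assumed here, matching the driver's
 * debug header) passes the level check unconditionally, which is why
 * an unconditional pr_err() is an equivalent replacement. */
#include <stdio.h>

#define DBG_EMERG 0	/* assumed lowest level */
#define DBG_LOUD  4

static int debug_level;	/* default 0, like ".debug = 0" in the sw.c hunks */

#define rt_trace(level, fmt, ...) \
	do { \
		if ((level) <= debug_level) \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

int main(void)
{
	rt_trace(DBG_EMERG, "Init MAC failed\n");	   /* always printed */
	pr_err("Init MAC failed\n");			   /* always printed */
	rt_trace(DBG_LOUD, "Firmware is ready to run!\n"); /* filtered at default level */
	return 0;
}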
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 21 +++++++----------- .../net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 11 ++++------ .../net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 25 +++++++++++----------- .../net/wireless/realtek/rtlwifi/rtl8192ee/rf.c | 3 +-- .../net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 8 +++---- 5 files changed, 28 insertions(+), 40 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 7a5cc0c746cd..8fe64dac72a6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -122,10 +122,8 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw, pagenums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; - if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); - } + if (pagenums > 8) + pr_err("Page numbers should not greater then 8\n"); for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; @@ -155,14 +153,13 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw) (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n", + value32); goto exit; } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); + "Checksum report OK! REG_MCUFWDL:0x%08x\n", value32); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; @@ -186,9 +183,8 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw) } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x. count = %d\n", - value32, counter); + pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x. 
count = %d\n", + value32, counter); exit: return err; @@ -241,8 +237,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) err = _rtl92ee_fw_free_to_go(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); + pr_err("Firmware is not ready to run!\n"); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "Firmware is ready to run!\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 00b211b3fc8f..f731f962ccbb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -1320,7 +1320,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, 0x65, 1); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; return err; } @@ -1485,8 +1485,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; } @@ -2206,7 +2205,7 @@ void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92ee_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl92ee_hal_customized_behavior(hw); @@ -2484,9 +2483,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index c35ba764a6c5..8b072ee8e0d5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -170,7 +170,7 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0xff; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -214,7 +214,7 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; @@ -650,7 +650,7 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw) rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -662,12 +662,12 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw) } _rtl92ee_phy_txpower_by_rate_configuration(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, @@ -1675,8 +1675,7 @@ void 
rtl92ee_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -1717,8 +1716,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -1742,8 +1741,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl92ee_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -1884,8 +1883,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n" , *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c index 73716c07d433..bc76a91da762 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c @@ -55,8 +55,7 @@ void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 46b605de36e7..eddc704b3ee3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -165,8 +165,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } @@ -179,8 +178,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -260,7 +258,7 @@ static struct rtl_mod_params rtl92ee_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, - .debug = DBG_EMERG, + .debug = 0, }; static const struct rtl_hal_cfg rtl92ee_hal_cfg = { -- cgit v1.2.3 From c7532b87653b91c1d5623e9dc147cccb8709769a Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:03 -0600 Subject: rtlwifi: rtl8723-common: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). 
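One side effect of the switch is that pr_err() no longer carries the per-adapter context that RT_TRACE received through rtlpriv, so a message is only as identifiable as its text. Since pr_err(fmt, ...) expands to printk(KERN_ERR pr_fmt(fmt), ...), a driver-wide prefix can be kept by defining pr_fmt() before the first include. The minimal module below is only a sketch of that common idiom; it is not taken from this series and the function names are made up.

/* Sketch of the pr_fmt() idiom, not part of this series. pr_fmt() must
 * be defined before any header that pulls in <linux/printk.h>. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

static int __init prfmt_demo_init(void)
{
	/* Logged at KERN_ERR with the module name as prefix. */
	pr_err("Init MAC failed\n");
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);
MODULE_LICENSE("GPL");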
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c | 17 +++++++---------- .../wireless/realtek/rtlwifi/rtl8723com/phy_common.c | 4 ++-- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index 6e518625edbe..ad72024b9397 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -129,8 +129,8 @@ void rtl8723_write_fw(struct ieee80211_hw *hw, remain_size = size % FW_8192C_PAGE_SIZE; if (page_nums > max_page) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater than %d\n", max_page); + pr_err("Page numbers should not greater than %d\n", + max_page); } for (page = 0; page < page_nums; page++) { offset = page * FW_8192C_PAGE_SIZE; @@ -209,9 +209,8 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= max_count) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report fail ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail ! REG_MCUFWDL:0x%08x .\n", + value32); goto exit; } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, @@ -239,9 +238,8 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, } while (counter++ < max_count); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", + value32); exit: return err; @@ -294,8 +292,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, err = rtl8723_fw_free_to_go(hw, is_8723be, max_count); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); + pr_err("Firmware is not ready to run!\n"); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "Firmware is ready to run!\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c index 6b80ddc9e5c9..43d24e1ee5e6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c @@ -99,7 +99,7 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0xff; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -147,7 +147,7 @@ void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; -- cgit v1.2.3 From 2d15acac23542ffe7b89ef65f80903ec33625899 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:04 -0600 Subject: rtlwifi: rtl8192se: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). 
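The ".debug = DBG_EMERG" to ".debug = 0" change in each sw.c follows the same reasoning: assuming DBG_EMERG is defined as 0 in the driver's debug header, the default verbosity is numerically unchanged and the initializer merely stops referencing a symbol that is being phased out. A trimmed sketch of that module-parameter pattern, with illustrative names rather than a literal copy of any one sw.c:

/* Trimmed sketch of the module-parameter pattern touched in the sw.c
 * hunks; struct and parameter names are illustrative only. */
#include <linux/module.h>

struct demo_mod_params {
	bool sw_crypto;
	int debug;
};

static struct demo_mod_params demo_params = {
	.sw_crypto = false,
	.debug = 0,	/* was DBG_EMERG; same numeric value, no symbol dependency */
};

module_param_named(swenc, demo_params.sw_crypto, bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)");
module_param_named(debug, demo_params.debug, int, 0444);
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");

MODULE_LICENSE("GPL");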
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8192se/fw.c | 46 ++++++++-------------- .../net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 41 ++++++++----------- .../net/wireless/realtek/rtlwifi/rtl8192se/led.c | 8 ++-- .../net/wireless/realtek/rtlwifi/rtl8192se/phy.c | 39 ++++++++---------- .../net/wireless/realtek/rtlwifi/rtl8192se/rf.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 8 ++-- 6 files changed, 57 insertions(+), 88 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index 32f9207b5cf5..1922e78ad6bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c @@ -113,8 +113,7 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw) case RF_2T2R: return 0x22; default: - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n", - rtlphy->rf_type); + pr_err("Unknown RF type(%x)\n", rtlphy->rf_type); break; } return 0x22; @@ -168,9 +167,7 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw, _rtl92s_fw_set_rqpn(hw); if (buffer_len >= MAX_FIRMWARE_CODE_SIZE) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Size over FIRMWARE_CODE_SIZE!\n"); - + pr_err("Size over FIRMWARE_CODE_SIZE!\n"); return false; } @@ -239,9 +236,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, } while (pollingcnt--); if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n", - cpustatus); + pr_err("FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n", + cpustatus); goto status_check_fail; } break; @@ -257,17 +253,15 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, } while (pollingcnt--); if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n", - cpustatus); + pr_err("FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n", + cpustatus); goto status_check_fail; } /* Turn On CPU */ rtstatus = _rtl92s_firmware_enable_cpu(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Enable CPU fail!\n"); + pr_err("Enable CPU fail!\n"); goto status_check_fail; } break; @@ -282,9 +276,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, } while (pollingcnt--); if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling DMEM code done fail ! cpustatus(%#x)\n", - cpustatus); + pr_err("Polling DMEM code done fail ! cpustatus(%#x)\n", + cpustatus); goto status_check_fail; } @@ -308,9 +301,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) || (pollingcnt <= 0)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling Load Firmware ready fail ! cpustatus(%x)\n", - cpustatus); + pr_err("Polling Load Firmware ready fail ! cpustatus(%x)\n", + cpustatus); goto status_check_fail; } @@ -331,8 +323,7 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, break; default: - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Unknown status check!\n"); + pr_err("Unknown status check!\n"); rtstatus = false; break; } @@ -380,8 +371,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) /* 2. Retrieve IMEM image. 
*/ if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size > sizeof(firmware->fw_imem))) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "memory for data image is less than IMEM required\n"); + pr_err("memory for data image is less than IMEM required\n"); goto fail; } else { puc_mappedfile += fwhdr_size; @@ -393,8 +383,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) /* 3. Retriecve EMEM image. */ if (pfwheader->img_sram_size > sizeof(firmware->fw_emem)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "memory for data image is less than EMEM required\n"); + pr_err("memory for data image is less than EMEM required\n"); goto fail; } else { puc_mappedfile += firmware->fw_imem_len; @@ -428,8 +417,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unexpected Download step!!\n"); + pr_err("Unexpected Download step!!\n"); goto fail; } @@ -438,14 +426,14 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) ul_filelength); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n"); + pr_err("fail!\n"); goto fail; } /* <3> Check whether load FW process is ready */ rtstatus = _rtl92s_firmware_checkready(hw, fwstatus); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n"); + pr_err("rtl8192se: firmware fail!\n"); goto fail; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index f4ab72101d04..d5e86b6fad11 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -75,11 +75,9 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HAL_DEF_WOWLAN: break; - default: { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); - break; - } + default: + pr_err("switch case %#x not processed\n", variable); + break; } } @@ -294,9 +292,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~AcmHw_VoqEn); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -431,8 +428,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } @@ -745,9 +741,8 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw) } while (pollingcnt--); if (pollingcnt <= 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n", - tmpu1b); + pr_err("Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n", + tmpu1b); tmpu1b = rtl_read_byte(rtlpriv, CMDR); rtl_write_byte(rtlpriv, CMDR, tmpu1b & (~TXDMA_EN)); udelay(2); @@ -1004,7 +999,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */ if (!rtl92s_phy_mac_config(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n"); + pr_err("MAC Config failed\n"); err = rtstatus; goto exit; } @@ -1024,7 +1019,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) /* 4. 
Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */ if (!rtl92s_phy_bb_config(hw)) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n"); + pr_err("BB Config failed\n"); err = rtstatus; goto exit; } @@ -1194,8 +1189,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + pr_err("Network type %d not supported!\n", type); return 1; } @@ -1685,8 +1679,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw) break; case EEPROM_93C46: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!\n"); + pr_err("RTL819X Not boot from eeprom, check it !!\n"); return; default: @@ -2030,7 +2023,7 @@ void rtl92se_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92se_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); rtlefuse->autoload_failflag = true; } } @@ -2463,8 +2456,8 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2481,9 +2474,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, - COMP_SEC, DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c index 870007801f6b..c740aeb0e83f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c @@ -63,8 +63,8 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0x0f); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -99,8 +99,8 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index 6299dc1c8eac..86cb853f7169 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c @@ -235,7 +235,6 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (!is_hal_stop(rtlhal)) { @@ -247,8 +246,7 @@ void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw, rtl92s_phy_set_fw_cmd(hw, FW_CMD_RESUME_DM_BY_SCAN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown operation\n"); + pr_err("Unknown operation\n"); break; } } @@ -288,8 +286,8 @@ void 
rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, BW_OPMODE, reg_bw_opmode); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -313,8 +311,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, RFPGA0_ANALOGPARAMETER2, 0x18); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -437,9 +435,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - currentcmd->cmdid); + pr_err("switch case %#x not processed\n", + currentcmd->cmdid); break; } @@ -644,8 +641,8 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw, _rtl92se_phy_set_rf_sleep(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } @@ -937,8 +934,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); goto phy_BB8190_Config_ParaFile_Fail; } @@ -951,8 +947,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n"); + pr_err("_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n"); goto phy_BB8190_Config_ParaFile_Fail; } @@ -1077,12 +1072,10 @@ bool rtl92s_phy_bb_config(struct ieee80211_hw *hw) (rtlphy->rf_type == RF_1T2R && rf_num != 2) || (rtlphy->rf_type == RF_2T2R && rf_num != 2) || (rtlphy->rf_type == RF_2T2R_GREEN && rf_num != 2)) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "RF_Type(%x) does not match RF_Num(%x)!!\n", - rtlphy->rf_type, rf_num); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "path1 0x%x, path2 0x%x, pathmap 0x%x\n", - path1, path2, pathmap); + pr_err("RF_Type(%x) does not match RF_Num(%x)!!\n", + rtlphy->rf_type, rf_num); + pr_err("path1 0x%x, path2 0x%x, pathmap 0x%x\n", + path1, path2, pathmap); } return rtstatus; @@ -1221,7 +1214,7 @@ void rtl92s_phy_chk_fwcmd_iodone(struct ieee80211_hw *hw) } while (--pollingcnt); if (pollingcnt == 0) - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Set FW Cmd fail!!\n"); + pr_err("Set FW Cmd fail!!\n"); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c index bd2fa7735866..ea5b8ec45ec9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c @@ -523,8 +523,7 @@ void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 998cefbd7e89..3c66d00abbac 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -96,8 +96,7 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void 
*context) return; } if (firmware->size > rtlpriv->max_fw_size) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is too big!\n"); + pr_err("Firmware is too big!\n"); rtlpriv->max_fw_size = 0; release_firmware(firmware); return; @@ -218,8 +217,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl92se_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -299,7 +297,7 @@ static struct rtl_mod_params rtl92se_mod_params = { .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, - .debug = DBG_EMERG, + .debug = 0, }; /* Because memory R/W bursting will cause system hang/crash -- cgit v1.2.3 From b8c79f454880708119e39264a89705e10961d3fd Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:05 -0600 Subject: rtlwifi: rtl8192de: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 22 ++++++-------- .../net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 35 ++++++++-------------- .../net/wireless/realtek/rtlwifi/rtl8192de/led.c | 8 ++--- .../net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 35 ++++++++++------------ .../net/wireless/realtek/rtlwifi/rtl8192de/rf.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 8 ++--- 6 files changed, 46 insertions(+), 65 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index fc1d4f2ba82d..87e90a3c8911 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -126,8 +126,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw, pagenums = size / FW_8192D_PAGE_SIZE; remainSize = size % FW_8192D_PAGE_SIZE; if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); + pr_err("Page numbers should not greater then 8\n"); } for (page = 0; page < pagenums; page++) { offset = page * FW_8192D_PAGE_SIZE; @@ -153,9 +152,8 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && (!(value32 & FWDL_ChkSum_rpt))); if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x\n", - value32); + pr_err("chksum report fail! 
REG_MCUFWDL:0x%08x\n", + value32); return -EIO; } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, @@ -327,8 +325,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "fw is not ready to run!\n"); + pr_err("fw is not ready to run!\n"); goto exit; } else { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "fw is ready to run!\n"); @@ -407,8 +404,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, while (!bwrite_success) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } boxnum = rtlhal->last_hmeboxnum; @@ -430,8 +426,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", boxnum); + pr_err("switch case %#x not processed\n", + boxnum); break; } isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); @@ -507,8 +503,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, boxcontent[idx]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", cmd_len); + pr_err("switch case %#x not processed\n", + cmd_len); break; } bwrite_success = true; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index fcb14c5db172..1bd1893bb401 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -163,8 +163,7 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -358,9 +357,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VOQEN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -500,8 +498,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -520,9 +517,8 @@ static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -920,7 +916,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw) /* rtlpriv->intf_ops->disable_aspm(hw); */ rtstatus = _rtl92de_init_mac(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags); return err; @@ -1119,11 +1115,8 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + pr_err("Network type %d not 
supported!\n", type); return 1; - break; - } rtl_write_byte(rtlpriv, MSR, bt_msr); rtlpriv->cfg->ops->led_control(hw, ledaction); @@ -1732,7 +1725,7 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) break; default: chipver |= CHIP_92D_D_CUT; - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n"); + pr_err("Unknown CUT!\n"); break; } rtlpriv->rtlhal.version = chipver; @@ -1816,7 +1809,7 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92de_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } return; } @@ -2169,8 +2162,8 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2186,9 +2179,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c index c22b8a215c87..4be787e53279 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c @@ -66,8 +66,8 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -101,8 +101,8 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index b0dc189e5579..de98d88199d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -716,7 +716,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw) rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -731,13 +731,13 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, @@ -833,8 +833,7 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + 
pr_err("switch case %#x not processed\n", rfpath); break; } return true; @@ -987,8 +986,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } switch (rtlphy->current_chan_bw) { @@ -1019,8 +1018,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -2842,9 +2841,8 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rtl92d_phy_reload_iqk_setting(hw, channel); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - currentcmd->cmdid); + pr_err("switch case %#x not processed\n", + currentcmd->cmdid); break; } break; @@ -2956,9 +2954,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw) rtl92d_dm_write_dig(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - rtlphy->current_io_type); + pr_err("switch case %#x not processed\n", + rtlphy->current_io_type); break; } rtlphy->set_io_inprogress = false; @@ -2988,8 +2985,8 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) postprocessing = true; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", iotype); + pr_err("switch case %#x not processed\n", + iotype); break; } } while (false); @@ -3176,8 +3173,8 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw, _rtl92d_phy_set_rfsleep(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c index 9dc9e915513e..021d3c538ac2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c @@ -63,8 +63,7 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 0d98b34eff0a..51463d7b130a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -171,8 +171,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } @@ -185,8 +184,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -256,7 +254,7 @@ static struct rtl_mod_params rtl92de_mod_params = { .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, - .debug = DBG_EMERG, + .debug = 0, }; 
static const struct rtl_hal_cfg rtl92de_hal_cfg = { -- cgit v1.2.3 From c38af3f06af4780c3b245a67a89d991bdd36a1e0 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:06 -0600 Subject: rtlwifi: rtl8192cu: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 33 ++++++++-------------- .../net/wireless/realtek/rtlwifi/rtl8192cu/led.c | 8 +++--- .../net/wireless/realtek/rtlwifi/rtl8192cu/mac.c | 12 +++----- .../net/wireless/realtek/rtlwifi/rtl8192cu/phy.c | 15 +++++----- .../net/wireless/realtek/rtlwifi/rtl8192cu/rf.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 5 ++-- 6 files changed, 29 insertions(+), 47 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index ddfcf9949d9e..9db6ec62787a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -452,8 +452,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) break; } if (pollingCount++ > 100) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n"); + pr_err("Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n"); return -ENODEV; } } while (true); @@ -486,8 +485,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) break; } if (pollingCount++ > 1000) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); + pr_err("Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); return -ENODEV; } } while (true); @@ -687,7 +685,6 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw, u8 queue_sel) { u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ; - struct rtl_priv *rtlpriv = rtl_priv(hw); if (!wmm_enable) { /* typical setting */ beQ = QUEUE_LOW; @@ -705,8 +702,7 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw, hiQ = QUEUE_HIGH; } _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n", - queue_sel); + pr_info("Tx queue select :0x%02x..\n", queue_sel); } static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw, @@ -765,8 +761,7 @@ static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw, break; } rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n", - hq_sele); + pr_info("Tx queue select :0x%02x..\n", hq_sele); } static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, @@ -848,8 +843,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) err = _rtl92cu_init_power_on(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to init power on!\n"); + pr_err("Failed to init power on!\n"); return err; } if (!wmm_enable) { @@ -860,8 +854,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) : WMM_CHIP_A_TX_PAGE_BOUNDARY; } if (false == rtl92c_init_llt_table(hw, boundary)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to init LLT Table!\n"); + pr_err("Failed to init LLT Table!\n"); return -EINVAL; } _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums, @@ -986,7 +979,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) 
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU; err = _rtl92cu_init_mac(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); + pr_err("init mac failed!\n"); goto exit; } err = rtl92c_download_fw(hw); @@ -1099,8 +1092,7 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM) udelay(50); } if (retry_cnts >= 100) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "#####=> 8051 reset failed!.........................\n"); + pr_err("8051 reset failed!.........................\n"); /* if 8051 reset fail, reset MAC. */ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, @@ -1340,8 +1332,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + pr_err("Network type %d not supported!\n", type); goto error_out; } rtl_write_byte(rtlpriv, MSR, bt_msr); @@ -1555,8 +1546,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -1926,8 +1916,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c index c6240813ff7b..70ea6c5692a5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c @@ -57,8 +57,8 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -90,8 +90,8 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index cf212f694db5..1b124eade846 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -157,9 +157,8 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n", - address, _LLT_OP_VALUE(value)); + pr_err("Failed to polling write LLT done at address %d! 
_LLT_OP_VALUE(%x)\n", + address, _LLT_OP_VALUE(value)); status = false; break; } @@ -262,8 +261,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "illegal switch case\n"); + pr_err("illegal switch case\n"); enc_algo = CAM_TKIP; break; } @@ -280,9 +278,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c index f35f435c094e..f068dd5317a7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c @@ -274,8 +274,7 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_err("switch case %#x not processed\n", rfpath); break; default: break; @@ -314,8 +313,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } switch (rtlphy->current_chan_bw) { @@ -336,8 +335,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -509,8 +508,8 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, _rtl92c_phy_set_rf_sleep(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 5e3183024aa0..9cff6bc4049c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -51,8 +51,7 @@ void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index b84e13ac6ead..935e8308e2e6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -68,8 +68,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && @@ -158,7 +157,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { static struct 
rtl_mod_params rtl92cu_mod_params = { .sw_crypto = 0, - .debug = DBG_EMERG, + .debug = 0, }; module_param_named(swenc, rtl92cu_mod_params.sw_crypto, bool, 0444); -- cgit v1.2.3 From e40a005652ad55860300e36cd6a463c870c9dbcc Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:07 -0600 Subject: rtlwifi: rtl8192ce: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 37 ++++++++-------------- .../net/wireless/realtek/rtlwifi/rtl8192ce/led.c | 7 ++-- .../net/wireless/realtek/rtlwifi/rtl8192ce/phy.c | 15 ++++----- .../net/wireless/realtek/rtlwifi/rtl8192ce/rf.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 8 ++--- 5 files changed, 28 insertions(+), 42 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index b757a74339c1..611987dfc207 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -140,8 +140,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -364,9 +363,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~AcmHw_VoqEn); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -551,8 +549,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array); break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %d not processed\n", variable); + pr_err("switch case %d not processed\n", variable); break; } } @@ -573,9 +570,8 @@ static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -963,7 +959,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl92ce_init_mac(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; goto exit; } @@ -1128,8 +1124,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw) break; } - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Chip Version ID: %s\n", versionid); + pr_info("Chip Version ID: %s\n", versionid); switch (version & 0x3) { case CHIP_88C: @@ -1143,8 +1138,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw) break; default: rtlphy->rf_type = RF_1T1R; - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "ERROR RF_Type is set!!\n"); + pr_err("ERROR RF_Type is set!!\n"); break; } @@ -1193,8 +1187,7 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw, "Set Network type to Mesh Point!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + 
pr_err("Network type %d not supported!\n", type); return 1; } @@ -1780,7 +1773,7 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92ce_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl92ce_hal_customized_behavior(hw); } @@ -2152,8 +2145,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2171,9 +2164,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c index 833193b751f7..bdaa848995ae 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c @@ -57,8 +57,8 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -92,8 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_info("switch case %#x not processed\n", pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c index d1b6a8fe7b6a..7c6d7fc1ef9a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c @@ -297,10 +297,10 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_info("Incorrect rfpath %#x\n", rfpath); break; default: + pr_info("switch case %#x not processed\n", rfpath); break; } return true; @@ -340,8 +340,7 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_info("unknown bandwidth: %#X\n", rtlphy->current_chan_bw); break; } @@ -365,8 +364,8 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -546,8 +545,8 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c index 7cae6350437c..e68ed7f37c79 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c @@ -51,8 +51,7 @@ void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index 691ddef1ae28..9bd2bff6212a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -158,8 +158,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } @@ -173,8 +172,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -249,7 +247,7 @@ static struct rtl_mod_params rtl92ce_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, - .debug = DBG_EMERG, + .debug = 0, }; static const struct rtl_hal_cfg rtl92ce_hal_cfg = { -- cgit v1.2.3 From 0fc30e9350befdc1b5c8367d208fb7e2d68c0031 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:08 -0600 Subject: rtlwifi: rtl8192c-common: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). 
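For clarity, the conversion is purely mechanical. A representative before/after fragment, mirroring the _rtl92c_phy_sw_chnl_step_by_step() hunk below (rtlpriv, COMP_ERR and *stage are the driver's existing symbols, shown here only as context, not new code), looks like:

	/* before: trace macro at DBG_EMERG, which was printed unconditionally */
	RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
		 "Invalid 'stage' = %d, Check it!\n", *stage);

	/* after: plain kernel error print carrying the same information */
	pr_err("Invalid 'stage' = %d, Check it!\n", *stage);

No log output is lost by the conversion, since DBG_EMERG messages were emitted regardless of the configured debug level.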
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../wireless/realtek/rtlwifi/rtl8192c/fw_common.c | 34 +++++++--------------- .../wireless/realtek/rtlwifi/rtl8192c/phy_common.c | 14 ++++----- 2 files changed, 17 insertions(+), 31 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index 9c3392ad01bc..f3fb31b9d9dc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -145,10 +145,8 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, pageNums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; - if (pageNums > 4) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 4\n"); - } + if (pageNums > 4) + pr_err("Page numbers should not greater then 4\n"); for (page = 0; page < pageNums; page++) { offset = page * FW_8192C_PAGE_SIZE; @@ -180,15 +178,10 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) (!(value32 & FWDL_ChkSum_rpt))); if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n", + value32); goto exit; } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -198,20 +191,15 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", - value32); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; mdelay(FW_8192C_POLLING_DELAY); } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32); + pr_err("Polling FW ready fail! 
REG_MCUFWDL:0x%08x.\n", + value32); exit: return err; @@ -251,8 +239,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) err = _rtl92c_fw_free_to_go(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); + pr_err("Firmware is not ready to run!\n"); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "Firmware is ready to run!\n"); @@ -327,8 +314,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 32fc4b60bc86..7c6e5d91439d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c @@ -104,7 +104,7 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0x3f; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -152,7 +152,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0x3f; @@ -209,7 +209,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } if (rtlphy->rf_type == RF_1T2R) { @@ -222,13 +222,13 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = @@ -860,8 +860,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } -- cgit v1.2.3 From 02527a73beb344f8f7fff39c04e29a94127b8ee8 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:09 -0600 Subject: rtlwifi: rtl8188ee: Remove all instances of DBG_EMERG This is a step toward eliminating the RT_TRACE macros. Those calls that have DBG_EMERG as the level are always logged, and they represent error conditions, thus they are replaced with pr_err(). 
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8188ee/fw.c | 34 +++++++--------------- .../net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 31 ++++++++------------ .../net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 25 ++++++++-------- .../net/wireless/realtek/rtlwifi/rtl8188ee/rf.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 8 ++--- 5 files changed, 38 insertions(+), 63 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 49da81abaf82..8f3786353b50 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -125,10 +125,8 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw, pagenums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; - if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); - } + if (pagenums > 8) + pr_err("Page numbers should not greater then 8\n"); for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; @@ -157,15 +155,10 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw) (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n", + value32); goto exit; } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -176,20 +169,15 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Polling FW ready success!! REG_MCUFWDL:0x%08x.\n", - value32); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; udelay(FW_8192C_POLLING_DELAY); } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32); + pr_err("Polling FW ready fail!! 
REG_MCUFWDL:0x%08x .\n", + value32); exit: return err; @@ -235,8 +223,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, err = _rtl88e_fw_free_to_go(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); + pr_err("Firmware is not ready to run!\n"); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "Firmware is ready to run!\n"); @@ -309,8 +296,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, while (!write_sucess) { wait_writeh2c_limit--; if (wait_writeh2c_limit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 1a17dfe3edf6..679e214415d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -358,8 +358,7 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -572,9 +571,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VOQEN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -737,8 +735,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) 2, array); break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -759,9 +756,8 @@ static bool _rtl88ee_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -1096,7 +1092,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) rtstatus = _rtl88ee_init_mac(hw); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_info("Init MAC failed\n"); err = 1; goto exit; } @@ -1252,8 +1248,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; break; } @@ -1987,7 +1982,7 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl88ee_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl88ee_hal_customized_behavior(hw); } @@ -2354,8 +2349,8 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2373,9 +2368,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not 
find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index ece8691ce3c0..14a256062614 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -176,7 +176,7 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0xff; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -220,7 +220,7 @@ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; @@ -373,7 +373,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw) rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -383,13 +383,13 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw) phy_config_bb_with_pghdr(hw, BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = @@ -1095,8 +1095,7 @@ void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -1137,8 +1136,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -1162,8 +1161,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl88e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -1303,8 +1302,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c index 26ac4c2903c7..30798b12a363 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c @@ -51,8 +51,7 @@ void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index f361808def47..276c74539fb3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -165,8 +165,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_info("Can't alloc buffer for fw.\n"); return 1; } @@ -177,8 +176,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_info("Failed to request firmware!\n"); return 1; } @@ -278,7 +276,7 @@ static struct rtl_mod_params rtl88ee_mod_params = { .swctrl_lps = false, .fwctrl_lps = false, .msi_support = true, - .debug = DBG_EMERG, + .debug = 0, }; static const struct rtl_hal_cfg rtl88ee_hal_cfg = { -- cgit v1.2.3 From c93ac39da006457fc5b9b74f1505624dd509bf68 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 15 Dec 2016 12:23:10 -0600 Subject: rtlwifi: Remove some redundant code The symbol DBG_EMERG is no longer used and is removed. In a number of places, the code has redundant messages. For example, if the failure for the firmware to run is logged, it is not necessary to log that the firmware has been started. In addition, extraneous braces are removed. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/debug.h | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c | 6 +----- .../net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c | 6 +----- drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 10 ++-------- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 19 ++----------------- .../wireless/realtek/rtlwifi/rtl8723com/fw_common.c | 9 +-------- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 16 +++------------- 7 files changed, 11 insertions(+), 57 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index 6f2e2b943c3d..a6cc3ca05c5e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -36,7 +36,7 @@ *unexpected HW behavior, HW BUG *and so on. 
*/ -#define DBG_EMERG 0 +/*#define DBG_EMERG 0 */ /* *Abnormal, rare, or unexpeted cases. diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 8f3786353b50..afa784a46634 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -222,12 +222,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, _rtl88e_enable_fw_download(hw, false); err = _rtl88e_fw_free_to_go(hw); - if (err) { + if (err) pr_err("Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "Firmware is ready to run!\n"); - } return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index f3fb31b9d9dc..433ab7f33acf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -238,12 +238,8 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) _rtl92c_enable_fw_download(hw, false); err = _rtl92c_fw_free_to_go(hw); - if (err) { + if (err) pr_err("Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Firmware is ready to run!\n"); - } return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 87e90a3c8911..aa1e51c871df 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -125,9 +125,8 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw, _rtl92d_fill_dummy(bufferPtr, &size); pagenums = size / FW_8192D_PAGE_SIZE; remainSize = size % FW_8192D_PAGE_SIZE; - if (pagenums > 8) { + if (pagenums > 8) pr_err("Page numbers should not greater then 8\n"); - } for (page = 0; page < pagenums; page++) { offset = page * FW_8192D_PAGE_SIZE; _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), @@ -156,8 +155,6 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) value32); return -EIO; } - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x\n", value32); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); @@ -324,12 +321,9 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) value &= (~BIT(5)); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); - if (err) { + if (err) pr_err("fw is not ready to run!\n"); goto exit; - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "fw is ready to run!\n"); - } exit: err = _rtl92d_fw_init(hw); return err; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 8fe64dac72a6..78ee6e1d1850 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -157,10 +157,6 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw) value32); goto exit; } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK! REG_MCUFWDL:0x%08x\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -171,13 +167,8 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , - "Polling FW ready success!! REG_MCUFWDL:0x%08x. 
count = %d\n", - value32, counter); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; udelay(FW_8192C_POLLING_DELAY*10); @@ -236,12 +227,6 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) _rtl92ee_enable_fw_download(hw, false); err = _rtl92ee_fw_free_to_go(hw); - if (err) { - pr_err("Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , - "Firmware is ready to run!\n"); - } return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index ad72024b9397..8e0d038df701 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -213,9 +213,6 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, value32); goto exit; } - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY; value32 &= ~WINTINI_RDY; rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); @@ -291,12 +288,8 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, rtl8723_enable_fw_download(hw, false); err = rtl8723_fw_free_to_go(hw, is_8723be, max_count); - if (err) { + if (err) pr_err("Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Firmware is ready to run!\n"); - } return 0; } EXPORT_SYMBOL_GPL(rtl8723_download_fw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 814136cdfe2b..94e97dce5457 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -124,9 +124,8 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, pagenums = size / FW_8821AE_PAGE_SIZE; remainsize = size % FW_8821AE_PAGE_SIZE; - if (pagenums > 8) { + if (pagenums > 8) pr_err("Page numbers should not greater then 8\n"); - } for (page = 0; page < pagenums; page++) { offset = page * FW_8821AE_PAGE_SIZE; @@ -160,10 +159,6 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) value32); goto exit; } - - pr_err("Checksum report OK! REG_MCUFWDL:0x%08x\n", - value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -174,13 +169,8 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) counter = 0; do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", - value32); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; udelay(FW_8821AE_POLLING_DELAY); } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT); -- cgit v1.2.3 From 26eb994d52399a4767be30ccc2a7d68a0f34cadc Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sun, 18 Dec 2016 04:27:24 +0530 Subject: libertas: constify cfg80211_ops structures cfg80211_ops structures are only passed as an argument to the function wiphy_new. This argument is of type const, so cfg80211_ops strutures having this property can be declared as const. Done using Coccinelle @r1 disable optional_qualifier @ identifier i; position p; @@ static struct cfg80211_ops i@p = {...}; @ok1@ identifier r1.i; position p; @@ wiphy_new(&i@p,...) 
@bad@ position p!={r1.p,ok1.p}; identifier r1.i; @@ i@p @depends on !bad disable optional_qualifier@ identifier r1.i; @@ +const struct cfg80211_ops i; File size before: text data bss dec hex filename 21225 1954 16 23195 5a9b wireless/marvell/libertas/cfg.o File size after: text data bss dec hex filename 22041 1154 16 23211 5aab wireless/marvell/libertas/cfg.o Signed-off-by: Bhumika Goyal Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/cfg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 7ff2efadceca..3f97acb57e66 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -2086,7 +2086,7 @@ static int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, * Initialization */ -static struct cfg80211_ops lbs_cfg80211_ops = { +static const struct cfg80211_ops lbs_cfg80211_ops = { .set_monitor_channel = lbs_cfg_set_monitor_channel, .libertas_set_mesh_channel = lbs_cfg_set_mesh_channel, .scan = lbs_cfg_scan, -- cgit v1.2.3 From 40b368af4b750863b2cb66a3a9513241db2f0793 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Wed, 28 Dec 2016 15:40:04 -0600 Subject: rtlwifi: Fix alignment issues The addresses of Wlan NIC registers are natural alignment, but some drivers have bugs. These are evident on platforms that need natural alignment to access registers. This change contains the following: 1. Function _rtl8821ae_dbi_read() is used to read one byte from DBI, thus it should use rtl_read_byte(). 2. Register 0x4C7 of 8192ee is single byte. Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Stable Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index f731f962ccbb..b44244a8a22f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -1006,7 +1006,7 @@ static void _rtl92ee_hw_configure(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a); /* Note Data sheet don't define */ - rtl_write_word(rtlpriv, 0x4C7, 0x80); + rtl_write_byte(rtlpriv, 0x4C7, 0x80); rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index a4e37e764563..4f83eee1ff75 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) } if (0 == tmp) { read_addr = REG_DBI_RDATA + addr % 4; - ret = rtl_read_word(rtlpriv, read_addr); + ret = rtl_read_byte(rtlpriv, read_addr); } return ret; } -- cgit v1.2.3 From 3b1fc7680a0f86be1d6f6bdf1143b3746e6b8d74 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 29 Dec 2016 16:00:29 +0000 Subject: rtlwifi: fix spelling mistake: "contry" -> "country" trivial fix to spelling mistake in RT_TRACE message Signed-off-by: Colin Ian King Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/regd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index 
6ee6bf8e7eaf..558c31bf5c80 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -440,7 +440,7 @@ int rtl_regd_init(struct ieee80211_hw *hw, if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, - "rtl: EEPROM indicates invalid contry code, world wide 13 should be used\n"); + "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n"); rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13; } -- cgit v1.2.3 From ad334bbb07b07e2873942571b0c9f3c34571bd47 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 23 Dec 2016 00:43:22 +0000 Subject: brcmfmac: fix spelling mistakes on "Ivalid" Trivial fixes to spelling mistake "Ivalid" to "Invalid" in brcmf_err error messages. Signed-off-by: Colin Ian King Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index ccae3bbe7db2..13ca3eb2462b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3971,7 +3971,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, pval |= AES_ENABLED; break; default: - brcmf_err("Ivalid unicast security info\n"); + brcmf_err("Invalid unicast security info\n"); } offset++; } @@ -4015,7 +4015,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, wpa_auth |= WPA2_AUTH_1X_SHA256; break; default: - brcmf_err("Ivalid key mgmt info\n"); + brcmf_err("Invalid key mgmt info\n"); } offset++; } -- cgit v1.2.3 From 7bb387c5ab12aeac3d5eea28686489ff46b53ca9 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 29 Dec 2016 15:39:37 -0800 Subject: net: Allow IP_MULTICAST_IF to set index to L3 slave IP_MULTICAST_IF fails if sk_bound_dev_if is already set and the new index does not match it. e.g., ntpd[15381]: setsockopt IP_MULTICAST_IF 192.168.1.23 fails: Invalid argument Relax the check in setsockopt to allow setting mc_index to an L3 slave if sk_bound_dev_if points to an L3 master. Make a similar change for IPv6. In this case change the device lookup to take the rcu_read_lock avoiding a refcnt. The rcu lock is also needed for the lookup of a potential L3 master device. This really only silences a setsockopt failure since uses of mc_index are secondary to sk_bound_dev_if if it is set. In both cases, if either index is an L3 slave or master, lookups are directed to the same FIB table so relaxing the check at setsockopt time causes no harm. Patch is based on a suggested change by Darwin for a problem noted in their code base. Suggested-by: Darwin Dingel Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv4/ip_sockglue.c | 7 ++++++- net/ipv6/ipv6_sockglue.c | 16 ++++++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 57e1405e8282..cf9b43de4690 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -843,6 +843,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, { struct ip_mreqn mreq; struct net_device *dev = NULL; + int midx; if (sk->sk_type == SOCK_STREAM) goto e_inval; @@ -887,11 +888,15 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EADDRNOTAVAIL; if (!dev) break; + + midx = l3mdev_master_ifindex(dev); + dev_put(dev); err = -EINVAL; if (sk->sk_bound_dev_if && - mreq.imr_ifindex != sk->sk_bound_dev_if) + mreq.imr_ifindex != sk->sk_bound_dev_if && + (!midx || midx != sk->sk_bound_dev_if)) break; inet->mc_index = mreq.imr_ifindex; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ee97c44e2aa0..a531ba032b85 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -595,16 +595,24 @@ done: if (val) { struct net_device *dev; + int midx; - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) - goto e_inval; + rcu_read_lock(); - dev = dev_get_by_index(net, val); + dev = dev_get_by_index_rcu(net, val); if (!dev) { + rcu_read_unlock(); retv = -ENODEV; break; } - dev_put(dev); + midx = l3mdev_master_ifindex_rcu(dev); + + rcu_read_unlock(); + + if (sk->sk_bound_dev_if && + sk->sk_bound_dev_if != val && + (!midx || midx != sk->sk_bound_dev_if)) + goto e_inval; } np->mcast_oif = val; retv = 0; -- cgit v1.2.3 From 107fded7bf616ad6f46823d98b8ed6405d7adf2d Mon Sep 17 00:00:00 2001 From: Thomas Preisner Date: Fri, 30 Dec 2016 03:37:53 +0100 Subject: net: 3com: typhoon: typhoon_init_one: fix incorrect return values In a few cases the err-variable is not set to a negative error code if a function call in typhoon_init_one() fails and thus 0 is returned instead. It may be better to set err to the appropriate negative error code before returning. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188841 Reported-by: Pan Bian Signed-off-by: Thomas Preisner Signed-off-by: Milan Stephan Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/typhoon.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 9fe3990319ec..25f2e92a67c4 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -2402,8 +2402,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); - if(!is_valid_ether_addr(dev->dev_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { err_msg = "Could not obtain valid ethernet address, aborting"; + err = -EIO; goto error_out_reset; } @@ -2411,7 +2412,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * later when we print out the version reported. 
*/ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp); + if (err < 0) { err_msg = "Could not get Sleep Image version"; goto error_out_reset; } @@ -2453,7 +2455,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; - if(register_netdev(dev) < 0) { + err = register_netdev(dev); + if (err < 0) { err_msg = "unable to register netdev"; goto error_out_reset; } -- cgit v1.2.3 From 6b6bbb5922a4b1d4b58125a572da91010295fba3 Mon Sep 17 00:00:00 2001 From: Thomas Preisner Date: Fri, 30 Dec 2016 03:37:54 +0100 Subject: net: 3com: typhoon: typhoon_init_one: make return values more specific In some cases the return value of a failing function is not being used and the function typhoon_init_one() returns another negative error code instead. Signed-off-by: Thomas Preisner Signed-off-by: Milan Stephan Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/typhoon.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 25f2e92a67c4..1986ad17950a 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -2370,9 +2370,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * 4) Get the hardware address. * 5) Put the card to sleep. */ - if (typhoon_reset(ioaddr, WaitSleep) < 0) { + err = typhoon_reset(ioaddr, WaitSleep); + if (err < 0) { err_msg = "could not reset 3XP"; - err = -EIO; goto error_out_dma; } @@ -2386,16 +2386,16 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) typhoon_init_interface(tp); typhoon_init_rings(tp); - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST); + if (err < 0) { err_msg = "cannot boot 3XP sleep image"; - err = -EIO; goto error_out_reset; } INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp); + if (err < 0) { err_msg = "cannot read MAC address"; - err = -EIO; goto error_out_reset; } @@ -2430,9 +2430,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if(xp_resp[0].numDesc != 0) tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { + err = typhoon_sleep(tp, PCI_D3hot, 0); + if (err < 0) { err_msg = "cannot put adapter to sleep"; - err = -EIO; goto error_out_reset; } -- cgit v1.2.3 From 32bcad4b7f33f79847e6d4757e9cf26c6980bac6 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 15 Dec 2016 18:13:23 +0100 Subject: batman-adv: Start new development cycle Signed-off-by: Simon Wunderlich --- net/batman-adv/main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index a6cc8040a21d..8683542067ba 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -24,7 +24,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2016.5" +#define BATADV_SOURCE_VERSION "2017.0" #endif /* B.A.T.M.A.N. 
parameters */ -- cgit v1.2.3 From a60db8e70313a63e1fa10b9687ed549f6e85cdff Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 29 Dec 2016 20:14:00 +0000 Subject: wlcore: fix spelling mistake in wl1271_warning trivial fix to spelling mistake of function name in wl1271_warning, should be dynamic_ps_timeout instead of dyanmic_ps_timeout. Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index 7f672f6879d0..58e148d7bc7b 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -281,7 +281,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, } if (value < 1 || value > 65535) { - wl1271_warning("dyanmic_ps_timeout is not in valid range"); + wl1271_warning("dynamic_ps_timeout is not in valid range"); return -ERANGE; } -- cgit v1.2.3 From e16e558e83ed848f5dac3931dc7549d7a3090f7e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 30 Dec 2016 14:50:27 +0000 Subject: rtlwifi: fix spelling mistake: "encrypiton" -> "encryption" trivial fix to spelling mistake in RT_TRACE message Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 3cfdbea40ffa..9ec72d1d089a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1529,7 +1529,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_type = AESCMAC_ENCRYPTION; RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n"); RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, - "HW don't support CMAC encrypiton, use software CMAC encrypiton\n"); + "HW don't support CMAC encryption, use software CMAC encryption\n"); err = -EOPNOTSUPP; goto out_unlock; default: -- cgit v1.2.3 From e8f1cb507d01205e03f69809af4347ed8ec9db5b Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:00 +0200 Subject: qed*: Update to dual-license Since the submission of the qedr driver, there's inconsistency in the licensing of the various qed/qede files - some are GPLv2 and some are dual-license. Since qedr requires dual-license and it's dependent on both, we're updating the licensing of all qed/qede source files. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 34 +++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_dev.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 34 +++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_hw.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_hw.h | 32 ++++++++++++++++--- .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 34 +++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_int.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_int.h | 34 +++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_l2.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_l2.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 31 +++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 31 +++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_main.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 34 +++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_ooo.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_roce.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_roce.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_selftest.c | 32 +++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 34 +++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_spq.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_vf.c | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_vf.h | 32 ++++++++++++++++--- drivers/net/ethernet/qlogic/qede/qede.h | 37 ++++++++++++++++++---- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 37 ++++++++++++++++++---- drivers/net/ethernet/qlogic/qede/qede_main.c | 37 ++++++++++++++++++---- drivers/net/ethernet/qlogic/qede/qede_roce.c | 2 +- include/linux/qed/common_hsi.h | 33 ++++++++++++++++--- include/linux/qed/eth_common.h | 32 ++++++++++++++++--- include/linux/qed/iscsi_common.h | 32 ++++++++++++++++--- include/linux/qed/qed_chain.h | 34 +++++++++++++++++--- include/linux/qed/qed_eth_if.h | 32 ++++++++++++++++--- include/linux/qed/qed_if.h | 35 ++++++++++++++++---- include/linux/qed/qed_iov_if.h | 32 ++++++++++++++++--- include/linux/qed/qed_iscsi_if.h | 32 ++++++++++++++++--- include/linux/qed/qed_ll2_if.h | 31 +++++++++++++++--- include/linux/qed/qed_roce_if.h | 2 +- include/linux/qed/qede_roce.h | 2 +- include/linux/qed/rdma_common.h | 32 ++++++++++++++++--- include/linux/qed/roce_common.h | 32 ++++++++++++++++--- include/linux/qed/storage_common.h | 32 ++++++++++++++++--- include/linux/qed/tcp_common.h | 
32 ++++++++++++++++--- 56 files changed, 1449 insertions(+), 223 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 44c184ebe3b0..f6e0ff439b1c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 0c42c240b5cf..dcb8fc185df7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 2b8bdaa77800..98f4973cac9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_CXT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index a4789a93b692..dc0d2c9ad6b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 9ba681643d05..d70300fda020 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_DCBX_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3b2250021c5f..33e720143b8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index b6711c106597..5d37ba24da40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_DEV_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 785ab03683eb..5d31189288e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_HSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 6e4fae9b1430..1f606516b6aa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index d01557092868..9277264d2e65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_HW_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 23e455f22adc..d891a6852695 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d567ba94c8d1..243b64e0d4dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index 1e832049983d..555dd086796d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_INIT_OPS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index c68dbf7092b1..84310b60849b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 0948be64dc78..0ae0bb4593ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_INT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 17a70122df05..3a44d6b395fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 67c25f3db4d5..20c187f4ed0b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_ISCSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 6a3727c4c0c6..fd153d27e3a6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 48c9bfc28140..2f0303761d6b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_L2_H #define _QED_L2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 8e5cb7605b0f..05e32f4322eb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation * - * Copyright (c) 2015 QLogic Corporation + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 6625a3ae5a33..c7f2975590ee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation * - * Copyright (c) 2015 QLogic Corporation + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_LL2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index aeb98d8c5626..e1568428569b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6dd3ce443484..256674131c47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 407a2c1830fb..363dce0f16b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_MCP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index 155abcb507fd..7d731c6cb892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 7a0670a9a074..4f138fb5f533 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_OOO_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 97544205a8c1..b6722c6ff761 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef REG_ADDR_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 2a16547c8966..bd4cad2b343b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 279f342af8db..36cf4b2ab7fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index 48bfaecaf6dc..1bafc05db2b8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,3 +1,35 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + #include #include "qed.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 9c897bc68d05..043882959606 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_SP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index a39ef2e7a9a6..097a72987572 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index f022469bdcf8..645328a9f0cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 85b09dd1787a..469e857f5dcd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 509c02b4772e..1738d5bd2e8e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_SRIOV_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 60b31a8ede73..af0542c0351c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 11eb3854e6f2..7da0b165d8bc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_VF_H diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index c79dc78746fc..b2c6cc997bc0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ #ifndef _QEDE_H_ #define _QEDE_H_ #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1c48f445c93b..8e971b289d4b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ #include #include #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index aecdd1c5c0ea..0851fe3a8f25 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ #include #include #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c index 49272716a7c4..f00657ce7c8f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_roce.c +++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c @@ -1,5 +1,5 @@ /* QLogic qedr NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 734deb094618..c33080baf38c 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,10 +1,35 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2016 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ + #ifndef _COMMON_HSI_H #define _COMMON_HSI_H #include diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 1aa0727c4136..4b402fb0eaad 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __ETH_COMMON__ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 8f64b1223c2f..4c5747babcf6 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef __ISCSI_COMMON__ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 37dfba101c6c..5cd7a4608c9b 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_CHAIN_H diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 7a52f7c58c37..d91651ecfd26 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_ETH_IF_H diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 4b454f4f5b25..d1576a2bcfc9 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver - * - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_IF_H diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index 5a4f8d0899e9..b1f979135f97 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_IOV_IF_H diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index d27912480cb3..f70bb81b8b6a 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_ISCSI_IF_H diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index fd75c265dba3..4fb4666ea879 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation * - * Copyright (c) 2015 QLogic Corporation + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_LL2_IF_H diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h index 53047d3fa678..f742d4312c9d 100644 --- a/include/linux/qed/qed_roce_if.h +++ b/include/linux/qed/qed_roce_if.h @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h index f48d64b0e2fb..3b8dd551a98c 100644 --- a/include/linux/qed/qede_roce.h +++ b/include/linux/qed/qede_roce.h @@ -1,5 +1,5 @@ /* QLogic qedr NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 7663725faa94..f773aa5e746f 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef __RDMA_COMMON__ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 2eeaf3dc6646..bad02df213df 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __ROCE_COMMON__ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 3b8e1efd9bc2..03f3e37ab059 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __STORAGE_COMMON__ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index dc3889d1bbe6..46fe7856f1b2 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __TCP_COMMON__ -- cgit v1.2.3 From cdda926d4098690de0d74ad6e7bb51bf4d7a4104 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:01 +0200 Subject: qede: Break datapath logic into its own file This adds a new file qede_fp.c and relocates the datapath-related logic into it [from qede_main.c]. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/Makefile | 2 +- drivers/net/ethernet/qlogic/qede/qede.h | 11 + drivers/net/ethernet/qlogic/qede/qede_fp.c | 1674 ++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 1636 ------------------------- 4 files changed, 1686 insertions(+), 1637 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qede/qede_fp.c diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 048a230c3ce0..c54922be50d1 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QEDE) := qede.o -qede-y := qede_main.o qede_ethtool.o +qede-y := qede_main.o qede_fp.o qede_ethtool.o qede-$(CONFIG_DCB) += qede_dcbnl.o qede-$(CONFIG_QED_RDMA) += qede_roce.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index b2c6cc997bc0..b4e419689000 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -408,6 +408,17 @@ struct qede_reload_args { } u; }; +/* Datapath functions definition */ +netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp); +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq); +int qede_free_tx_pkt(struct qede_dev *edev, + struct qede_tx_queue *txq, int *len); +int qede_poll(struct napi_struct *napi, int budget); +irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie); #ifdef CONFIG_DCB void qede_set_dcbnl_ops(struct net_device *ndev); #endif diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c new file mode 100644 index 000000000000..1614eed2d65d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -0,0 +1,1674 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "qede.h" +/********************************* + * Content also used by slowpath * + *********************************/ + +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) +{ + struct sw_rx_data *sw_rx_data; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + struct page *data; + + data = alloc_pages(GFP_ATOMIC, 0); + if (unlikely(!data)) + return -ENOMEM; + + /* Map the entire page as it would be used + * for multiple RX buffer segment size mapping. + */ + mapping = dma_map_page(rxq->dev, data, 0, + PAGE_SIZE, rxq->data_direction); + if (unlikely(dma_mapping_error(rxq->dev, mapping))) { + __free_page(data); + return -ENOMEM; + } + + sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->page_offset = 0; + sw_rx_data->data = data; + sw_rx_data->mapping = mapping; + + /* Advance PROD and get BD pointer */ + rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); + WARN_ON(!rx_bd); + rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + + rxq->sw_rx_prod++; + + return 0; +} + +/* Unmap the data and free skb */ +int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) +{ + u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_bd *tx_data_bd; + int bds_consumed = 0; + int nbds; + bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; + int i, split_bd_len = 0; + + if (unlikely(!skb)) { + DP_ERR(edev, + "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", + idx, txq->sw_tx_cons, txq->sw_tx_prod); + return -1; + } + + *len = skb->len; + + first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + bds_consumed++; + + nbds = first_bd->data.nbds; + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + bds_consumed++; + } + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + while (bds_consumed++ < nbds) + qed_chain_consume(&txq->tx_pbl); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; + + return 0; +} + +/* Unmap the data and free skb when mapping failed during start_xmit */ +static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, + struct eth_tx_1st_bd *first_bd, + int nbd, bool data_split) +{ + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; + struct eth_tx_bd *tx_data_bd; + int i, split_bd_len = 0; + + /* Return prod to its position before this skb was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + nbd--; + } + + dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), + 
BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < nbd; i++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + if (tx_data_bd->nbytes) + dma_unmap_page(txq->dev, + BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + /* Return again prod to its position before this skb was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; +} + +static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) +{ + u32 rc = XMIT_L4_CSUM; + __be16 l3_proto; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return XMIT_PLAIN; + + l3_proto = vlan_get_protocol(skb); + if (l3_proto == htons(ETH_P_IPV6) && + (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + *ipv6_ext = 1; + + if (skb->encapsulation) { + rc |= XMIT_ENC; + if (skb_is_gso(skb)) { + unsigned short gso_type = skb_shinfo(skb)->gso_type; + + if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || + (gso_type & SKB_GSO_GRE_CSUM)) + rc |= XMIT_ENC_GSO_L4_CSUM; + + rc |= XMIT_LSO; + return rc; + } + } + + if (skb_is_gso(skb)) + rc |= XMIT_LSO; + + return rc; +} + +static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, + struct eth_tx_2nd_bd *second_bd, + struct eth_tx_3rd_bd *third_bd) +{ + u8 l4_proto; + u16 bd2_bits1 = 0, bd2_bits2 = 0; + + bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); + + bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) + << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; + + bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); + + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) + l4_proto = ipv6_hdr(skb)->nexthdr; + else + l4_proto = ip_hdr(skb)->protocol; + + if (l4_proto == IPPROTO_UDP) + bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + + if (third_bd) + third_bd->data.bitfields |= + cpu_to_le16(((tcp_hdrlen(skb) / 4) & + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); + + second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); + second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); +} + +static int map_frag_to_bd(struct qede_tx_queue *txq, + skb_frag_t *frag, struct eth_tx_bd *bd) +{ + dma_addr_t mapping; + + /* Map skb non-linear frag data for DMA */ + mapping = skb_frag_dma_map(txq->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(txq->dev, mapping))) + return -ENOMEM; + + /* Setup the data pointer of the frag data */ + BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); + + return 0; +} + +static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) +{ + if (is_encap_pkt) + return (skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data); + else + return (skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data); +} + +/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) +static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) +{ + int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; + + if (xmit_type & XMIT_LSO) { + int hlen; + + hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); + + /* linear payload would require its own BD */ + if (skb_headlen(skb) > hlen) + allowed_frags--; + } + + return (skb_shinfo(skb)->nr_frags > allowed_frags); +} +#endif 
+ +static inline void qede_update_tx_producer(struct qede_tx_queue *txq) +{ + /* wmb makes sure that the BDs data is updated before updating the + * producer, otherwise FW may read old data from the BDs. + */ + wmb(); + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the queue lock is released and another start_xmit is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + +static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, + struct sw_rx_data *metadata, u16 padding, u16 length) +{ + struct qede_tx_queue *txq = fp->xdp_tx; + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct eth_tx_1st_bd *first_bd; + + if (!qed_chain_get_elem_left(&txq->tx_pbl)) { + txq->stopped_cnt++; + return -ENOMEM; + } + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); + first_bd->data.bitfields |= + (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + first_bd->data.nbds = 1; + + /* We can safely ignore the offset, as it's 0 for XDP */ + BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); + + /* Synchronize the buffer back to device, as program [probably] + * has changed it. + */ + dma_sync_single_for_device(&edev->pdev->dev, + metadata->mapping + padding, + length, PCI_DMA_TODEVICE); + + txq->sw_tx_ring.pages[idx] = metadata->data; + txq->sw_tx_prod++; + + /* Mark the fastpath for future XDP doorbell */ + fp->xdp_xmit = 1; + + return 0; +} + +int qede_txq_has_work(struct qede_tx_queue *txq) +{ + u16 hw_bd_cons; + + /* Tell compiler that consumer and producer can change */ + barrier(); + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) + return 0; + + return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); +} + +static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct eth_tx_1st_bd *bd; + u16 hw_bd_cons; + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & + NUM_TX_BDS_MAX]); + + txq->sw_tx_cons++; + txq->xmit_pkts++; + } +} + +static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct netdev_queue *netdev_txq; + u16 hw_bd_cons; + unsigned int pkts_compl = 0, bytes_compl = 0; + int rc; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + int len = 0; + + rc = qede_free_tx_pkt(edev, txq, &len); + if (rc) { + DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", + hw_bd_cons, + qed_chain_get_cons_idx(&txq->tx_pbl)); + break; + } + + bytes_compl += len; + pkts_compl++; + txq->sw_tx_cons++; + txq->xmit_pkts++; + } + + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). 
Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. + * On the other hand we need an rmb() here to ensure the proper + * ordering of bit testing in the following + * netif_tx_queue_stopped(txq) call. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(netdev_txq))) { + /* Taking tx_lock is needed to prevent reenabling the queue + * while it's empty. This could have happen if rx_action() gets + * suspended in qede_tx_int() after the condition before + * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): + * + * stops the queue->sees fresh tx_bd_cons->releases the queue-> + * sends some packets consuming the whole queue again-> + * stops the queue + */ + + __netif_tx_lock(netdev_txq, smp_processor_id()); + + if ((netif_tx_queue_stopped(netdev_txq)) && + (edev->state == QEDE_STATE_OPEN) && + (qed_chain_get_elem_left(&txq->tx_pbl) + >= (MAX_SKB_FRAGS + 1))) { + netif_tx_wake_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_DONE, + "Wake queue was called\n"); + } + + __netif_tx_unlock(netdev_txq); + } + + return 0; +} + +bool qede_has_rx_work(struct qede_rx_queue *rxq) +{ + u16 hw_comp_cons, sw_comp_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + return hw_comp_cons != sw_comp_cons; +} + +static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) +{ + qed_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; +} + +/* This function reuses the buffer(from an offset) from + * consumer index to producer index in the bd ring + */ +static inline void qede_reuse_page(struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) +{ + struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); + struct sw_rx_data *curr_prod; + dma_addr_t new_mapping; + + curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + *curr_prod = *curr_cons; + + new_mapping = curr_prod->mapping + curr_prod->page_offset; + + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); + + rxq->sw_rx_prod++; + curr_cons->data = NULL; +} + +/* In case of allocation failures reuse buffers + * from consumer index to produce buffers for firmware + */ +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) +{ + struct sw_rx_data *curr_cons; + + for (; count > 0; count--) { + curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; + qede_reuse_page(rxq, curr_cons); + qede_rx_bd_ring_consume(rxq); + } +} + +static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) +{ + /* Move to the next segment in the page */ + curr_cons->page_offset += rxq->rx_buf_seg_size; + + if (curr_cons->page_offset == PAGE_SIZE) { + if (unlikely(qede_alloc_rx_buffer(rxq))) { + /* Since we failed to allocate new buffer + * current buffer can be used again. + */ + curr_cons->page_offset -= rxq->rx_buf_seg_size; + + return -ENOMEM; + } + + dma_unmap_page(rxq->dev, curr_cons->mapping, + PAGE_SIZE, rxq->data_direction); + } else { + /* Increment refcount of the page as we don't want + * network stack to take the ownership of the page + * which can be recycled multiple times by the driver. 
+ */ + page_ref_inc(curr_cons->data); + qede_reuse_page(rxq, curr_cons); + } + + return 0; +} + +void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) +{ + u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); + u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); + struct eth_rx_prod_data rx_prods = {0}; + + /* Update producers */ + rx_prods.bd_prod = cpu_to_le16(bd_prod); + rx_prods.cqe_prod = cpu_to_le16(cqe_prod); + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + */ + wmb(); + + internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), + (u32 *)&rx_prods); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the napi lock is released and another qede_poll is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + +static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) +{ + enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; + enum rss_hash_type htype; + u32 hash = 0; + + htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); + if (htype) { + hash_type = ((htype == RSS_HASH_TYPE_IPV4) || + (htype == RSS_HASH_TYPE_IPV6)) ? + PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; + hash = le32_to_cpu(rss_hash); + } + skb_set_hash(skb, hash, hash_type); +} + +static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) +{ + skb_checksum_none_assert(skb); + + if (csum_flag & QEDE_CSUM_UNNECESSARY) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) + skb->csum_level = 1; +} + +static inline void qede_skb_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + struct sk_buff *skb, u16 vlan_tag) +{ + if (vlan_tag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&fp->napi, skb); + fp->rxq->rcv_pkts++; +} + +static void qede_set_gro_params(struct qede_dev *edev, + struct sk_buff *skb, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); + + if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & + PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - + cqe->header_len; +} + +static int qede_fill_frag_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + u8 tpa_agg_index, u16 len_on_bd) +{ + struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & + NUM_RX_BDS_MAX]; + struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; + struct sk_buff *skb = tpa_info->skb; + + if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) + goto out; + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, tpa_info->frag_id++, + current_bd->data, current_bd->page_offset, + len_on_bd); + + if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { + /* Incr page ref count to reuse on allocation failure + * so that it doesn't get freed while freeing SKB. 
+ */ + page_ref_inc(current_bd->data); + goto out; + } + + qed_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; + + skb->data_len += len_on_bd; + skb->truesize += rxq->rx_buf_seg_size; + skb->len += len_on_bd; + + return 0; + +out: + tpa_info->state = QEDE_AGG_STATE_ERROR; + qede_recycle_rx_bd_ring(rxq, 1); + + return -ENOMEM; +} + +static bool qede_tunn_exist(u16 flag) +{ + return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << + PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); +} + +static u8 qede_check_tunn_csum(u16 flag) +{ + u16 csum_flag = 0; + u8 tcsum = 0; + + if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; + + if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + tcsum = QEDE_TUNN_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | + PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return QEDE_CSUM_UNNECESSARY | tcsum; +} + +static void qede_tpa_start(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; + struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); + struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); + struct sw_rx_data *replace_buf = &tpa_info->buffer; + dma_addr_t mapping = tpa_info->buffer_mapping; + struct sw_rx_data *sw_rx_data_cons; + struct sw_rx_data *sw_rx_data_prod; + + sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; + sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + + /* Use pre-allocated replacement buffer - we can't release the agg. + * start until its over and we don't want to risk allocation failing + * here, so re-allocate when aggregation will be over. + */ + sw_rx_data_prod->mapping = replace_buf->mapping; + + sw_rx_data_prod->data = replace_buf->data; + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + sw_rx_data_prod->page_offset = replace_buf->page_offset; + + rxq->sw_rx_prod++; + + /* move partial skb from cons to pool (don't unmap yet) + * save mapping, incase we drop the packet later on. 
+ */ + tpa_info->buffer = *sw_rx_data_cons; + mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), + le32_to_cpu(rx_bd_cons->addr.lo)); + + tpa_info->buffer_mapping = mapping; + rxq->sw_rx_cons++; + + /* set tpa state to start only if we are able to allocate skb + * for this aggregation, otherwise mark as error and aggregation will + * be dropped + */ + tpa_info->skb = netdev_alloc_skb(edev->ndev, + le16_to_cpu(cqe->len_on_first_bd)); + if (unlikely(!tpa_info->skb)) { + DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); + tpa_info->state = QEDE_AGG_STATE_ERROR; + goto cons_buf; + } + + /* Start filling in the aggregation info */ + skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); + tpa_info->frag_id = 0; + tpa_info->state = QEDE_AGG_STATE_START; + + /* Store some information from first CQE */ + tpa_info->start_cqe_placement_offset = cqe->placement_offset; + tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); + if ((le16_to_cpu(cqe->pars_flags.flags) >> + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + else + tpa_info->vlan_tag = 0; + + qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); + + /* This is needed in order to enable forwarding support */ + qede_set_gro_params(edev, tpa_info->skb, cqe); + +cons_buf: /* We still need to handle bd_len_list to consume buffers */ + if (likely(cqe->ext_bd_len_list[0])) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->ext_bd_len_list[0])); + + if (unlikely(cqe->ext_bd_len_list[1])) { + DP_ERR(edev, + "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); + tpa_info->state = QEDE_AGG_STATE_ERROR; + } +} + +#ifdef CONFIG_INET +static void qede_gro_ip_csum(struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); + + tcp_gro_complete(skb); +} + +static void qede_gro_ipv6_csum(struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); + tcp_gro_complete(skb); +} +#endif + +static void qede_gro_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct sk_buff *skb, + u16 vlan_tag) +{ + /* FW can send a single MTU sized packet from gro flow + * due to aggregation timeout/last segment etc. which + * is not expected to be a gro packet. If a skb has zero + * frags then simply push it in the stack as non gso skb. 
+ */ + if (unlikely(!skb->data_len)) { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + goto send_skb; + } + +#ifdef CONFIG_INET + if (skb_shinfo(skb)->gso_size) { + skb_reset_network_header(skb); + + switch (skb->protocol) { + case htons(ETH_P_IP): + qede_gro_ip_csum(skb); + break; + case htons(ETH_P_IPV6): + qede_gro_ipv6_csum(skb); + break; + default: + DP_ERR(edev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + ntohs(skb->protocol)); + } + } +#endif + +send_skb: + skb_record_rx_queue(skb, fp->rxq->rxq_id); + qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); +} + +static inline void qede_tpa_cont(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_cont_cqe *cqe) +{ + int i; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA cont with more than a single len_list entry\n"); +} + +static void qede_tpa_end(struct qede_dev *edev, + struct qede_fastpath *fp, + struct eth_fast_path_rx_tpa_end_cqe *cqe) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_agg_info *tpa_info; + struct sk_buff *skb; + int i; + + tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; + skb = tpa_info->skb; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA emd with more than a single len_list entry\n"); + + if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) + goto err; + + /* Sanity */ + if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) + DP_ERR(edev, + "Strange - TPA had %02x BDs, but SKB has only %d frags\n", + cqe->num_of_bds, tpa_info->frag_id); + if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) + DP_ERR(edev, + "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", + le16_to_cpu(cqe->total_packet_len), skb->len); + + memcpy(skb->data, + page_address(tpa_info->buffer.data) + + tpa_info->start_cqe_placement_offset + + tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); + + /* Finalize the SKB */ + skb->protocol = eth_type_trans(skb, edev->ndev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); + + qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); + + tpa_info->state = QEDE_AGG_STATE_NONE; + + return; +err: + tpa_info->state = QEDE_AGG_STATE_NONE; + dev_kfree_skb_any(tpa_info->skb); + tpa_info->skb = NULL; +} + +static u8 qede_check_notunn_csum(u16 flag) +{ + u16 csum_flag = 0; + u8 csum = 0; + + if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + csum = QEDE_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return csum; +} + +static u8 qede_check_csum(u16 flag) +{ + if (!qede_tunn_exist(flag)) + return qede_check_notunn_csum(flag); + else + return qede_check_tunn_csum(flag); +} + +static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, + u16 flag) +{ + u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; + + if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK << + 
ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) || + (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << + PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT))) + return true; + + return false; +} + +/* Return true iff packet is to be passed to stack */ +static bool qede_rx_xdp(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + struct bpf_prog *prog, + struct sw_rx_data *bd, + struct eth_fast_path_rx_reg_cqe *cqe) +{ + u16 len = le16_to_cpu(cqe->len_on_first_bd); + struct xdp_buff xdp; + enum xdp_action act; + + xdp.data = page_address(bd->data) + cqe->placement_offset; + xdp.data_end = xdp.data + len; + + /* Queues always have a full reset currently, so for the time + * being until there's atomic program replace just mark read + * side for map helpers. + */ + rcu_read_lock(); + act = bpf_prog_run_xdp(prog, &xdp); + rcu_read_unlock(); + + if (act == XDP_PASS) + return true; + + /* Count number of packets not to be passed to stack */ + rxq->xdp_no_pass++; + + switch (act) { + case XDP_TX: + /* We need the replacement buffer before transmit. */ + if (qede_alloc_rx_buffer(rxq)) { + qede_recycle_rx_bd_ring(rxq, 1); + return false; + } + + /* Now if there's a transmission problem, we'd still have to + * throw current buffer, as replacement was already allocated. + */ + if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) { + dma_unmap_page(rxq->dev, bd->mapping, + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(bd->data); + } + + /* Regardless, we've consumed an Rx BD */ + qede_rx_bd_ring_consume(rxq); + return false; + + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + case XDP_DROP: + qede_recycle_rx_bd_ring(rxq, cqe->bd_num); + } + + return false; +} + +static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, + u16 pad) +{ + unsigned int offset = bd->page_offset; + struct skb_frag_struct *frag; + struct page *page = bd->data; + unsigned int pull_len; + struct sk_buff *skb; + unsigned char *va; + + /* Allocate a new SKB with a sufficient large header len */ + skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + /* Copy data into SKB - if it's small, we can simply copy it and + * re-use the already allcoated & mapped memory. + */ + if (len + pad <= edev->rx_copybreak) { + memcpy(skb_put(skb, len), + page_address(page) + pad + offset, len); + qede_reuse_page(rxq, bd); + goto out; + } + + frag = &skb_shinfo(skb)->frags[0]; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, pad + offset, len, rxq->rx_buf_seg_size); + + va = skb_frag_address(frag); + pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); + + /* Align the pull_len to optimize memcpy */ + memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); + + /* Correct the skb & frag sizes offset after the pull */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; + + if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { + /* Incr page ref count to reuse on allocation failure so + * that it doesn't get freed while freeing SKB [as its + * already mapped there]. 
+ */ + page_ref_inc(page); + dev_kfree_skb_any(skb); + return NULL; + } + +out: + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + return skb; +} + +static int qede_rx_build_jumbo(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sk_buff *skb, + struct eth_fast_path_rx_reg_cqe *cqe, + u16 first_bd_len) +{ + u16 pkt_len = le16_to_cpu(cqe->pkt_len); + struct sw_rx_data *bd; + u16 bd_cons_idx; + u8 num_frags; + + pkt_len -= first_bd_len; + + /* We've already used one BD for the SKB. Now take care of the rest */ + for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { + u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : + pkt_len; + + if (unlikely(!cur_size)) { + DP_ERR(edev, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + goto out; + } + + /* We need a replacement buffer for each BD */ + if (unlikely(qede_alloc_rx_buffer(rxq))) + goto out; + + /* Now that we've allocated the replacement buffer, + * we can safely consume the next BD and map it to the SKB. + */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + qede_rx_bd_ring_consume(rxq); + + dma_unmap_page(rxq->dev, bd->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, + bd->data, 0, cur_size); + + skb->truesize += PAGE_SIZE; + skb->data_len += cur_size; + skb->len += cur_size; + pkt_len -= cur_size; + } + + if (unlikely(pkt_len)) + DP_ERR(edev, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + +out: + return num_frags; +} + +static int qede_rx_process_tpa_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + union eth_rx_cqe *cqe, + enum eth_rx_cqe_type type) +{ + switch (type) { + case ETH_RX_CQE_TYPE_TPA_START: + qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); + return 0; + case ETH_RX_CQE_TYPE_TPA_CONT: + qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); + return 0; + case ETH_RX_CQE_TYPE_TPA_END: + qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); + return 1; + default: + return 0; + } +} + +static int qede_rx_process_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); + struct eth_fast_path_rx_reg_cqe *fp_cqe; + u16 len, pad, bd_cons_idx, parse_flag; + enum eth_rx_cqe_type cqe_type; + union eth_rx_cqe *cqe; + struct sw_rx_data *bd; + struct sk_buff *skb; + __le16 flags; + u8 csum_flag; + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + + /* Process an unlikely slowpath event */ + if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { + struct eth_slow_path_rx_cqe *sp_cqe; + + sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; + edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); + return 0; + } + + /* Handle TPA cqes */ + if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) + return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); + + /* Get the data from the SW ring; Consume it only after it's evident + * we wouldn't recycle it. 
+ */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + pad = fp_cqe->placement_offset; + + /* Run eBPF program if one is attached */ + if (xdp_prog) + if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) + return 1; + + /* If this is an error packet then drop it */ + flags = cqe->fast_path_regular.pars_flags.flags; + parse_flag = le16_to_cpu(flags); + + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { + rxq->rx_ip_frags++; + } else { + DP_NOTICE(edev, + "CQE has error, flags = %x, dropping incoming packet\n", + parse_flag); + rxq->rx_hw_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; + } + } + + /* Basic validation passed; Need to prepare an SKB. This would also + * guarantee to finally consume the first BD upon success. + */ + skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); + if (!skb) { + rxq->rx_alloc_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; + } + + /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed + * by a single cqe. + */ + if (fp_cqe->bd_num > 1) { + u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, + fp_cqe, len); + + if (unlikely(unmapped_frags > 0)) { + qede_recycle_rx_bd_ring(rxq, unmapped_frags); + dev_kfree_skb_any(skb); + return 0; + } + } + + /* The SKB contains all the data. Now prepare meta-magic */ + skb->protocol = eth_type_trans(skb, edev->ndev); + qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); + qede_set_skb_csum(skb, csum_flag); + skb_record_rx_queue(skb, rxq->rxq_id); + + /* SKB is prepared - pass it to stack */ + qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); + + return 1; +} + +static int qede_rx_int(struct qede_fastpath *fp, int budget) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_dev *edev = fp->edev; + u16 hw_comp_cons, sw_comp_cons; + int work_done = 0; + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative reads of CQE + * / BD in the while-loop before reading hw_comp_cons. If the CQE is + * read before it is written by FW, then FW writes CQE and SB, and then + * the CPU reads the hw_comp_cons, it will use an old CQE. + */ + rmb(); + + /* Loop to complete all indicated BDs */ + while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { + qede_rx_process_cqe(edev, fp, rxq); + qed_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + work_done++; + } + + /* Update producers */ + qede_update_rx_prod(edev, rxq); + + return work_done; +} + +static bool qede_poll_is_more_work(struct qede_fastpath *fp) +{ + qed_sb_update_sb_idx(fp->sb_info); + + /* *_has_*_work() reads the status block, thus we need to ensure that + * status block indices have been actually read (qed_sb_update_sb_idx) + * prior to this check (*_has_*_work) so that we won't write the + * "newer" value of the status block to HW (if there was a DMA right + * after qede_has_rx_work and if there is no rmb, the memory reading + * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). + * In this case there will never be another interrupt until there is + * another update of the status block, while there is still unhandled + * work. 
+ */ + rmb(); + + if (likely(fp->type & QEDE_FASTPATH_RX)) + if (qede_has_rx_work(fp->rxq)) + return true; + + if (fp->type & QEDE_FASTPATH_XDP) + if (qede_txq_has_work(fp->xdp_tx)) + return true; + + if (likely(fp->type & QEDE_FASTPATH_TX)) + if (qede_txq_has_work(fp->txq)) + return true; + + return false; +} + +/********************* + * NDO & API related * + *********************/ +int qede_poll(struct napi_struct *napi, int budget) +{ + struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, + napi); + struct qede_dev *edev = fp->edev; + int rx_work_done = 0; + + if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) + qede_tx_int(edev, fp->txq); + + if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) + qede_xdp_tx_int(edev, fp->xdp_tx); + + rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) ? + qede_rx_int(fp, budget) : 0; + if (rx_work_done < budget) { + if (!qede_poll_is_more_work(fp)) { + napi_complete(napi); + + /* Update and reenable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + } else { + rx_work_done = budget; + } + } + + if (fp->xdp_xmit) { + u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); + + fp->xdp_xmit = 0; + fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); + qede_update_tx_producer(fp->xdp_tx); + } + + return rx_work_done; +} + +irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) +{ + struct qede_fastpath *fp = fp_cookie; + + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); + + napi_schedule_irqoff(&fp->napi); + return IRQ_HANDLED; +} + +/* Main transmit function */ +netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct netdev_queue *netdev_txq; + struct qede_tx_queue *txq; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_2nd_bd *second_bd = NULL; + struct eth_tx_3rd_bd *third_bd = NULL; + struct eth_tx_bd *tx_data_bd = NULL; + u16 txq_index; + u8 nbd = 0; + dma_addr_t mapping; + int rc, frag_idx = 0, ipv6_ext = 0; + u8 xmit_type; + u16 idx; + u16 hlen; + bool data_split = false; + + /* Get tx-queue context and netdev index */ + txq_index = skb_get_queue_mapping(skb); + WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); + txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; + netdev_txq = netdev_get_tx_queue(ndev, txq_index); + + WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); + + xmit_type = qede_xmit_type(skb, &ipv6_ext); + +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) + if (qede_pkt_req_lin(skb, xmit_type)) { + if (skb_linearize(skb)) { + DP_NOTICE(edev, + "SKB linearization failed - silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + txq->sw_tx_ring.skbs[idx].skb = skb; + first_bd = (struct eth_tx_1st_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + + /* Map skb linear data for DMA and set in the first BD */ + mapping = dma_map_single(txq->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(txq->dev, mapping))) { + DP_NOTICE(edev, "SKB mapping failed\n"); + qede_free_failed_tx_pkt(txq, first_bd, 0, false); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + nbd++; + BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, 
skb_headlen(skb)); + + /* In case there is IPv6 with extension headers or LSO we need 2nd and + * 3rd BDs. + */ + if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { + second_bd = (struct eth_tx_2nd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(second_bd, 0, sizeof(*second_bd)); + + nbd++; + third_bd = (struct eth_tx_3rd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(third_bd, 0, sizeof(*third_bd)); + + nbd++; + /* We need to fill in additional data in second_bd... */ + tx_data_bd = (struct eth_tx_bd *)second_bd; + } + + if (skb_vlan_tag_present(skb)) { + first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; + } + + /* Fill the parsing flags & params according to the requested offload */ + if (xmit_type & XMIT_L4_CSUM) { + /* We don't re-calculate IP checksum as it is already done by + * the upper stack + */ + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + first_bd->data.bitfields |= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + } + + /* Legacy FW had flipped behavior in regard to this bit - + * I.e., needed to set to prevent FW from touching encapsulated + * packets when it didn't need to. + */ + if (unlikely(txq->is_legacy)) + first_bd->data.bitfields ^= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + + /* If the packet is IPv6 with extension header, indicate that + * to FW and pass few params, since the device cracker doesn't + * support parsing IPv6 with extension header/s. + */ + if (unlikely(ipv6_ext)) + qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); + } + + if (xmit_type & XMIT_LSO) { + first_bd->data.bd_flags.bitfields |= + (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); + third_bd->data.lso_mss = + cpu_to_le16(skb_shinfo(skb)->gso_size); + + if (unlikely(xmit_type & XMIT_ENC)) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { + u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; + + first_bd->data.bd_flags.bitfields |= 1 << tmp; + } + hlen = qede_get_skb_hlen(skb, true); + } else { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + hlen = qede_get_skb_hlen(skb, false); + } + + /* @@@TBD - if will not be removed need to check */ + third_bd->data.bitfields |= + cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + + /* Make life easier for FW guys who can't deal with header and + * data on same BD. If we need to split, use the second bd... + */ + if (unlikely(skb_headlen(skb) > hlen)) { + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "TSO split header size is %d (%x:%x)\n", + first_bd->nbytes, first_bd->addr.hi, + first_bd->addr.lo); + + mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), + le32_to_cpu(first_bd->addr.lo)) + + hlen; + + BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, + le16_to_cpu(first_bd->nbytes) - + hlen); + + /* this marks the BD as one that has no + * individual mapping + */ + txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; + + first_bd->nbytes = cpu_to_le16(hlen); + + tx_data_bd = (struct eth_tx_bd *)third_bd; + data_split = true; + } + } else { + first_bd->data.bitfields |= + (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + } + + /* Handle fragmented skb */ + /* special handle for frags inside 2nd and 3rd bds.. 
*/ + while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { + rc = map_frag_to_bd(txq, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + + if (tx_data_bd == (struct eth_tx_bd *)second_bd) + tx_data_bd = (struct eth_tx_bd *)third_bd; + else + tx_data_bd = NULL; + + frag_idx++; + } + + /* map last frags into 4th, 5th .... */ + for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + + memset(tx_data_bd, 0, sizeof(*tx_data_bd)); + + rc = map_frag_to_bd(txq, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + } + + /* update the first BD with the actual num BDs */ + first_bd->data.nbds = nbd; + + netdev_tx_sent_queue(netdev_txq, skb->len); + + skb_tx_timestamp(skb); + + /* Advance packet producer only before sending the packet since mapping + * of pages may fail. + */ + txq->sw_tx_prod++; + + /* 'next page' entries are counted in the producer value */ + txq->tx_db.data.bd_prod = + cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); + + if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) + qede_update_tx_producer(txq); + + if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) + < (MAX_SKB_FRAGS + 1))) { + if (skb->xmit_more) + qede_update_tx_producer(txq); + + netif_tx_stop_queue(netdev_txq); + txq->stopped_cnt++; + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Stop queue was called\n"); + /* paired memory barrier is in qede_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons + */ + smp_mb(); + + if ((qed_chain_get_elem_left(&txq->tx_pbl) >= + (MAX_SKB_FRAGS + 1)) && + (edev->state == QEDE_STATE_OPEN)) { + netif_tx_wake_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Wake queue was called\n"); + } + } + + return NETDEV_TX_OK; +} + +/* 8B udp header + 8B base tunnel header + 32B option length */ +#define QEDE_MAX_TUN_HDR_LEN 48 + +netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation) { + u8 l4_proto = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + /* Disable offloads for geneve tunnels, as HW can't parse + * the geneve header which has option length greater than 32B. 
+ */ + if ((l4_proto == IPPROTO_UDP) && + ((skb_inner_mac_header(skb) - + skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) + return features & ~(NETIF_F_CSUM_MASK | + NETIF_F_GSO_MASK); + } + + return features; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 0851fe3a8f25..1393bf4add7a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -317,1608 +317,6 @@ static void __exit qede_cleanup(void) module_init(qede_init); module_exit(qede_cleanup); -/* ------------------------------------------------------------------------- - * START OF FAST-PATH - * ------------------------------------------------------------------------- - */ - -/* Unmap the data and free skb */ -static int qede_free_tx_pkt(struct qede_dev *edev, - struct qede_tx_queue *txq, int *len) -{ - u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; - struct eth_tx_1st_bd *first_bd; - struct eth_tx_bd *tx_data_bd; - int bds_consumed = 0; - int nbds; - bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; - int i, split_bd_len = 0; - - if (unlikely(!skb)) { - DP_ERR(edev, - "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", - idx, txq->sw_tx_cons, txq->sw_tx_prod); - return -1; - } - - *len = skb->len; - - first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - - bds_consumed++; - - nbds = first_bd->data.nbds; - - if (data_split) { - struct eth_tx_bd *split = (struct eth_tx_bd *) - qed_chain_consume(&txq->tx_pbl); - split_bd_len = BD_UNMAP_LEN(split); - bds_consumed++; - } - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); - - /* Unmap the data of the skb frags */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_consume(&txq->tx_pbl); - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - } - - while (bds_consumed++ < nbds) - qed_chain_consume(&txq->tx_pbl); - - /* Free skb */ - dev_kfree_skb_any(skb); - txq->sw_tx_ring.skbs[idx].skb = NULL; - txq->sw_tx_ring.skbs[idx].flags = 0; - - return 0; -} - -/* Unmap the data and free skb when mapping failed during start_xmit */ -static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, - struct eth_tx_1st_bd *first_bd, - int nbd, bool data_split) -{ - u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; - struct eth_tx_bd *tx_data_bd; - int i, split_bd_len = 0; - - /* Return prod to its position before this skb was handled */ - qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); - - first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); - - if (data_split) { - struct eth_tx_bd *split = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - split_bd_len = BD_UNMAP_LEN(split); - nbd--; - } - - dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); - - /* Unmap the data of the skb frags */ - for (i = 0; i < nbd; i++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - if (tx_data_bd->nbytes) - dma_unmap_page(txq->dev, - BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - } - - /* Return again prod to its position before this skb was handled */ - qed_chain_set_prod(&txq->tx_pbl, - 
le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); - - /* Free skb */ - dev_kfree_skb_any(skb); - txq->sw_tx_ring.skbs[idx].skb = NULL; - txq->sw_tx_ring.skbs[idx].flags = 0; -} - -static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) -{ - u32 rc = XMIT_L4_CSUM; - __be16 l3_proto; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return XMIT_PLAIN; - - l3_proto = vlan_get_protocol(skb); - if (l3_proto == htons(ETH_P_IPV6) && - (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) - *ipv6_ext = 1; - - if (skb->encapsulation) { - rc |= XMIT_ENC; - if (skb_is_gso(skb)) { - unsigned short gso_type = skb_shinfo(skb)->gso_type; - - if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || - (gso_type & SKB_GSO_GRE_CSUM)) - rc |= XMIT_ENC_GSO_L4_CSUM; - - rc |= XMIT_LSO; - return rc; - } - } - - if (skb_is_gso(skb)) - rc |= XMIT_LSO; - - return rc; -} - -static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, - struct eth_tx_2nd_bd *second_bd, - struct eth_tx_3rd_bd *third_bd) -{ - u8 l4_proto; - u16 bd2_bits1 = 0, bd2_bits2 = 0; - - bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); - - bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & - ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) - << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; - - bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << - ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); - - if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) - l4_proto = ipv6_hdr(skb)->nexthdr; - else - l4_proto = ip_hdr(skb)->protocol; - - if (l4_proto == IPPROTO_UDP) - bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; - - if (third_bd) - third_bd->data.bitfields |= - cpu_to_le16(((tcp_hdrlen(skb) / 4) & - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); - - second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); - second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); -} - -static int map_frag_to_bd(struct qede_tx_queue *txq, - skb_frag_t *frag, struct eth_tx_bd *bd) -{ - dma_addr_t mapping; - - /* Map skb non-linear frag data for DMA */ - mapping = skb_frag_dma_map(txq->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(txq->dev, mapping))) - return -ENOMEM; - - /* Setup the data pointer of the frag data */ - BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); - - return 0; -} - -static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) -{ - if (is_encap_pkt) - return (skb_inner_transport_header(skb) + - inner_tcp_hdrlen(skb) - skb->data); - else - return (skb_transport_header(skb) + - tcp_hdrlen(skb) - skb->data); -} - -/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ -#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) -static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) -{ - int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; - - if (xmit_type & XMIT_LSO) { - int hlen; - - hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); - - /* linear payload would require its own BD */ - if (skb_headlen(skb) > hlen) - allowed_frags--; - } - - return (skb_shinfo(skb)->nr_frags > allowed_frags); -} -#endif - -static inline void qede_update_tx_producer(struct qede_tx_queue *txq) -{ - /* wmb makes sure that the BDs data is updated before updating the - * producer, otherwise FW may read old data from the BDs. - */ - wmb(); - barrier(); - writel(txq->tx_db.raw, txq->doorbell_addr); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. 
It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); -} - -static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, - struct sw_rx_data *metadata, u16 padding, u16 length) -{ - struct qede_tx_queue *txq = fp->xdp_tx; - u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - struct eth_tx_1st_bd *first_bd; - - if (!qed_chain_get_elem_left(&txq->tx_pbl)) { - txq->stopped_cnt++; - return -ENOMEM; - } - - first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); - - memset(first_bd, 0, sizeof(*first_bd)); - first_bd->data.bd_flags.bitfields = - BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); - first_bd->data.bitfields |= - (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << - ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; - first_bd->data.nbds = 1; - - /* We can safely ignore the offset, as it's 0 for XDP */ - BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); - - /* Synchronize the buffer back to device, as program [probably] - * has changed it. - */ - dma_sync_single_for_device(&edev->pdev->dev, - metadata->mapping + padding, - length, PCI_DMA_TODEVICE); - - txq->sw_tx_ring.pages[idx] = metadata->data; - txq->sw_tx_prod++; - - /* Mark the fastpath for future XDP doorbell */ - fp->xdp_xmit = 1; - - return 0; -} - -/* Main transmit function */ -static netdev_tx_t qede_start_xmit(struct sk_buff *skb, - struct net_device *ndev) -{ - struct qede_dev *edev = netdev_priv(ndev); - struct netdev_queue *netdev_txq; - struct qede_tx_queue *txq; - struct eth_tx_1st_bd *first_bd; - struct eth_tx_2nd_bd *second_bd = NULL; - struct eth_tx_3rd_bd *third_bd = NULL; - struct eth_tx_bd *tx_data_bd = NULL; - u16 txq_index; - u8 nbd = 0; - dma_addr_t mapping; - int rc, frag_idx = 0, ipv6_ext = 0; - u8 xmit_type; - u16 idx; - u16 hlen; - bool data_split = false; - - /* Get tx-queue context and netdev index */ - txq_index = skb_get_queue_mapping(skb); - WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); - txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; - netdev_txq = netdev_get_tx_queue(ndev, txq_index); - - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); - - xmit_type = qede_xmit_type(skb, &ipv6_ext); - -#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) - if (qede_pkt_req_lin(skb, xmit_type)) { - if (skb_linearize(skb)) { - DP_NOTICE(edev, - "SKB linearization failed - silently dropping this SKB\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } -#endif - - /* Fill the entry in the SW ring and the BDs in the FW ring */ - idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - txq->sw_tx_ring.skbs[idx].skb = skb; - first_bd = (struct eth_tx_1st_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(first_bd, 0, sizeof(*first_bd)); - first_bd->data.bd_flags.bitfields = - 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; - - /* Map skb linear data for DMA and set in the first BD */ - mapping = dma_map_single(txq->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(txq->dev, mapping))) { - DP_NOTICE(edev, "SKB mapping failed\n"); - qede_free_failed_tx_pkt(txq, first_bd, 0, false); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - nbd++; - BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); - - /* In case there is IPv6 with extension headers or LSO we need 2nd and - * 3rd BDs. 
- */ - if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { - second_bd = (struct eth_tx_2nd_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(second_bd, 0, sizeof(*second_bd)); - - nbd++; - third_bd = (struct eth_tx_3rd_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(third_bd, 0, sizeof(*third_bd)); - - nbd++; - /* We need to fill in additional data in second_bd... */ - tx_data_bd = (struct eth_tx_bd *)second_bd; - } - - if (skb_vlan_tag_present(skb)) { - first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; - } - - /* Fill the parsing flags & params according to the requested offload */ - if (xmit_type & XMIT_L4_CSUM) { - /* We don't re-calculate IP checksum as it is already done by - * the upper stack - */ - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; - - if (xmit_type & XMIT_ENC) { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - first_bd->data.bitfields |= - 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; - } - - /* Legacy FW had flipped behavior in regard to this bit - - * I.e., needed to set to prevent FW from touching encapsulated - * packets when it didn't need to. - */ - if (unlikely(txq->is_legacy)) - first_bd->data.bitfields ^= - 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; - - /* If the packet is IPv6 with extension header, indicate that - * to FW and pass few params, since the device cracker doesn't - * support parsing IPv6 with extension header/s. - */ - if (unlikely(ipv6_ext)) - qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); - } - - if (xmit_type & XMIT_LSO) { - first_bd->data.bd_flags.bitfields |= - (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); - third_bd->data.lso_mss = - cpu_to_le16(skb_shinfo(skb)->gso_size); - - if (unlikely(xmit_type & XMIT_ENC)) { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; - - if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { - u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; - - first_bd->data.bd_flags.bitfields |= 1 << tmp; - } - hlen = qede_get_skb_hlen(skb, true); - } else { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - hlen = qede_get_skb_hlen(skb, false); - } - - /* @@@TBD - if will not be removed need to check */ - third_bd->data.bitfields |= - cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT)); - - /* Make life easier for FW guys who can't deal with header and - * data on same BD. If we need to split, use the second bd... - */ - if (unlikely(skb_headlen(skb) > hlen)) { - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "TSO split header size is %d (%x:%x)\n", - first_bd->nbytes, first_bd->addr.hi, - first_bd->addr.lo); - - mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), - le32_to_cpu(first_bd->addr.lo)) + - hlen; - - BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, - le16_to_cpu(first_bd->nbytes) - - hlen); - - /* this marks the BD as one that has no - * individual mapping - */ - txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; - - first_bd->nbytes = cpu_to_le16(hlen); - - tx_data_bd = (struct eth_tx_bd *)third_bd; - data_split = true; - } - } else { - first_bd->data.bitfields |= - (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << - ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; - } - - /* Handle fragmented skb */ - /* special handle for frags inside 2nd and 3rd bds.. 
*/ - while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { - rc = map_frag_to_bd(txq, - &skb_shinfo(skb)->frags[frag_idx], - tx_data_bd); - if (rc) { - qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - - if (tx_data_bd == (struct eth_tx_bd *)second_bd) - tx_data_bd = (struct eth_tx_bd *)third_bd; - else - tx_data_bd = NULL; - - frag_idx++; - } - - /* map last frags into 4th, 5th .... */ - for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - - memset(tx_data_bd, 0, sizeof(*tx_data_bd)); - - rc = map_frag_to_bd(txq, - &skb_shinfo(skb)->frags[frag_idx], - tx_data_bd); - if (rc) { - qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - } - - /* update the first BD with the actual num BDs */ - first_bd->data.nbds = nbd; - - netdev_tx_sent_queue(netdev_txq, skb->len); - - skb_tx_timestamp(skb); - - /* Advance packet producer only before sending the packet since mapping - * of pages may fail. - */ - txq->sw_tx_prod++; - - /* 'next page' entries are counted in the producer value */ - txq->tx_db.data.bd_prod = - cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); - - if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) - qede_update_tx_producer(txq); - - if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) - < (MAX_SKB_FRAGS + 1))) { - if (skb->xmit_more) - qede_update_tx_producer(txq); - - netif_tx_stop_queue(netdev_txq); - txq->stopped_cnt++; - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "Stop queue was called\n"); - /* paired memory barrier is in qede_tx_int(), we have to keep - * ordering of set_bit() in netif_tx_stop_queue() and read of - * fp->bd_tx_cons - */ - smp_mb(); - - if (qed_chain_get_elem_left(&txq->tx_pbl) - >= (MAX_SKB_FRAGS + 1) && - (edev->state == QEDE_STATE_OPEN)) { - netif_tx_wake_queue(netdev_txq); - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "Wake queue was called\n"); - } - } - - return NETDEV_TX_OK; -} - -int qede_txq_has_work(struct qede_tx_queue *txq) -{ - u16 hw_bd_cons; - - /* Tell compiler that consumer and producer can change */ - barrier(); - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) - return 0; - - return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); -} - -static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) -{ - struct eth_tx_1st_bd *bd; - u16 hw_bd_cons; - - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - barrier(); - - while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { - bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), - PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & - NUM_TX_BDS_MAX]); - - txq->sw_tx_cons++; - txq->xmit_pkts++; - } -} - -static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) -{ - struct netdev_queue *netdev_txq; - u16 hw_bd_cons; - unsigned int pkts_compl = 0, bytes_compl = 0; - int rc; - - netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); - - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - barrier(); - - while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { - int len = 0; - - rc = qede_free_tx_pkt(edev, txq, &len); - if (rc) { - DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", - hw_bd_cons, - qed_chain_get_cons_idx(&txq->tx_pbl)); - break; - } - - bytes_compl += len; - pkts_compl++; - 
txq->sw_tx_cons++; - txq->xmit_pkts++; - } - - netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); - - /* Need to make the tx_bd_cons update visible to start_xmit() - * before checking for netif_tx_queue_stopped(). Without the - * memory barrier, there is a small possibility that - * start_xmit() will miss it and cause the queue to be stopped - * forever. - * On the other hand we need an rmb() here to ensure the proper - * ordering of bit testing in the following - * netif_tx_queue_stopped(txq) call. - */ - smp_mb(); - - if (unlikely(netif_tx_queue_stopped(netdev_txq))) { - /* Taking tx_lock is needed to prevent reenabling the queue - * while it's empty. This could have happen if rx_action() gets - * suspended in qede_tx_int() after the condition before - * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): - * - * stops the queue->sees fresh tx_bd_cons->releases the queue-> - * sends some packets consuming the whole queue again-> - * stops the queue - */ - - __netif_tx_lock(netdev_txq, smp_processor_id()); - - if ((netif_tx_queue_stopped(netdev_txq)) && - (edev->state == QEDE_STATE_OPEN) && - (qed_chain_get_elem_left(&txq->tx_pbl) - >= (MAX_SKB_FRAGS + 1))) { - netif_tx_wake_queue(netdev_txq); - DP_VERBOSE(edev, NETIF_MSG_TX_DONE, - "Wake queue was called\n"); - } - - __netif_tx_unlock(netdev_txq); - } - - return 0; -} - -bool qede_has_rx_work(struct qede_rx_queue *rxq) -{ - u16 hw_comp_cons, sw_comp_cons; - - /* Tell compiler that status block fields can change */ - barrier(); - - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - - return hw_comp_cons != sw_comp_cons; -} - -static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) -{ - qed_chain_consume(&rxq->rx_bd_ring); - rxq->sw_rx_cons++; -} - -/* This function reuses the buffer(from an offset) from - * consumer index to producer index in the bd ring - */ -static inline void qede_reuse_page(struct qede_rx_queue *rxq, - struct sw_rx_data *curr_cons) -{ - struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *curr_prod; - dma_addr_t new_mapping; - - curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - *curr_prod = *curr_cons; - - new_mapping = curr_prod->mapping + curr_prod->page_offset; - - rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); - - rxq->sw_rx_prod++; - curr_cons->data = NULL; -} - -/* In case of allocation failures reuse buffers - * from consumer index to produce buffers for firmware - */ -void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) -{ - struct sw_rx_data *curr_cons; - - for (; count > 0; count--) { - curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - qede_reuse_page(rxq, curr_cons); - qede_rx_bd_ring_consume(rxq); - } -} - -static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) -{ - struct sw_rx_data *sw_rx_data; - struct eth_rx_bd *rx_bd; - dma_addr_t mapping; - struct page *data; - - data = alloc_pages(GFP_ATOMIC, 0); - if (unlikely(!data)) - return -ENOMEM; - - /* Map the entire page as it would be used - * for multiple RX buffer segment size mapping. 
- */ - mapping = dma_map_page(rxq->dev, data, 0, - PAGE_SIZE, rxq->data_direction); - if (unlikely(dma_mapping_error(rxq->dev, mapping))) { - __free_page(data); - return -ENOMEM; - } - - sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - sw_rx_data->page_offset = 0; - sw_rx_data->data = data; - sw_rx_data->mapping = mapping; - - /* Advance PROD and get BD pointer */ - rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); - WARN_ON(!rx_bd); - rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - - rxq->sw_rx_prod++; - - return 0; -} - -static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, - struct sw_rx_data *curr_cons) -{ - /* Move to the next segment in the page */ - curr_cons->page_offset += rxq->rx_buf_seg_size; - - if (curr_cons->page_offset == PAGE_SIZE) { - if (unlikely(qede_alloc_rx_buffer(rxq))) { - /* Since we failed to allocate new buffer - * current buffer can be used again. - */ - curr_cons->page_offset -= rxq->rx_buf_seg_size; - - return -ENOMEM; - } - - dma_unmap_page(rxq->dev, curr_cons->mapping, - PAGE_SIZE, rxq->data_direction); - } else { - /* Increment refcount of the page as we don't want - * network stack to take the ownership of the page - * which can be recycled multiple times by the driver. - */ - page_ref_inc(curr_cons->data); - qede_reuse_page(rxq, curr_cons); - } - - return 0; -} - -void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) -{ - u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); - u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); - struct eth_rx_prod_data rx_prods = {0}; - - /* Update producers */ - rx_prods.bd_prod = cpu_to_le16(bd_prod); - rx_prods.cqe_prod = cpu_to_le16(cqe_prod); - - /* Make sure that the BD and SGE data is updated before updating the - * producers since FW might read the BD/SGE right after the producer - * is updated. - */ - wmb(); - - internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), - (u32 *)&rx_prods); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the napi lock is released and another qede_poll is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); -} - -static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) -{ - enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; - enum rss_hash_type htype; - u32 hash = 0; - - htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); - if (htype) { - hash_type = ((htype == RSS_HASH_TYPE_IPV4) || - (htype == RSS_HASH_TYPE_IPV6)) ? 
- PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; - hash = le32_to_cpu(rss_hash); - } - skb_set_hash(skb, hash, hash_type); -} - -static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) -{ - skb_checksum_none_assert(skb); - - if (csum_flag & QEDE_CSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) - skb->csum_level = 1; -} - -static inline void qede_skb_receive(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq, - struct sk_buff *skb, u16 vlan_tag) -{ - if (vlan_tag) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - - napi_gro_receive(&fp->napi, skb); - fp->rxq->rcv_pkts++; -} - -static void qede_set_gro_params(struct qede_dev *edev, - struct sk_buff *skb, - struct eth_fast_path_rx_tpa_start_cqe *cqe) -{ - u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); - - if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & - PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; - else - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - - skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - - cqe->header_len; -} - -static int qede_fill_frag_skb(struct qede_dev *edev, - struct qede_rx_queue *rxq, - u8 tpa_agg_index, u16 len_on_bd) -{ - struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & - NUM_RX_BDS_MAX]; - struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; - struct sk_buff *skb = tpa_info->skb; - - if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) - goto out; - - /* Add one frag and update the appropriate fields in the skb */ - skb_fill_page_desc(skb, tpa_info->frag_id++, - current_bd->data, current_bd->page_offset, - len_on_bd); - - if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { - /* Incr page ref count to reuse on allocation failure - * so that it doesn't get freed while freeing SKB. - */ - page_ref_inc(current_bd->data); - goto out; - } - - qed_chain_consume(&rxq->rx_bd_ring); - rxq->sw_rx_cons++; - - skb->data_len += len_on_bd; - skb->truesize += rxq->rx_buf_seg_size; - skb->len += len_on_bd; - - return 0; - -out: - tpa_info->state = QEDE_AGG_STATE_ERROR; - qede_recycle_rx_bd_ring(rxq, 1); - - return -ENOMEM; -} - -static void qede_tpa_start(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct eth_fast_path_rx_tpa_start_cqe *cqe) -{ - struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; - struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); - struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *replace_buf = &tpa_info->buffer; - dma_addr_t mapping = tpa_info->buffer_mapping; - struct sw_rx_data *sw_rx_data_cons; - struct sw_rx_data *sw_rx_data_prod; - - sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - - /* Use pre-allocated replacement buffer - we can't release the agg. - * start until its over and we don't want to risk allocation failing - * here, so re-allocate when aggregation will be over. - */ - sw_rx_data_prod->mapping = replace_buf->mapping; - - sw_rx_data_prod->data = replace_buf->data; - rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - sw_rx_data_prod->page_offset = replace_buf->page_offset; - - rxq->sw_rx_prod++; - - /* move partial skb from cons to pool (don't unmap yet) - * save mapping, incase we drop the packet later on. 
- */ - tpa_info->buffer = *sw_rx_data_cons; - mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), - le32_to_cpu(rx_bd_cons->addr.lo)); - - tpa_info->buffer_mapping = mapping; - rxq->sw_rx_cons++; - - /* set tpa state to start only if we are able to allocate skb - * for this aggregation, otherwise mark as error and aggregation will - * be dropped - */ - tpa_info->skb = netdev_alloc_skb(edev->ndev, - le16_to_cpu(cqe->len_on_first_bd)); - if (unlikely(!tpa_info->skb)) { - DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); - tpa_info->state = QEDE_AGG_STATE_ERROR; - goto cons_buf; - } - - /* Start filling in the aggregation info */ - skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); - tpa_info->frag_id = 0; - tpa_info->state = QEDE_AGG_STATE_START; - - /* Store some information from first CQE */ - tpa_info->start_cqe_placement_offset = cqe->placement_offset; - tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); - if ((le16_to_cpu(cqe->pars_flags.flags) >> - PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & - PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) - tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); - else - tpa_info->vlan_tag = 0; - - qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); - - /* This is needed in order to enable forwarding support */ - qede_set_gro_params(edev, tpa_info->skb, cqe); - -cons_buf: /* We still need to handle bd_len_list to consume buffers */ - if (likely(cqe->ext_bd_len_list[0])) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->ext_bd_len_list[0])); - - if (unlikely(cqe->ext_bd_len_list[1])) { - DP_ERR(edev, - "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); - tpa_info->state = QEDE_AGG_STATE_ERROR; - } -} - -#ifdef CONFIG_INET -static void qede_gro_ip_csum(struct sk_buff *skb) -{ - const struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th; - - skb_set_transport_header(skb, sizeof(struct iphdr)); - th = tcp_hdr(skb); - - th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), - iph->saddr, iph->daddr, 0); - - tcp_gro_complete(skb); -} - -static void qede_gro_ipv6_csum(struct sk_buff *skb) -{ - struct ipv6hdr *iph = ipv6_hdr(skb); - struct tcphdr *th; - - skb_set_transport_header(skb, sizeof(struct ipv6hdr)); - th = tcp_hdr(skb); - - th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), - &iph->saddr, &iph->daddr, 0); - tcp_gro_complete(skb); -} -#endif - -static void qede_gro_receive(struct qede_dev *edev, - struct qede_fastpath *fp, - struct sk_buff *skb, - u16 vlan_tag) -{ - /* FW can send a single MTU sized packet from gro flow - * due to aggregation timeout/last segment etc. which - * is not expected to be a gro packet. If a skb has zero - * frags then simply push it in the stack as non gso skb. 
- */ - if (unlikely(!skb->data_len)) { - skb_shinfo(skb)->gso_type = 0; - skb_shinfo(skb)->gso_size = 0; - goto send_skb; - } - -#ifdef CONFIG_INET - if (skb_shinfo(skb)->gso_size) { - skb_reset_network_header(skb); - - switch (skb->protocol) { - case htons(ETH_P_IP): - qede_gro_ip_csum(skb); - break; - case htons(ETH_P_IPV6): - qede_gro_ipv6_csum(skb); - break; - default: - DP_ERR(edev, - "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", - ntohs(skb->protocol)); - } - } -#endif - -send_skb: - skb_record_rx_queue(skb, fp->rxq->rxq_id); - qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); -} - -static inline void qede_tpa_cont(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct eth_fast_path_rx_tpa_cont_cqe *cqe) -{ - int i; - - for (i = 0; cqe->len_list[i]; i++) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->len_list[i])); - - if (unlikely(i > 1)) - DP_ERR(edev, - "Strange - TPA cont with more than a single len_list entry\n"); -} - -static void qede_tpa_end(struct qede_dev *edev, - struct qede_fastpath *fp, - struct eth_fast_path_rx_tpa_end_cqe *cqe) -{ - struct qede_rx_queue *rxq = fp->rxq; - struct qede_agg_info *tpa_info; - struct sk_buff *skb; - int i; - - tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; - skb = tpa_info->skb; - - for (i = 0; cqe->len_list[i]; i++) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->len_list[i])); - if (unlikely(i > 1)) - DP_ERR(edev, - "Strange - TPA emd with more than a single len_list entry\n"); - - if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) - goto err; - - /* Sanity */ - if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) - DP_ERR(edev, - "Strange - TPA had %02x BDs, but SKB has only %d frags\n", - cqe->num_of_bds, tpa_info->frag_id); - if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) - DP_ERR(edev, - "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", - le16_to_cpu(cqe->total_packet_len), skb->len); - - memcpy(skb->data, - page_address(tpa_info->buffer.data) + - tpa_info->start_cqe_placement_offset + - tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); - - /* Finalize the SKB */ - skb->protocol = eth_type_trans(skb, edev->ndev); - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count - * to skb_shinfo(skb)->gso_segs - */ - NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); - - qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); - - tpa_info->state = QEDE_AGG_STATE_NONE; - - return; -err: - tpa_info->state = QEDE_AGG_STATE_NONE; - dev_kfree_skb_any(tpa_info->skb); - tpa_info->skb = NULL; -} - -static bool qede_tunn_exist(u16 flag) -{ - return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << - PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); -} - -static u8 qede_check_tunn_csum(u16 flag) -{ - u16 csum_flag = 0; - u8 tcsum = 0; - - if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) - csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; - - if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { - csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; - tcsum = QEDE_TUNN_CSUM_UNNECESSARY; - } - - csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << - PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | - PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << - 
PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; - - if (csum_flag & flag) - return QEDE_CSUM_ERROR; - - return QEDE_CSUM_UNNECESSARY | tcsum; -} - -static u8 qede_check_notunn_csum(u16 flag) -{ - u16 csum_flag = 0; - u8 csum = 0; - - if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { - csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; - csum = QEDE_CSUM_UNNECESSARY; - } - - csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << - PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; - - if (csum_flag & flag) - return QEDE_CSUM_ERROR; - - return csum; -} - -static u8 qede_check_csum(u16 flag) -{ - if (!qede_tunn_exist(flag)) - return qede_check_notunn_csum(flag); - else - return qede_check_tunn_csum(flag); -} - -static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, - u16 flag) -{ - u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; - - if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK << - ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) || - (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << - PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT))) - return true; - - return false; -} - -/* Return true iff packet is to be passed to stack */ -static bool qede_rx_xdp(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq, - struct bpf_prog *prog, - struct sw_rx_data *bd, - struct eth_fast_path_rx_reg_cqe *cqe) -{ - u16 len = le16_to_cpu(cqe->len_on_first_bd); - struct xdp_buff xdp; - enum xdp_action act; - - xdp.data = page_address(bd->data) + cqe->placement_offset; - xdp.data_end = xdp.data + len; - - /* Queues always have a full reset currently, so for the time - * being until there's atomic program replace just mark read - * side for map helpers. - */ - rcu_read_lock(); - act = bpf_prog_run_xdp(prog, &xdp); - rcu_read_unlock(); - - if (act == XDP_PASS) - return true; - - /* Count number of packets not to be passed to stack */ - rxq->xdp_no_pass++; - - switch (act) { - case XDP_TX: - /* We need the replacement buffer before transmit. */ - if (qede_alloc_rx_buffer(rxq)) { - qede_recycle_rx_bd_ring(rxq, 1); - return false; - } - - /* Now if there's a transmission problem, we'd still have to - * throw current buffer, as replacement was already allocated. - */ - if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) { - dma_unmap_page(rxq->dev, bd->mapping, - PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(bd->data); - } - - /* Regardless, we've consumed an Rx BD */ - qede_rx_bd_ring_consume(rxq); - return false; - - default: - bpf_warn_invalid_xdp_action(act); - case XDP_ABORTED: - case XDP_DROP: - qede_recycle_rx_bd_ring(rxq, cqe->bd_num); - } - - return false; -} - -static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct sw_rx_data *bd, u16 len, - u16 pad) -{ - unsigned int offset = bd->page_offset; - struct skb_frag_struct *frag; - struct page *page = bd->data; - unsigned int pull_len; - struct sk_buff *skb; - unsigned char *va; - - /* Allocate a new SKB with a sufficient large header len */ - skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - /* Copy data into SKB - if it's small, we can simply copy it and - * re-use the already allcoated & mapped memory. 
- */ - if (len + pad <= edev->rx_copybreak) { - memcpy(skb_put(skb, len), - page_address(page) + pad + offset, len); - qede_reuse_page(rxq, bd); - goto out; - } - - frag = &skb_shinfo(skb)->frags[0]; - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, pad + offset, len, rxq->rx_buf_seg_size); - - va = skb_frag_address(frag); - pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); - - /* Align the pull_len to optimize memcpy */ - memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); - - /* Correct the skb & frag sizes offset after the pull */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { - /* Incr page ref count to reuse on allocation failure so - * that it doesn't get freed while freeing SKB [as its - * already mapped there]. - */ - page_ref_inc(page); - dev_kfree_skb_any(skb); - return NULL; - } - -out: - /* We've consumed the first BD and prepared an SKB */ - qede_rx_bd_ring_consume(rxq); - return skb; -} - -static int qede_rx_build_jumbo(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct sk_buff *skb, - struct eth_fast_path_rx_reg_cqe *cqe, - u16 first_bd_len) -{ - u16 pkt_len = le16_to_cpu(cqe->pkt_len); - struct sw_rx_data *bd; - u16 bd_cons_idx; - u8 num_frags; - - pkt_len -= first_bd_len; - - /* We've already used one BD for the SKB. Now take care of the rest */ - for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { - u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : - pkt_len; - - if (unlikely(!cur_size)) { - DP_ERR(edev, - "Still got %d BDs for mapping jumbo, but length became 0\n", - num_frags); - goto out; - } - - /* We need a replacement buffer for each BD */ - if (unlikely(qede_alloc_rx_buffer(rxq))) - goto out; - - /* Now that we've allocated the replacement buffer, - * we can safely consume the next BD and map it to the SKB. 
- */ - bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - bd = &rxq->sw_rx_ring[bd_cons_idx]; - qede_rx_bd_ring_consume(rxq); - - dma_unmap_page(rxq->dev, bd->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); - - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, - bd->data, 0, cur_size); - - skb->truesize += PAGE_SIZE; - skb->data_len += cur_size; - skb->len += cur_size; - pkt_len -= cur_size; - } - - if (unlikely(pkt_len)) - DP_ERR(edev, - "Mapped all BDs of jumbo, but still have %d bytes\n", - pkt_len); - -out: - return num_frags; -} - -static int qede_rx_process_tpa_cqe(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq, - union eth_rx_cqe *cqe, - enum eth_rx_cqe_type type) -{ - switch (type) { - case ETH_RX_CQE_TYPE_TPA_START: - qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); - return 0; - case ETH_RX_CQE_TYPE_TPA_CONT: - qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); - return 0; - case ETH_RX_CQE_TYPE_TPA_END: - qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); - return 1; - default: - return 0; - } -} - -static int qede_rx_process_cqe(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq) -{ - struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); - struct eth_fast_path_rx_reg_cqe *fp_cqe; - u16 len, pad, bd_cons_idx, parse_flag; - enum eth_rx_cqe_type cqe_type; - union eth_rx_cqe *cqe; - struct sw_rx_data *bd; - struct sk_buff *skb; - __le16 flags; - u8 csum_flag; - - /* Get the CQE from the completion ring */ - cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); - cqe_type = cqe->fast_path_regular.type; - - /* Process an unlikely slowpath event */ - if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { - struct eth_slow_path_rx_cqe *sp_cqe; - - sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; - edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); - return 0; - } - - /* Handle TPA cqes */ - if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) - return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); - - /* Get the data from the SW ring; Consume it only after it's evident - * we wouldn't recycle it. - */ - bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - bd = &rxq->sw_rx_ring[bd_cons_idx]; - - fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->len_on_first_bd); - pad = fp_cqe->placement_offset; - - /* Run eBPF program if one is attached */ - if (xdp_prog) - if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) - return 1; - - /* If this is an error packet then drop it */ - flags = cqe->fast_path_regular.pars_flags.flags; - parse_flag = le16_to_cpu(flags); - - csum_flag = qede_check_csum(parse_flag); - if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { - if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { - rxq->rx_ip_frags++; - } else { - DP_NOTICE(edev, - "CQE has error, flags = %x, dropping incoming packet\n", - parse_flag); - rxq->rx_hw_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } - } - - /* Basic validation passed; Need to prepare an SKB. This would also - * guarantee to finally consume the first BD upon success. - */ - skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); - if (!skb) { - rxq->rx_alloc_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } - - /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed - * by a single cqe. 
- */ - if (fp_cqe->bd_num > 1) { - u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, - fp_cqe, len); - - if (unlikely(unmapped_frags > 0)) { - qede_recycle_rx_bd_ring(rxq, unmapped_frags); - dev_kfree_skb_any(skb); - return 0; - } - } - - /* The SKB contains all the data. Now prepare meta-magic */ - skb->protocol = eth_type_trans(skb, edev->ndev); - qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); - qede_set_skb_csum(skb, csum_flag); - skb_record_rx_queue(skb, rxq->rxq_id); - - /* SKB is prepared - pass it to stack */ - qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); - - return 1; -} - -static int qede_rx_int(struct qede_fastpath *fp, int budget) -{ - struct qede_rx_queue *rxq = fp->rxq; - struct qede_dev *edev = fp->edev; - u16 hw_comp_cons, sw_comp_cons; - int work_done = 0; - - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - - /* Memory barrier to prevent the CPU from doing speculative reads of CQE - * / BD in the while-loop before reading hw_comp_cons. If the CQE is - * read before it is written by FW, then FW writes CQE and SB, and then - * the CPU reads the hw_comp_cons, it will use an old CQE. - */ - rmb(); - - /* Loop to complete all indicated BDs */ - while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { - qede_rx_process_cqe(edev, fp, rxq); - qed_chain_recycle_consumed(&rxq->rx_comp_ring); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - work_done++; - } - - /* Update producers */ - qede_update_rx_prod(edev, rxq); - - return work_done; -} - -static bool qede_poll_is_more_work(struct qede_fastpath *fp) -{ - qed_sb_update_sb_idx(fp->sb_info); - - /* *_has_*_work() reads the status block, thus we need to ensure that - * status block indices have been actually read (qed_sb_update_sb_idx) - * prior to this check (*_has_*_work) so that we won't write the - * "newer" value of the status block to HW (if there was a DMA right - * after qede_has_rx_work and if there is no rmb, the memory reading - * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). - * In this case there will never be another interrupt until there is - * another update of the status block, while there is still unhandled - * work. - */ - rmb(); - - if (likely(fp->type & QEDE_FASTPATH_RX)) - if (qede_has_rx_work(fp->rxq)) - return true; - - if (fp->type & QEDE_FASTPATH_XDP) - if (qede_txq_has_work(fp->xdp_tx)) - return true; - - if (likely(fp->type & QEDE_FASTPATH_TX)) - if (qede_txq_has_work(fp->txq)) - return true; - - return false; -} - -static int qede_poll(struct napi_struct *napi, int budget) -{ - struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, - napi); - struct qede_dev *edev = fp->edev; - int rx_work_done = 0; - - if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) - qede_tx_int(edev, fp->txq); - - if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) - qede_xdp_tx_int(edev, fp->xdp_tx); - - rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && - qede_has_rx_work(fp->rxq)) ? 
- qede_rx_int(fp, budget) : 0; - if (rx_work_done < budget) { - if (!qede_poll_is_more_work(fp)) { - napi_complete(napi); - - /* Update and reenable interrupts */ - qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); - } else { - rx_work_done = budget; - } - } - - if (fp->xdp_xmit) { - u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); - - fp->xdp_xmit = 0; - fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); - qede_update_tx_producer(fp->xdp_tx); - } - - return rx_work_done; -} - -static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) -{ - struct qede_fastpath *fp = fp_cookie; - - qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); - - napi_schedule_irqoff(&fp->napi); - return IRQ_HANDLED; -} - -/* ------------------------------------------------------------------------- - * END OF FAST-PATH - * ------------------------------------------------------------------------- - */ - static int qede_open(struct net_device *ndev); static int qede_close(struct net_device *ndev); static int qede_set_mac_addr(struct net_device *ndev, void *p); @@ -2482,40 +880,6 @@ static void qede_udp_tunnel_del(struct net_device *dev, schedule_delayed_work(&edev->sp_task, 0); } -/* 8B udp header + 8B base tunnel header + 32B option length */ -#define QEDE_MAX_TUN_HDR_LEN 48 - -static netdev_features_t qede_features_check(struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features) -{ - if (skb->encapsulation) { - u8 l4_proto = 0; - - switch (vlan_get_protocol(skb)) { - case htons(ETH_P_IP): - l4_proto = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - l4_proto = ipv6_hdr(skb)->nexthdr; - break; - default: - return features; - } - - /* Disable offloads for geneve tunnels, as HW can't parse - * the geneve header which has option length greater than 32B. - */ - if ((l4_proto == IPPROTO_UDP) && - ((skb_inner_mac_header(skb) - - skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) - return features & ~(NETIF_F_CSUM_MASK | - NETIF_F_GSO_MASK); - } - - return features; -} - static void qede_xdp_reload_func(struct qede_dev *edev, struct qede_reload_args *args) { -- cgit v1.2.3 From aed284c7f0d9d33ce34fadade605b33e8683b7ed Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:02 +0200 Subject: qede: Split filtering logic to its own file This takes the various filtering logic of the driver and moves them into their own dedicated file - qede_filter.c. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/Makefile | 2 +- drivers/net/ethernet/qlogic/qede/qede.h | 22 + drivers/net/ethernet/qlogic/qede/qede_filter.c | 687 +++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 654 ----------------------- 4 files changed, 710 insertions(+), 655 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qede/qede_filter.c diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index c54922be50d1..38fbee6a442b 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QEDE) := qede.o -qede-y := qede_main.o qede_fp.o qede_ethtool.o +qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede-$(CONFIG_DCB) += qede_dcbnl.o qede-$(CONFIG_QED_RDMA) += qede_roce.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index b4e419689000..ab49263c9d43 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -419,9 +419,31 @@ int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len); int qede_poll(struct napi_struct *napi, int budget); irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie); + +/* Filtering function definitions */ +void qede_force_mac(void *dev, u8 *mac, bool forced); +int qede_set_mac_addr(struct net_device *ndev, void *p); + +int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid); +int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid); +void qede_vlan_mark_nonconfigured(struct qede_dev *edev); +int qede_configure_vlan_filters(struct qede_dev *edev); + +int qede_set_features(struct net_device *dev, netdev_features_t features); +void qede_set_rx_mode(struct net_device *ndev); +void qede_config_rx_mode(struct net_device *ndev); +void qede_fill_rss_params(struct qede_dev *edev, + struct qed_update_vport_rss_params *rss, u8 *update); + +void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti); +void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti); + +int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp); + #ifdef CONFIG_DCB void qede_set_dcbnl_ops(struct net_device *ndev); #endif + void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_set_ethtool_ops(struct net_device *netdev); void qede_reload(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c new file mode 100644 index 000000000000..6161e093a127 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -0,0 +1,687 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include + +#include +#include "qede.h" + +void qede_force_mac(void *dev, u8 *mac, bool forced) +{ + struct qede_dev *edev = dev; + + /* MAC hints take effect only if we haven't set one already */ + if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) + return; + + ether_addr_copy(edev->ndev->dev_addr, mac); + ether_addr_copy(edev->primary_mac, mac); +} + +static int qede_set_ucast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char mac[ETH_ALEN]) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.mac_valid = 1; + ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +static int qede_set_ucast_rx_vlan(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + u16 vid) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.vlan_valid = 1; + filter_cmd.filter.ucast.vlan = vid; + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) +{ + struct qed_update_vport_params params; + int rc; + + /* Proceed only if action actually needs to be performed */ + if (edev->accept_any_vlan == action) + return; + + memset(¶ms, 0, sizeof(params)); + + params.vport_id = 0; + params.accept_any_vlan = action; + params.update_accept_any_vlan_flg = 1; + + rc = edev->ops->vport_update(edev->cdev, ¶ms); + if (rc) { + DP_ERR(edev, "Failed to %s accept-any-vlan\n", + action ? "enable" : "disable"); + } else { + DP_INFO(edev, "%s accept-any-vlan\n", + action ? 
"enabled" : "disabled"); + edev->accept_any_vlan = action; + } +} + +int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan, *tmp; + int rc = 0; + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + DP_INFO(edev, "Failed to allocate struct for vlan\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&vlan->list); + vlan->vid = vid; + vlan->configured = false; + + /* Verify vlan isn't already configured */ + list_for_each_entry(tmp, &edev->vlan_list, list) { + if (tmp->vid == vlan->vid) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "vlan already configured\n"); + kfree(vlan); + return -EEXIST; + } + } + + /* If interface is down, cache this VLAN ID and return */ + __qede_lock(edev); + if (edev->state != QEDE_STATE_OPEN) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, VLAN %d will be configured when interface is up\n", + vid); + if (vid != 0) + edev->non_configured_vlans++; + list_add(&vlan->list, &edev->vlan_list); + goto out; + } + + /* Check for the filter limit. + * Note - vlan0 has a reserved filter and can be added without + * worrying about quota + */ + if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || + (vlan->vid == 0)) { + rc = qede_set_ucast_rx_vlan(edev, + QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %d\n", + vlan->vid); + kfree(vlan); + goto out; + } + vlan->configured = true; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) + edev->configured_vlans++; + } else { + /* Out of quota; Activate accept-any-VLAN mode */ + if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, true); + + edev->non_configured_vlans++; + } + + list_add(&vlan->list, &edev->vlan_list); + +out: + __qede_unlock(edev); + return rc; +} + +static void qede_del_vlan_from_list(struct qede_dev *edev, + struct qede_vlan *vlan) +{ + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + if (vlan->configured) + edev->configured_vlans--; + else + edev->non_configured_vlans--; + } + + list_del(&vlan->list); + kfree(vlan); +} + +int qede_configure_vlan_filters(struct qede_dev *edev) +{ + int rc = 0, real_rc = 0, accept_any_vlan = 0; + struct qed_dev_eth_info *dev_info; + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return 0; + + dev_info = &edev->dev_info; + + /* Configure non-configured vlans */ + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (vlan->configured) + continue; + + /* We have used all our credits, now enable accept_any_vlan */ + if ((vlan->vid != 0) && + (edev->configured_vlans == dev_info->num_vlan_filters)) { + accept_any_vlan = 1; + continue; + } + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); + + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %u\n", + vlan->vid); + real_rc = rc; + continue; + } + + vlan->configured = true; + /* vlan0 filter doesn't consume our VLAN filter's quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans--; + edev->configured_vlans++; + } + } + + /* enable accept_any_vlan mode if we have more VLANs than credits, + * or remove accept_any_vlan mode if we've actually removed + * a non-configured vlan, and all remaining vlans are truly configured. 
+ */ + + if (accept_any_vlan) + qede_config_accept_any_vlan(edev, true); + else if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, false); + + return real_rc; +} + +int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan = NULL; + int rc = 0; + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); + + /* Find whether entry exists */ + __qede_lock(edev); + list_for_each_entry(vlan, &edev->vlan_list, list) + if (vlan->vid == vid) + break; + + if (!vlan || (vlan->vid != vid)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Vlan isn't configured\n"); + goto out; + } + + if (edev->state != QEDE_STATE_OPEN) { + /* As interface is already down, we don't have a VPORT + * instance to remove vlan filter. So just update vlan list + */ + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, removing VLAN from list only\n"); + qede_del_vlan_from_list(edev, vlan); + goto out; + } + + /* Remove vlan */ + if (vlan->configured) { + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, + vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + goto out; + } + } + + qede_del_vlan_from_list(edev, vlan); + + /* We have removed a VLAN - try to see if we can + * configure non-configured VLAN from the list. + */ + rc = qede_configure_vlan_filters(edev); + +out: + __qede_unlock(edev); + return rc; +} + +void qede_vlan_mark_nonconfigured(struct qede_dev *edev) +{ + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return; + + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (!vlan->configured) + continue; + + vlan->configured = false; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans++; + edev->configured_vlans--; + } + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "marked vlan %d as non-configured\n", vlan->vid); + } + + edev->accept_any_vlan = false; +} + +static void qede_set_features_reload(struct qede_dev *edev, + struct qede_reload_args *args) +{ + edev->ndev->features = args->u.features; +} + +int qede_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qede_dev *edev = netdev_priv(dev); + netdev_features_t changes = features ^ dev->features; + bool need_reload = false; + + /* No action needed if hardware GRO is disabled during driver load */ + if (changes & NETIF_F_GRO) { + if (dev->features & NETIF_F_GRO) + need_reload = !edev->gro_disable; + else + need_reload = edev->gro_disable; + } + + if (need_reload) { + struct qede_reload_args args; + + args.u.features = features; + args.func = &qede_set_features_reload; + + /* Make sure that we definitely need to reload. + * In case of an eBPF attached program, there will be no FW + * aggregations, so no need to actually reload. 
+ */ + __qede_lock(edev); + if (edev->xdp_prog) + args.func(edev, &args); + else + qede_reload(edev, &args, true); + __qede_unlock(edev); + + return 1; + } + + return 0; +} + +void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", + t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (edev->geneve_dst_port) + return; + + edev->geneve_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + } + + schedule_delayed_work(&edev->sp_task, 0); +} + +void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (t_port != edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", + t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (t_port != edev->geneve_dst_port) + return; + + edev->geneve_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + } + + schedule_delayed_work(&edev->sp_task, 0); +} + +static void qede_xdp_reload_func(struct qede_dev *edev, + struct qede_reload_args *args) +{ + struct bpf_prog *old; + + old = xchg(&edev->xdp_prog, args->u.new_prog); + if (old) + bpf_prog_put(old); +} + +static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) +{ + struct qede_reload_args args; + + if (prog && prog->xdp_adjust_head) { + DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); + return -EOPNOTSUPP; + } + + /* If we're called, there was already a bpf reference increment */ + args.func = &qede_xdp_reload_func; + args.u.new_prog = prog; + qede_reload(edev, &args, false); + + return 0; +} + +int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return qede_xdp_set(edev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!edev->xdp_prog; + return 0; + default: + return -EINVAL; + } +} + +static int qede_set_mcast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char *mac, int num_macs) +{ + struct qed_filter_params filter_cmd; + int i; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_MCAST; + filter_cmd.filter.mcast.type = opcode; + filter_cmd.filter.mcast.num = num_macs; + + for (i = 0; i < num_macs; i++, mac += ETH_ALEN) + ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +int qede_set_mac_addr(struct net_device *ndev, void *p) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct sockaddr *addr = p; + int rc; + + ASSERT_RTNL(); /* @@@TBD To be removed */ + + DP_INFO(edev, "Set_mac_addr called\n"); + + if (!is_valid_ether_addr(addr->sa_data)) { + DP_NOTICE(edev, "The MAC address is not valid\n"); + return -EFAULT; + } + + if (!edev->ops->check_mac(edev->cdev, 
addr->sa_data)) { + DP_NOTICE(edev, "qed prevents setting MAC\n"); + return -EINVAL; + } + + ether_addr_copy(ndev->dev_addr, addr->sa_data); + + if (!netif_running(ndev)) { + DP_NOTICE(edev, "The device is currently down\n"); + return 0; + } + + /* Remove the previous primary mac */ + rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, + edev->primary_mac); + if (rc) + return rc; + + edev->ops->common->update_mac(edev->cdev, addr->sa_data); + + /* Add MAC filter according to the new unicast HW MAC address */ + ether_addr_copy(edev->primary_mac, ndev->dev_addr); + return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, + edev->primary_mac); +} + +static int +qede_configure_mcast_filtering(struct net_device *ndev, + enum qed_filter_rx_mode_type *accept_flags) +{ + struct qede_dev *edev = netdev_priv(ndev); + unsigned char *mc_macs, *temp; + struct netdev_hw_addr *ha; + int rc = 0, mc_count; + size_t size; + + size = 64 * ETH_ALEN; + + mc_macs = kzalloc(size, GFP_KERNEL); + if (!mc_macs) { + DP_NOTICE(edev, + "Failed to allocate memory for multicast MACs\n"); + rc = -ENOMEM; + goto exit; + } + + temp = mc_macs; + + /* Remove all previously configured MAC filters */ + rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, + mc_macs, 1); + if (rc) + goto exit; + + netif_addr_lock_bh(ndev); + + mc_count = netdev_mc_count(ndev); + if (mc_count < 64) { + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(temp, ha->addr); + temp += ETH_ALEN; + } + } + + netif_addr_unlock_bh(ndev); + + /* Check for all multicast @@@TBD resource allocation */ + if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) { + if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) + *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + } else { + /* Add all multicast MAC filters */ + rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, + mc_macs, mc_count); + } + +exit: + kfree(mc_macs); + return rc; +} + +void qede_set_rx_mode(struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + + set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} + +/* Must be called with qede_lock held */ +void qede_config_rx_mode(struct net_device *ndev) +{ + enum qed_filter_rx_mode_type accept_flags; + struct qede_dev *edev = netdev_priv(ndev); + struct qed_filter_params rx_mode; + unsigned char *uc_macs, *temp; + struct netdev_hw_addr *ha; + int rc, uc_count; + size_t size; + + netif_addr_lock_bh(ndev); + + uc_count = netdev_uc_count(ndev); + size = uc_count * ETH_ALEN; + + uc_macs = kzalloc(size, GFP_ATOMIC); + if (!uc_macs) { + DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); + netif_addr_unlock_bh(ndev); + return; + } + + temp = uc_macs; + netdev_for_each_uc_addr(ha, ndev) { + ether_addr_copy(temp, ha->addr); + temp += ETH_ALEN; + } + + netif_addr_unlock_bh(ndev); + + /* Configure the struct for the Rx mode */ + memset(&rx_mode, 0, sizeof(struct qed_filter_params)); + rx_mode.type = QED_FILTER_TYPE_RX_MODE; + + /* Remove all previous unicast secondary macs and multicast macs + * (configrue / leave the primary mac) + */ + rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, + edev->primary_mac); + if (rc) + goto out; + + /* Check for promiscuous */ + if ((ndev->flags & IFF_PROMISC) || + (uc_count > edev->dev_info.num_mac_filters - 1)) { + accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; + } else { + /* Add MAC filters according to the unicast secondary macs */ + int i; + + temp = uc_macs; + for (i = 0; i < uc_count; i++) { + rc = 
qede_set_ucast_rx_mac(edev, + QED_FILTER_XCAST_TYPE_ADD, + temp); + if (rc) + goto out; + + temp += ETH_ALEN; + } + + rc = qede_configure_mcast_filtering(ndev, &accept_flags); + if (rc) + goto out; + } + + /* take care of VLAN mode */ + if (ndev->flags & IFF_PROMISC) { + qede_config_accept_any_vlan(edev, true); + } else if (!edev->non_configured_vlans) { + /* It's possible that accept_any_vlan mode is set due to a + * previous setting of IFF_PROMISC. If vlan credits are + * sufficient, disable accept_any_vlan. + */ + qede_config_accept_any_vlan(edev, false); + } + + rx_mode.filter.accept_flags = accept_flags; + edev->ops->filter_config(edev->cdev, &rx_mode); +out: + kfree(uc_macs); +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 1393bf4add7a..334e4140c798 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -210,18 +210,6 @@ static struct pci_driver qede_pci_driver = { #endif }; -static void qede_force_mac(void *dev, u8 *mac, bool forced) -{ - struct qede_dev *edev = dev; - - /* MAC hints take effect only if we haven't set one already */ - if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) - return; - - ether_addr_copy(edev->ndev->dev_addr, mac); - ether_addr_copy(edev->primary_mac, mac); -} - static struct qed_eth_cb_ops qede_ll_ops = { { .link_update = qede_link_update, @@ -319,39 +307,6 @@ module_exit(qede_cleanup); static int qede_open(struct net_device *ndev); static int qede_close(struct net_device *ndev); -static int qede_set_mac_addr(struct net_device *ndev, void *p); -static void qede_set_rx_mode(struct net_device *ndev); -static void qede_config_rx_mode(struct net_device *ndev); - -static int qede_set_ucast_rx_mac(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - unsigned char mac[ETH_ALEN]) -{ - struct qed_filter_params filter_cmd; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.mac_valid = 1; - ether_addr_copy(filter_cmd.filter.ucast.mac, mac); - - return edev->ops->filter_config(edev->cdev, &filter_cmd); -} - -static int qede_set_ucast_rx_vlan(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - u16 vid) -{ - struct qed_filter_params filter_cmd; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.vlan_valid = 1; - filter_cmd.filter.ucast.vlan = vid; - - return edev->ops->filter_config(edev->cdev, &filter_cmd); -} void qede_fill_by_demand_stats(struct qede_dev *edev) { @@ -519,409 +474,6 @@ static int qede_set_vf_link_state(struct net_device *dev, int vfidx, } #endif -static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) -{ - struct qed_update_vport_params params; - int rc; - - /* Proceed only if action actually needs to be performed */ - if (edev->accept_any_vlan == action) - return; - - memset(¶ms, 0, sizeof(params)); - - params.vport_id = 0; - params.accept_any_vlan = action; - params.update_accept_any_vlan_flg = 1; - - rc = edev->ops->vport_update(edev->cdev, ¶ms); - if (rc) { - DP_ERR(edev, "Failed to %s accept-any-vlan\n", - action ? "enable" : "disable"); - } else { - DP_INFO(edev, "%s accept-any-vlan\n", - action ? 
"enabled" : "disabled"); - edev->accept_any_vlan = action; - } -} - -static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct qede_dev *edev = netdev_priv(dev); - struct qede_vlan *vlan, *tmp; - int rc = 0; - - DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); - - vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); - if (!vlan) { - DP_INFO(edev, "Failed to allocate struct for vlan\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&vlan->list); - vlan->vid = vid; - vlan->configured = false; - - /* Verify vlan isn't already configured */ - list_for_each_entry(tmp, &edev->vlan_list, list) { - if (tmp->vid == vlan->vid) { - DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "vlan already configured\n"); - kfree(vlan); - return -EEXIST; - } - } - - /* If interface is down, cache this VLAN ID and return */ - __qede_lock(edev); - if (edev->state != QEDE_STATE_OPEN) { - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "Interface is down, VLAN %d will be configured when interface is up\n", - vid); - if (vid != 0) - edev->non_configured_vlans++; - list_add(&vlan->list, &edev->vlan_list); - goto out; - } - - /* Check for the filter limit. - * Note - vlan0 has a reserved filter and can be added without - * worrying about quota - */ - if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || - (vlan->vid == 0)) { - rc = qede_set_ucast_rx_vlan(edev, - QED_FILTER_XCAST_TYPE_ADD, - vlan->vid); - if (rc) { - DP_ERR(edev, "Failed to configure VLAN %d\n", - vlan->vid); - kfree(vlan); - goto out; - } - vlan->configured = true; - - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) - edev->configured_vlans++; - } else { - /* Out of quota; Activate accept-any-VLAN mode */ - if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, true); - - edev->non_configured_vlans++; - } - - list_add(&vlan->list, &edev->vlan_list); - -out: - __qede_unlock(edev); - return rc; -} - -static void qede_del_vlan_from_list(struct qede_dev *edev, - struct qede_vlan *vlan) -{ - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) { - if (vlan->configured) - edev->configured_vlans--; - else - edev->non_configured_vlans--; - } - - list_del(&vlan->list); - kfree(vlan); -} - -static int qede_configure_vlan_filters(struct qede_dev *edev) -{ - int rc = 0, real_rc = 0, accept_any_vlan = 0; - struct qed_dev_eth_info *dev_info; - struct qede_vlan *vlan = NULL; - - if (list_empty(&edev->vlan_list)) - return 0; - - dev_info = &edev->dev_info; - - /* Configure non-configured vlans */ - list_for_each_entry(vlan, &edev->vlan_list, list) { - if (vlan->configured) - continue; - - /* We have used all our credits, now enable accept_any_vlan */ - if ((vlan->vid != 0) && - (edev->configured_vlans == dev_info->num_vlan_filters)) { - accept_any_vlan = 1; - continue; - } - - DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); - - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, - vlan->vid); - if (rc) { - DP_ERR(edev, "Failed to configure VLAN %u\n", - vlan->vid); - real_rc = rc; - continue; - } - - vlan->configured = true; - /* vlan0 filter doesn't consume our VLAN filter's quota */ - if (vlan->vid != 0) { - edev->non_configured_vlans--; - edev->configured_vlans++; - } - } - - /* enable accept_any_vlan mode if we have more VLANs than credits, - * or remove accept_any_vlan mode if we've actually removed - * a non-configured vlan, and all remaining vlans are truly configured. 
- */ - - if (accept_any_vlan) - qede_config_accept_any_vlan(edev, true); - else if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, false); - - return real_rc; -} - -static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct qede_dev *edev = netdev_priv(dev); - struct qede_vlan *vlan = NULL; - int rc = 0; - - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); - - /* Find whether entry exists */ - __qede_lock(edev); - list_for_each_entry(vlan, &edev->vlan_list, list) - if (vlan->vid == vid) - break; - - if (!vlan || (vlan->vid != vid)) { - DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "Vlan isn't configured\n"); - goto out; - } - - if (edev->state != QEDE_STATE_OPEN) { - /* As interface is already down, we don't have a VPORT - * instance to remove vlan filter. So just update vlan list - */ - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "Interface is down, removing VLAN from list only\n"); - qede_del_vlan_from_list(edev, vlan); - goto out; - } - - /* Remove vlan */ - if (vlan->configured) { - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, - vid); - if (rc) { - DP_ERR(edev, "Failed to remove VLAN %d\n", vid); - goto out; - } - } - - qede_del_vlan_from_list(edev, vlan); - - /* We have removed a VLAN - try to see if we can - * configure non-configured VLAN from the list. - */ - rc = qede_configure_vlan_filters(edev); - -out: - __qede_unlock(edev); - return rc; -} - -static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) -{ - struct qede_vlan *vlan = NULL; - - if (list_empty(&edev->vlan_list)) - return; - - list_for_each_entry(vlan, &edev->vlan_list, list) { - if (!vlan->configured) - continue; - - vlan->configured = false; - - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) { - edev->non_configured_vlans++; - edev->configured_vlans--; - } - - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "marked vlan %d as non-configured\n", vlan->vid); - } - - edev->accept_any_vlan = false; -} - -static void qede_set_features_reload(struct qede_dev *edev, - struct qede_reload_args *args) -{ - edev->ndev->features = args->u.features; -} - -int qede_set_features(struct net_device *dev, netdev_features_t features) -{ - struct qede_dev *edev = netdev_priv(dev); - netdev_features_t changes = features ^ dev->features; - bool need_reload = false; - - /* No action needed if hardware GRO is disabled during driver load */ - if (changes & NETIF_F_GRO) { - if (dev->features & NETIF_F_GRO) - need_reload = !edev->gro_disable; - else - need_reload = edev->gro_disable; - } - - if (need_reload) { - struct qede_reload_args args; - - args.u.features = features; - args.func = &qede_set_features_reload; - - /* Make sure that we definitely need to reload. - * In case of an eBPF attached program, there will be no FW - * aggregations, so no need to actually reload. 
- */ - __qede_lock(edev); - if (edev->xdp_prog) - args.func(edev, &args); - else - qede_reload(edev, &args, true); - __qede_unlock(edev); - - return 1; - } - - return 0; -} - -static void qede_udp_tunnel_add(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (edev->vxlan_dst_port) - return; - - edev->vxlan_dst_port = t_port; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", - t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (edev->geneve_dst_port) - return; - - edev->geneve_dst_port = t_port; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", - t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - break; - default: - return; - } - - schedule_delayed_work(&edev->sp_task, 0); -} - -static void qede_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (t_port != edev->vxlan_dst_port) - return; - - edev->vxlan_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", - t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (t_port != edev->geneve_dst_port) - return; - - edev->geneve_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", - t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - break; - default: - return; - } - - schedule_delayed_work(&edev->sp_task, 0); -} - -static void qede_xdp_reload_func(struct qede_dev *edev, - struct qede_reload_args *args) -{ - struct bpf_prog *old; - - old = xchg(&edev->xdp_prog, args->u.new_prog); - if (old) - bpf_prog_put(old); -} - -static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) -{ - struct qede_reload_args args; - - if (prog && prog->xdp_adjust_head) { - DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - - /* If we're called, there was already a bpf reference increment */ - args.func = &qede_xdp_reload_func; - args.u.new_prog = prog; - qede_reload(edev, &args, false); - - return 0; -} - -static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) -{ - struct qede_dev *edev = netdev_priv(dev); - - switch (xdp->command) { - case XDP_SETUP_PROG: - return qede_xdp_set(edev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_attached = !!edev->xdp_prog; - return 0; - default: - return -EINVAL; - } -} - static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -2237,23 +1789,6 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) return 0; } -static int qede_set_mcast_rx_mac(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - unsigned char *mac, int num_macs) -{ - struct qed_filter_params filter_cmd; - int i; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_MCAST; - filter_cmd.filter.mcast.type = opcode; - filter_cmd.filter.mcast.num = num_macs; - - for (i = 0; i < num_macs; i++, mac += ETH_ALEN) - ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); - - return edev->ops->filter_config(edev->cdev, &filter_cmd); -} enum qede_unload_mode { QEDE_UNLOAD_NORMAL, @@ -2484,192 +2019,3 @@ static void qede_link_update(void *dev, struct qed_link_output *link) } } } - -static int 
qede_set_mac_addr(struct net_device *ndev, void *p) -{ - struct qede_dev *edev = netdev_priv(ndev); - struct sockaddr *addr = p; - int rc; - - ASSERT_RTNL(); /* @@@TBD To be removed */ - - DP_INFO(edev, "Set_mac_addr called\n"); - - if (!is_valid_ether_addr(addr->sa_data)) { - DP_NOTICE(edev, "The MAC address is not valid\n"); - return -EFAULT; - } - - if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) { - DP_NOTICE(edev, "qed prevents setting MAC\n"); - return -EINVAL; - } - - ether_addr_copy(ndev->dev_addr, addr->sa_data); - - if (!netif_running(ndev)) { - DP_NOTICE(edev, "The device is currently down\n"); - return 0; - } - - /* Remove the previous primary mac */ - rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, - edev->primary_mac); - if (rc) - return rc; - - edev->ops->common->update_mac(edev->cdev, addr->sa_data); - - /* Add MAC filter according to the new unicast HW MAC address */ - ether_addr_copy(edev->primary_mac, ndev->dev_addr); - return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, - edev->primary_mac); -} - -static int -qede_configure_mcast_filtering(struct net_device *ndev, - enum qed_filter_rx_mode_type *accept_flags) -{ - struct qede_dev *edev = netdev_priv(ndev); - unsigned char *mc_macs, *temp; - struct netdev_hw_addr *ha; - int rc = 0, mc_count; - size_t size; - - size = 64 * ETH_ALEN; - - mc_macs = kzalloc(size, GFP_KERNEL); - if (!mc_macs) { - DP_NOTICE(edev, - "Failed to allocate memory for multicast MACs\n"); - rc = -ENOMEM; - goto exit; - } - - temp = mc_macs; - - /* Remove all previously configured MAC filters */ - rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, - mc_macs, 1); - if (rc) - goto exit; - - netif_addr_lock_bh(ndev); - - mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { - netdev_for_each_mc_addr(ha, ndev) { - ether_addr_copy(temp, ha->addr); - temp += ETH_ALEN; - } - } - - netif_addr_unlock_bh(ndev); - - /* Check for all multicast @@@TBD resource allocation */ - if ((ndev->flags & IFF_ALLMULTI) || - (mc_count > 64)) { - if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) - *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; - } else { - /* Add all multicast MAC filters */ - rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, - mc_macs, mc_count); - } - -exit: - kfree(mc_macs); - return rc; -} - -static void qede_set_rx_mode(struct net_device *ndev) -{ - struct qede_dev *edev = netdev_priv(ndev); - - set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); -} - -/* Must be called with qede_lock held */ -static void qede_config_rx_mode(struct net_device *ndev) -{ - enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST; - struct qede_dev *edev = netdev_priv(ndev); - struct qed_filter_params rx_mode; - unsigned char *uc_macs, *temp; - struct netdev_hw_addr *ha; - int rc, uc_count; - size_t size; - - netif_addr_lock_bh(ndev); - - uc_count = netdev_uc_count(ndev); - size = uc_count * ETH_ALEN; - - uc_macs = kzalloc(size, GFP_ATOMIC); - if (!uc_macs) { - DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); - netif_addr_unlock_bh(ndev); - return; - } - - temp = uc_macs; - netdev_for_each_uc_addr(ha, ndev) { - ether_addr_copy(temp, ha->addr); - temp += ETH_ALEN; - } - - netif_addr_unlock_bh(ndev); - - /* Configure the struct for the Rx mode */ - memset(&rx_mode, 0, sizeof(struct qed_filter_params)); - rx_mode.type = QED_FILTER_TYPE_RX_MODE; - - /* Remove all previous unicast secondary macs and multicast macs - * (configrue / leave the primary mac) - */ - 
rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, - edev->primary_mac); - if (rc) - goto out; - - /* Check for promiscuous */ - if ((ndev->flags & IFF_PROMISC) || - (uc_count > edev->dev_info.num_mac_filters - 1)) { - accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; - } else { - /* Add MAC filters according to the unicast secondary macs */ - int i; - - temp = uc_macs; - for (i = 0; i < uc_count; i++) { - rc = qede_set_ucast_rx_mac(edev, - QED_FILTER_XCAST_TYPE_ADD, - temp); - if (rc) - goto out; - - temp += ETH_ALEN; - } - - rc = qede_configure_mcast_filtering(ndev, &accept_flags); - if (rc) - goto out; - } - - /* take care of VLAN mode */ - if (ndev->flags & IFF_PROMISC) { - qede_config_accept_any_vlan(edev, true); - } else if (!edev->non_configured_vlans) { - /* It's possible that accept_any_vlan mode is set due to a - * previous setting of IFF_PROMISC. If vlan credits are - * sufficient, disable accept_any_vlan. - */ - qede_config_accept_any_vlan(edev, false); - } - - rx_mode.filter.accept_flags = accept_flags; - edev->ops->filter_config(edev->cdev, &rx_mode); -out: - kfree(uc_macs); -} -- cgit v1.2.3 From e1d32acbcbd35af5264acc70ff03bf8da9e447a8 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:03 +0200 Subject: qed*: Change maximal number of queues Today qede requests contexts that would suffice for 64 'whole' combined queues [192 meant for 64 rx, tx and xdp tx queues], but registers netdev and limits the number of queues based on information received by qed. In turn, qed doesn't take context into account when informing qede how many queues it can support. This would lead to a configuration problem in case user tries configuring >64 combined queues to interface [or >96 in case xdp isn't enabled]. Since we don't have a mangement firware that actually provides so many interrupt lines to a single device we're currently safe but that's about to change soon. The new maximum is hence changed: - For RoCE devices, the limit would remain 64. - For non-RoCE devices, the limit might be higher [depending on the actual configuration of the device]. qed would start enforcing that limit in both scenarios. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 32 ++++++++++++++++++++++------ drivers/net/ethernet/qlogic/qed/qed_main.c | 11 ++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 2 +- 3 files changed, 37 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index fd153d27e3a6..03d31b394df7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1753,13 +1753,31 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, int max_vf_mac_filters = 0; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { - for_each_hwfn(cdev, i) - info->num_queues += - FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); - if (cdev->int_params.fp_msix_cnt) - info->num_queues = - min_t(u8, info->num_queues, - cdev->int_params.fp_msix_cnt); + u16 num_queues = 0; + + /* Since the feature controls only queue-zones, + * make sure we have the contexts [rx, tx, xdp] to + * match. 
+ */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + u16 l2_queues = (u16)FEAT_NUM(hwfn, + QED_PF_L2_QUE); + u16 cids; + + cids = hwfn->pf_params.eth_pf_params.num_cons; + num_queues += min_t(u16, l2_queues, cids / 3); + } + + /* queues might theoretically be >256, but interrupts' + * upper-limit guarantes that it would fit in a u8. + */ + if (cdev->int_params.fp_msix_cnt) { + u8 irqs = cdev->int_params.fp_msix_cnt; + + info->num_queues = (u8)min_t(u16, + num_queues, irqs); + } } else { info->num_queues = cdev->num_hwfns; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index e1568428569b..93eee83ccdc3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -877,6 +877,17 @@ static void qed_update_pf_params(struct qed_dev *cdev, params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } + /* In case we might support RDMA, don't allow qede to be greedy + * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. + */ + if (QED_LEADING_HWFN(cdev)->hw_info.personality == + QED_PCI_ETH_ROCE) { + u16 *num_cons; + + num_cons = ¶ms->eth_pf_params.num_cons; + *num_cons = min_t(u16, *num_cons, 192); + } + for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 334e4140c798..a679d4296cd8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -753,7 +753,7 @@ static void qede_update_pf_params(struct qed_dev *cdev) /* 64 rx + 64 tx + 64 XDP */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); - pf_params.eth_pf_params.num_cons = 192; + pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3; qed_ops->common->update_pf_params(cdev, &pf_params); } -- cgit v1.2.3 From e3eef7ee0201dbe5f4fc011b58d26228b57736ce Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:04 +0200 Subject: qede: Postpone reallocation until NAPI end During Rx flow driver allocates a replacement buffer each time it consumes an Rx buffer. Failing to do so, it would consume the currently processed buffer and re-post it on the ring. As a result, the Rx ring is always completely full [from driver POV]. We now allow the Rx ring to shorten by doing the re-allocations at the end of the NAPI run. The only limitation is that we still want to make sure each time we reallocate that we'd still have sufficient elements in the Rx ring to guarantee that FW would be able to post additional data and trigger an interrupt. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 7 +++++-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 23 +++++++++++++++++++---- drivers/net/ethernet/qlogic/qede/qede_main.c | 3 ++- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index ab49263c9d43..1c5aac4b6139 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -280,7 +280,7 @@ struct qede_rx_queue { u16 sw_rx_cons; u16 sw_rx_prod; - u16 num_rx_buffers; /* Slowpath */ + u16 filled_buffers; u8 data_direction; u8 rxq_id; @@ -293,6 +293,9 @@ struct qede_rx_queue { struct qed_chain rx_bd_ring; struct qed_chain rx_comp_ring ____cacheline_aligned; + /* Used once per each NAPI run */ + u16 num_rx_buffers; + /* GRO */ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; @@ -414,7 +417,7 @@ netdev_features_t qede_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp); -int qede_alloc_rx_buffer(struct qede_rx_queue *rxq); +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy); int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len); int qede_poll(struct napi_struct *napi, int budget); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 1614eed2d65d..a06acab48086 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -46,13 +46,22 @@ * Content also used by slowpath * *********************************/ -int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) { struct sw_rx_data *sw_rx_data; struct eth_rx_bd *rx_bd; dma_addr_t mapping; struct page *data; + /* In case lazy-allocation is allowed, postpone allocation until the + * end of the NAPI run. We'd still need to make sure the Rx ring has + * sufficient buffers to guarantee an additional Rx interrupt. + */ + if (allow_lazy && likely(rxq->filled_buffers > 12)) { + rxq->filled_buffers--; + return 0; + } + data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) return -ENOMEM; @@ -79,6 +88,7 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); rxq->sw_rx_prod++; + rxq->filled_buffers++; return 0; } @@ -523,7 +533,7 @@ static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, curr_cons->page_offset += rxq->rx_buf_seg_size; if (curr_cons->page_offset == PAGE_SIZE) { - if (unlikely(qede_alloc_rx_buffer(rxq))) { + if (unlikely(qede_alloc_rx_buffer(rxq, true))) { /* Since we failed to allocate new buffer * current buffer can be used again. */ @@ -1002,7 +1012,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, switch (act) { case XDP_TX: /* We need the replacement buffer before transmit. 
*/ - if (qede_alloc_rx_buffer(rxq)) { + if (qede_alloc_rx_buffer(rxq, true)) { qede_recycle_rx_bd_ring(rxq, 1); return false; } @@ -1116,7 +1126,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev, } /* We need a replacement buffer for each BD */ - if (unlikely(qede_alloc_rx_buffer(rxq))) + if (unlikely(qede_alloc_rx_buffer(rxq, true))) goto out; /* Now that we've allocated the replacement buffer, @@ -1293,6 +1303,11 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) work_done++; } + /* Allocate replacement buffers */ + while (rxq->num_rx_buffers - rxq->filled_buffers) + if (qede_alloc_rx_buffer(rxq, false)) + break; + /* Update producers */ qede_update_rx_prod(edev, rxq); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a679d4296cd8..be4121c867c3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1154,8 +1154,9 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) goto err; /* Allocate buffers for the Rx ring */ + rxq->filled_buffers = 0; for (i = 0; i < rxq->num_rx_buffers; i++) { - rc = qede_alloc_rx_buffer(rxq); + rc = qede_alloc_rx_buffer(rxq, false); if (rc) { DP_ERR(edev, "Rx buffers allocation failed at index %d\n", i); -- cgit v1.2.3 From 7ca547bdb00e3b162e8ed2889161b93eba9def0b Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Sun, 1 Jan 2017 13:57:05 +0200 Subject: qede - mark SKB as encapsulated When driver receives a recognized encapsulated packet it needs to set the skb->encapsulation field as well. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_fp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index a06acab48086..dddae1e21bf0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -607,8 +607,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) if (csum_flag & QEDE_CSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_UNNECESSARY; - if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) + if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) { skb->csum_level = 1; + skb->encapsulation = 1; + } } static inline void qede_skb_receive(struct qede_dev *edev, -- cgit v1.2.3 From 04e0fd006a9a670f557815b29ea89ffdac667c4d Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:06 +0200 Subject: qede: Remove unnecessary datapath dereference Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_fp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index dddae1e21bf0..1a6ca4884fad 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -622,7 +622,7 @@ static inline void qede_skb_receive(struct qede_dev *edev, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&fp->napi, skb); - fp->rxq->rcv_pkts++; + rxq->rcv_pkts++; } static void qede_set_gro_params(struct qede_dev *edev, -- cgit v1.2.3 From f29ffdb65ff0eaf95d2a2b80f0dee3fbd5a64772 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:07 +0200 Subject: qed*: RSS indirection based on queue-handles A step toward having qede agnostic to the queue configurations in firmware/hardware - let the RSS indirections use queue handles instead of actual queue indices. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 232 +++++++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_l2.h | 28 +-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 82 +++++---- drivers/net/ethernet/qlogic/qed/qed_vf.c | 12 +- drivers/net/ethernet/qlogic/qede/qede.h | 6 +- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 77 ++++---- drivers/net/ethernet/qlogic/qede/qede_filter.c | 92 ++++++++-- drivers/net/ethernet/qlogic/qede/qede_main.c | 126 +++++-------- include/linux/qed/qed_eth_if.h | 2 +- 9 files changed, 392 insertions(+), 265 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 03d31b394df7..a35db6951147 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -98,6 +98,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid->cid = cid; p_cid->vf_qid = vf_qid; p_cid->rel = *p_params; + p_cid->p_owner = p_hwfn; /* Don't try calculating the absolute indices for VFs */ if (IS_VF(p_hwfn->cdev)) { @@ -272,76 +273,103 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, static int qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, - struct qed_rss_params *p_params) + struct qed_rss_params *p_rss) { - struct eth_vport_rss_config *rss = &p_ramrod->rss_config; - u16 abs_l2_queue = 0, capabilities = 0; - int rc = 0, i; + struct eth_vport_rss_config *p_config; + u16 capabilities = 0; + int i, table_size; + int rc = 0; - if (!p_params) { + if (!p_rss) { p_ramrod->common.update_rss_flg = 0; return rc; } + p_config = &p_ramrod->rss_config; - BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != - ETH_RSS_IND_TABLE_ENTRIES_NUM); + BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM); - rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id); + rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); if (rc) return rc; - p_ramrod->common.update_rss_flg = p_params->update_rss_config; - rss->update_rss_capabilities = p_params->update_rss_capabilities; - rss->update_rss_ind_table = p_params->update_rss_ind_table; - rss->update_rss_key = p_params->update_rss_key; + p_ramrod->common.update_rss_flg = p_rss->update_rss_config; + p_config->update_rss_capabilities = p_rss->update_rss_capabilities; + p_config->update_rss_ind_table = p_rss->update_rss_ind_table; + p_config->update_rss_key = p_rss->update_rss_key; - rss->rss_mode = p_params->rss_enable ? 
- ETH_VPORT_RSS_MODE_REGULAR : - ETH_VPORT_RSS_MODE_DISABLED; + p_config->rss_mode = p_rss->rss_enable ? + ETH_VPORT_RSS_MODE_REGULAR : + ETH_VPORT_RSS_MODE_DISABLED; SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV4)); + !!(p_rss->rss_caps & QED_RSS_IPV4)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV6)); + !!(p_rss->rss_caps & QED_RSS_IPV6)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV4_TCP)); + !!(p_rss->rss_caps & QED_RSS_IPV4_TCP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV6_TCP)); + !!(p_rss->rss_caps & QED_RSS_IPV6_TCP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV4_UDP)); + !!(p_rss->rss_caps & QED_RSS_IPV4_UDP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV6_UDP)); - rss->tbl_size = p_params->rss_table_size_log; + !!(p_rss->rss_caps & QED_RSS_IPV6_UDP)); + p_config->tbl_size = p_rss->rss_table_size_log; - rss->capabilities = cpu_to_le16(capabilities); + p_config->capabilities = cpu_to_le16(capabilities); DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", p_ramrod->common.update_rss_flg, - rss->rss_mode, rss->update_rss_capabilities, - capabilities, rss->update_rss_ind_table, - rss->update_rss_key); + p_config->rss_mode, + p_config->update_rss_capabilities, + p_config->capabilities, + p_config->update_rss_ind_table, p_config->update_rss_key); - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - rc = qed_fw_l2_queue(p_hwfn, - (u8)p_params->rss_ind_table[i], - &abs_l2_queue); - if (rc) - return rc; + table_size = min_t(int, QED_RSS_IND_TABLE_SIZE, + 1 << p_config->tbl_size); + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i]; - rss->indirection_table[i] = cpu_to_le16(abs_l2_queue); - DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n", - i, rss->indirection_table[i]); + if (!p_queue) + return -EINVAL; + + p_config->indirection_table[i] = + cpu_to_le16(p_queue->abs.queue_id); + } + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "Configured RSS indirection table [%d entries]:\n", + table_size); + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFUP, + "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", + le16_to_cpu(p_config->indirection_table[i]), + le16_to_cpu(p_config->indirection_table[i + 1]), + le16_to_cpu(p_config->indirection_table[i + 2]), + le16_to_cpu(p_config->indirection_table[i + 3]), + le16_to_cpu(p_config->indirection_table[i + 4]), + le16_to_cpu(p_config->indirection_table[i + 5]), + le16_to_cpu(p_config->indirection_table[i + 6]), + le16_to_cpu(p_config->indirection_table[i + 7]), + le16_to_cpu(p_config->indirection_table[i + 8]), + le16_to_cpu(p_config->indirection_table[i + 9]), + le16_to_cpu(p_config->indirection_table[i + 10]), + le16_to_cpu(p_config->indirection_table[i + 11]), + le16_to_cpu(p_config->indirection_table[i + 12]), + le16_to_cpu(p_config->indirection_table[i + 13]), + le16_to_cpu(p_config->indirection_table[i + 14]), + le16_to_cpu(p_config->indirection_table[i + 15])); } for (i = 0; i < 10; i++) - rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]); + p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]); 
return rc; } @@ -1899,18 +1927,84 @@ static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) return 0; } +static int qed_update_vport_rss(struct qed_dev *cdev, + struct qed_update_vport_rss_params *input, + struct qed_rss_params *rss) +{ + int i, fn; + + /* Update configuration with what's correct regardless of CMT */ + rss->update_rss_config = 1; + rss->rss_enable = 1; + rss->update_rss_capabilities = 1; + rss->update_rss_ind_table = 1; + rss->update_rss_key = 1; + rss->rss_caps = input->rss_caps; + memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); + + /* In regular scenario, we'd simply need to take input handlers. + * But in CMT, we'd have to split the handlers according to the + * engine they were configured on. We'd then have to understand + * whether RSS is really required, since 2-queues on CMT doesn't + * require RSS. + */ + if (cdev->num_hwfns == 1) { + memcpy(rss->rss_ind_table, + input->rss_ind_table, + QED_RSS_IND_TABLE_SIZE * sizeof(void *)); + rss->rss_table_size_log = 7; + return 0; + } + + /* Start by copying the non-spcific information to the 2nd copy */ + memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params)); + + /* CMT should be round-robin */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + struct qed_queue_cid *cid = input->rss_ind_table[i]; + struct qed_rss_params *t_rss; + + if (cid->p_owner == QED_LEADING_HWFN(cdev)) + t_rss = &rss[0]; + else + t_rss = &rss[1]; + + t_rss->rss_ind_table[i / cdev->num_hwfns] = cid; + } + + /* Make sure RSS is actually required */ + for_each_hwfn(cdev, fn) { + for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) { + if (rss[fn].rss_ind_table[i] != + rss[fn].rss_ind_table[0]) + break; + } + if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) { + DP_VERBOSE(cdev, NETIF_MSG_IFUP, + "CMT - 1 queue per-hwfn; Disabling RSS\n"); + return -EINVAL; + } + rss[fn].rss_table_size_log = 6; + } + + return 0; +} + static int qed_update_vport(struct qed_dev *cdev, struct qed_update_vport_params *params) { struct qed_sp_vport_update_params sp_params; - struct qed_rss_params sp_rss_params; - int rc, i; + struct qed_rss_params *rss; + int rc = 0, i; if (!cdev) return -ENODEV; + rss = vzalloc(sizeof(*rss) * cdev->num_hwfns); + if (!rss) + return -ENOMEM; + memset(&sp_params, 0, sizeof(sp_params)); - memset(&sp_rss_params, 0, sizeof(sp_rss_params)); /* Translate protocol params into sp params */ sp_params.vport_id = params->vport_id; @@ -1924,66 +2018,24 @@ static int qed_update_vport(struct qed_dev *cdev, sp_params.update_accept_any_vlan_flg = params->update_accept_any_vlan_flg; - /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. - * We need to re-fix the rss values per engine for CMT. - */ - if (cdev->num_hwfns > 1 && params->update_rss_flg) { - struct qed_update_vport_rss_params *rss = ¶ms->rss_params; - int k, max = 0; - - /* Find largest entry, since it's possible RSS needs to - * be disabled [in case only 1 queue per-hwfn] - */ - for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) - max = (max > rss->rss_ind_table[k]) ? 
- max : rss->rss_ind_table[k]; - - /* Either fix RSS values or disable RSS */ - if (cdev->num_hwfns < max + 1) { - int divisor = (max + cdev->num_hwfns - 1) / - cdev->num_hwfns; - - DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "CMT - fixing RSS values (modulo %02x)\n", - divisor); - - for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) - rss->rss_ind_table[k] = - rss->rss_ind_table[k] % divisor; - } else { - DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "CMT - 1 queue per-hwfn; Disabling RSS\n"); + /* Prepare the RSS configuration */ + if (params->update_rss_flg) + if (qed_update_vport_rss(cdev, ¶ms->rss_params, rss)) params->update_rss_flg = 0; - } - } - - /* Now, update the RSS configuration for actual configuration */ - if (params->update_rss_flg) { - sp_rss_params.update_rss_config = 1; - sp_rss_params.rss_enable = 1; - sp_rss_params.update_rss_capabilities = 1; - sp_rss_params.update_rss_ind_table = 1; - sp_rss_params.update_rss_key = 1; - sp_rss_params.rss_caps = params->rss_params.rss_caps; - sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ - memcpy(sp_rss_params.rss_ind_table, - params->rss_params.rss_ind_table, - QED_RSS_IND_TABLE_SIZE * sizeof(u16)); - memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, - QED_RSS_KEY_SIZE * sizeof(u32)); - sp_params.rss_params = &sp_rss_params; - } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (params->update_rss_flg) + sp_params.rss_params = &rss[i]; + sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; rc = qed_sp_vport_update(p_hwfn, &sp_params, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_ERR(cdev, "Failed to update VPORT\n"); - return rc; + goto out; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), @@ -1992,7 +2044,9 @@ static int qed_update_vport(struct qed_dev *cdev, params->update_vport_active_flg); } - return 0; +out: + vfree(rss); + return rc; } static int qed_start_rxq(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 2f0303761d6b..93cb932ef663 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -39,6 +39,20 @@ #include "qed.h" #include "qed_hw.h" #include "qed_sp.h" +struct qed_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; + + /* Indirection table consist of rx queue handles */ + void *rss_ind_table[QED_RSS_IND_TABLE_SIZE]; + u32 rss_key[QED_RSS_KEY_SIZE]; +}; struct qed_sge_tpa_params { u8 max_buffers_per_cqe; @@ -156,18 +170,6 @@ struct qed_sp_vport_start_params { int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); -struct qed_rss_params { - u8 update_rss_config; - u8 rss_enable; - u8 rss_eng_id; - u8 update_rss_capabilities; - u8 update_rss_ind_table; - u8 update_rss_key; - u8 rss_caps; - u8 rss_table_size_log; - u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; - u32 rss_key[QED_RSS_KEY_SIZE]; -}; struct qed_filter_accept_flags { u8 update_rx_mode_config; @@ -287,6 +289,8 @@ struct qed_queue_cid { /* Legacy VFs might have Rx producer located elsewhere */ bool b_legacy_vf; + + struct qed_hwfn *p_owner; }; void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 469e857f5dcd..b22baf5e5daf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -32,6 +32,7 @@ #include #include +#include #include #include "qed_cxt.h" #include "qed_hsi.h" @@ -2318,12 +2319,14 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, struct qed_sp_vport_update_params *p_data, struct qed_rss_params *p_rss, - struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) + struct qed_iov_vf_mbx *p_mbx, + u16 *tlvs_mask, u16 *tlvs_accepted) { struct vfpf_vport_update_rss_tlv *p_rss_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; - u16 i, q_idx, max_q_idx; + bool b_reject = false; u16 table_size; + u16 i, q_idx; p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); @@ -2347,34 +2350,39 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, p_rss->rss_eng_id = vf->relative_vf_id + 1; p_rss->rss_caps = p_rss_tlv->rss_caps; p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; - memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table, - sizeof(p_rss->rss_ind_table)); memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), (1 << p_rss_tlv->rss_table_size_log)); - max_q_idx = ARRAY_SIZE(vf->vf_queues); - for (i = 0; i < table_size; i++) { - u16 index = vf->vf_queues[0].fw_rx_qid; + q_idx = p_rss_tlv->rss_ind_table[i]; + if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Omitting RSS due to wrong queue %04x\n", + vf->relative_vf_id, q_idx); + b_reject = true; + goto out; + } - q_idx = p_rss->rss_ind_table[i]; - if (q_idx >= max_q_idx) - DP_NOTICE(p_hwfn, - "rss_ind_table[%d] = %d, rxq is out of range\n", - i, q_idx); - else if (!vf->vf_queues[q_idx].p_rx_cid) - DP_NOTICE(p_hwfn, - "rss_ind_table[%d] = %d, rxq is not active\n", - i, q_idx); - else - index = vf->vf_queues[q_idx].fw_rx_qid; - p_rss->rss_ind_table[i] = index; + if (!vf->vf_queues[q_idx].p_rx_cid) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Omitting RSS due to inactive queue %08x\n", + vf->relative_vf_id, q_idx); + b_reject = true; + goto out; + } + + p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid; } p_data->rss_params = p_rss; +out: *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; + if (!b_reject) + *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; } static void @@ -2429,12 +2437,12 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { + struct qed_rss_params *p_rss_params = NULL; struct qed_sp_vport_update_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct qed_sge_tpa_params sge_tpa_params; - struct qed_rss_params rss_params; + u16 tlvs_mask = 0, tlvs_accepted = 0; u8 status = PFVF_STATUS_SUCCESS; - u16 tlvs_mask = 0; u16 length; int rc; @@ -2447,6 +2455,11 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; goto out; } + p_rss_params = vzalloc(sizeof(*p_rss_params)); + if (p_rss_params == NULL) { + status = PFVF_STATUS_FAILURE; + goto out; + } memset(¶ms, 0, sizeof(params)); params.opaque_fid = vf->opaque_fid; @@ -2461,20 +2474,26 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, qed_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask); qed_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); qed_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); - qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, &rss_params, - mbx, &tlvs_mask); qed_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask); 
qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms, &sge_tpa_params, mbx, &tlvs_mask); - /* Just log a message if there is no single extended tlv in buffer. - * When all features of vport update ramrod would be requested by VF - * as extended TLVs in buffer then an error can be returned in response - * if there is no extended TLV present in buffer. + tlvs_accepted = tlvs_mask; + + /* Some of the extended TLVs need to be validated first; In that case, + * they can update the mask without updating the accepted [so that + * PF could communicate to VF it has rejected request]. */ - if (!tlvs_mask) { - DP_NOTICE(p_hwfn, - "No feature tlvs found for vport update\n"); + qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params, + mbx, &tlvs_mask, &tlvs_accepted); + + if (!tlvs_accepted) { + if (tlvs_mask) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Upper-layer prevents VF vport configuration\n"); + else + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No feature tlvs found for vport update\n"); status = PFVF_STATUS_NOT_SUPPORTED; goto out; } @@ -2485,8 +2504,9 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; out: + vfree(p_rss_params); length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, - tlvs_mask, tlvs_mask); + tlvs_mask, tlvs_accepted); qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index af0542c0351c..9667059b15bd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -838,6 +838,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, if (p_params->rss_params) { struct qed_rss_params *rss_params = p_params->rss_params; struct vfpf_vport_update_rss_tlv *p_rss_tlv; + int i, table_size; size = sizeof(struct vfpf_vport_update_rss_tlv); p_rss_tlv = qed_add_tlv(p_hwfn, @@ -860,8 +861,15 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, p_rss_tlv->rss_enable = rss_params->rss_enable; p_rss_tlv->rss_caps = rss_params->rss_caps; p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; - memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table, - sizeof(rss_params->rss_ind_table)); + + table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE, + 1 << p_rss_tlv->rss_table_size_log); + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_queue; + + p_queue = rss_params->rss_ind_table[i]; + p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; + } memcpy(p_rss_tlv->rss_key, rss_params->rss_key, sizeof(rss_params->rss_key)); } diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 1c5aac4b6139..f4e9423cd90a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -164,6 +164,7 @@ struct qede_dev { u16 num_queues; #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) +#define QEDE_RX_QUEUE_IDX(edev, i) (i) #define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx) struct qed_int_info int_info; @@ -194,7 +195,10 @@ struct qede_dev { #define QEDE_RSS_KEY_INITED BIT(1) #define QEDE_RSS_CAPS_INITED BIT(2) u32 rss_params_inited; /* bit-field to track initialized rss params */ - struct qed_update_vport_rss_params rss_params; + u16 rss_ind_table[128]; + u32 rss_key[10]; + u8 rss_caps; + u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ diff --git 
a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 8e971b289d4b..baf264225c12 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "qede.h" #define QEDE_RQSTAT_OFFSET(stat_name) \ @@ -931,8 +932,7 @@ static int qede_set_channels(struct net_device *dev, /* Reset the indirection table if rx queue count is updated */ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; - memset(&edev->rss_params.rss_ind_table, 0, - sizeof(edev->rss_params.rss_ind_table)); + memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table)); } qede_reload(edev, NULL, false); @@ -978,11 +978,11 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: - if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP) + if (edev->rss_caps & QED_RSS_IPV4_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: - if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP) + if (edev->rss_caps & QED_RSS_IPV6_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case IPV4_FLOW: @@ -1015,8 +1015,9 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; u8 set_caps = 0, clr_caps = 0; + int rc = 0; DP_VERBOSE(edev, QED_MSG_DEBUG, "Set rss flags command parameters: flow type = %d, data = %llu\n", @@ -1091,27 +1092,29 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) } /* No action is needed if there is no change in the rss capability */ - if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps & - ~clr_caps) | set_caps)) + if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps)) return 0; /* Update internal configuration */ - edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) | - set_caps; + edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps); edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; /* Re-configure if possible */ - if (netif_running(edev->ndev)) { - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.update_rss_flg = 1; - vport_update_params.vport_id = 0; - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - return edev->ops->vport_update(edev->cdev, - &vport_update_params); + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) { + __qede_unlock(edev); + return -ENOMEM; + } + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); + rc = edev->ops->vport_update(edev->cdev, vport_update_params); + vfree(vport_update_params); } + __qede_unlock(edev); - return 0; + return rc; } static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) @@ -1136,7 +1139,7 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); - return sizeof(edev->rss_params.rss_key); + return sizeof(edev->rss_key); } static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) @@ -1151,11 +1154,10 @@ static int qede_get_rxfh(struct 
net_device *dev, u32 *indir, u8 *key, u8 *hfunc) return 0; for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) - indir[i] = edev->rss_params.rss_ind_table[i]; + indir[i] = edev->rss_ind_table[i]; if (key) - memcpy(key, edev->rss_params.rss_key, - qede_get_rxfh_key_size(dev)); + memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev)); return 0; } @@ -1163,9 +1165,9 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) static int qede_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; struct qede_dev *edev = netdev_priv(dev); - int i; + int i, rc = 0; if (edev->dev_info.common.num_hwfns > 1) { DP_INFO(edev, @@ -1181,27 +1183,30 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir, if (indir) { for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) - edev->rss_params.rss_ind_table[i] = indir[i]; + edev->rss_ind_table[i] = indir[i]; edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; } if (key) { - memcpy(&edev->rss_params.rss_key, key, - qede_get_rxfh_key_size(dev)); + memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev)); edev->rss_params_inited |= QEDE_RSS_KEY_INITED; } - if (netif_running(edev->ndev)) { - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.update_rss_flg = 1; - vport_update_params.vport_id = 0; - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - return edev->ops->vport_update(edev->cdev, - &vport_update_params); + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) { + __qede_unlock(edev); + return -ENOMEM; + } + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); + rc = edev->ops->vport_update(edev->cdev, vport_update_params); + vfree(vport_update_params); } + __qede_unlock(edev); - return 0; + return rc; } /* This function enables the interrupt generation and the NAPI on the device */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 6161e093a127..03e2a81b30c6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "qede.h" @@ -49,6 +50,60 @@ void qede_force_mac(void *dev, u8 *mac, bool forced) ether_addr_copy(edev->primary_mac, mac); } +void qede_fill_rss_params(struct qede_dev *edev, + struct qed_update_vport_rss_params *rss, u8 *update) +{ + bool need_reset = false; + int i; + + if (QEDE_RSS_COUNT(edev) <= 1) { + memset(rss, 0, sizeof(*rss)); + *update = 0; + return; + } + + /* Need to validate current RSS config uses valid entries */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) { + need_reset = true; + break; + } + } + + if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) { + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 indir_val, val; + + val = QEDE_RSS_COUNT(edev); + indir_val = ethtool_rxfh_indir_default(i, val); + edev->rss_ind_table[i] = indir_val; + } + edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; + } + + /* Now that we have the queue-indirection, prepare the handles */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]); + + 
rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle; + } + + if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { + netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key)); + edev->rss_params_inited |= QEDE_RSS_KEY_INITED; + } + memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key)); + + if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { + edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 | + QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; + edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; + } + rss->rss_caps = edev->rss_caps; + + *update = 1; +} + static int qede_set_ucast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, unsigned char mac[ETH_ALEN]) @@ -79,22 +134,24 @@ static int qede_set_ucast_rx_vlan(struct qede_dev *edev, return edev->ops->filter_config(edev->cdev, &filter_cmd); } -static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) +static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) { - struct qed_update_vport_params params; + struct qed_update_vport_params *params; int rc; /* Proceed only if action actually needs to be performed */ if (edev->accept_any_vlan == action) - return; + return 0; - memset(¶ms, 0, sizeof(params)); + params = vzalloc(sizeof(*params)); + if (!params) + return -ENOMEM; - params.vport_id = 0; - params.accept_any_vlan = action; - params.update_accept_any_vlan_flg = 1; + params->vport_id = 0; + params->accept_any_vlan = action; + params->update_accept_any_vlan_flg = 1; - rc = edev->ops->vport_update(edev->cdev, ¶ms); + rc = edev->ops->vport_update(edev->cdev, params); if (rc) { DP_ERR(edev, "Failed to %s accept-any-vlan\n", action ? "enable" : "disable"); @@ -103,6 +160,9 @@ static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) action ? 
"enabled" : "disabled"); edev->accept_any_vlan = action; } + + vfree(params); + return 0; } int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) @@ -166,8 +226,13 @@ int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) edev->configured_vlans++; } else { /* Out of quota; Activate accept-any-VLAN mode */ - if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, true); + if (!edev->non_configured_vlans) { + rc = qede_config_accept_any_vlan(edev, true); + if (rc) { + kfree(vlan); + goto out; + } + } edev->non_configured_vlans++; } @@ -242,9 +307,12 @@ int qede_configure_vlan_filters(struct qede_dev *edev) */ if (accept_any_vlan) - qede_config_accept_any_vlan(edev, true); + rc = qede_config_accept_any_vlan(edev, true); else if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, false); + rc = qede_config_accept_any_vlan(edev, false); + + if (rc && !real_rc) + real_rc = rc; return real_rc; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index be4121c867c3..88d47d6f35ac 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #include "qede.h" @@ -177,8 +178,12 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) { struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); struct qed_dev_info *qed_info = &edev->dev_info.common; + struct qed_update_vport_params *vport_params; int rc; + vport_params = vzalloc(sizeof(*vport_params)); + if (!vport_params) + return -ENOMEM; DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); @@ -186,15 +191,13 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) /* Enable/Disable Tx switching for PF */ if ((rc == num_vfs_param) && netif_running(edev->ndev) && qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { - struct qed_update_vport_params params; - - memset(¶ms, 0, sizeof(params)); - params.vport_id = 0; - params.update_tx_switching_flg = 1; - params.tx_switching_flg = num_vfs_param ? 1 : 0; - edev->ops->vport_update(edev->cdev, ¶ms); + vport_params->vport_id = 0; + vport_params->update_tx_switching_flg = 1; + vport_params->tx_switching_flg = num_vfs_param ? 
1 : 0; + edev->ops->vport_update(edev->cdev, vport_params); } + vfree(vport_params); return rc; } #endif @@ -1504,19 +1507,24 @@ static int qede_stop_txq(struct qede_dev *edev, static int qede_stop_queues(struct qede_dev *edev) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; struct qed_dev *cdev = edev->cdev; struct qede_fastpath *fp; int rc, i; /* Disable the vport */ - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = 0; - vport_update_params.update_vport_active_flg = 1; - vport_update_params.vport_active_flg = 0; - vport_update_params.update_rss_flg = 0; + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) + return -ENOMEM; + + vport_update_params->vport_id = 0; + vport_update_params->update_vport_active_flg = 1; + vport_update_params->vport_active_flg = 0; + vport_update_params->update_rss_flg = 0; + + rc = edev->ops->vport_update(cdev, vport_update_params); + vfree(vport_update_params); - rc = edev->ops->vport_update(cdev, &vport_update_params); if (rc) { DP_ERR(edev, "Failed to update vport\n"); return rc; @@ -1628,11 +1636,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) { int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; - struct qed_update_vport_params vport_update_params; - struct qed_queue_start_common_params q_params; struct qed_dev_info *qed_info = &edev->dev_info.common; + struct qed_update_vport_params *vport_update_params; + struct qed_queue_start_common_params q_params; struct qed_start_vport_params start = {0}; - bool reset_rss_indir = false; int rc, i; if (!edev->num_queues) { @@ -1641,6 +1648,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) return -EINVAL; } + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) + return -ENOMEM; + start.gro_enable = !edev->gro_disable; start.mtu = edev->ndev->mtu; start.vport_id = 0; @@ -1652,7 +1663,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) { DP_ERR(edev, "Start V-PORT failed %d\n", rc); - return rc; + goto out; } DP_VERBOSE(edev, NETIF_MSG_IFUP, @@ -1688,7 +1699,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) { DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); - return rc; + goto out; } /* Use the return parameters */ @@ -1704,93 +1715,46 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (fp->type & QEDE_FASTPATH_XDP) { rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI); if (rc) - return rc; + goto out; fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); if (IS_ERR(fp->rxq->xdp_prog)) { rc = PTR_ERR(fp->rxq->xdp_prog); fp->rxq->xdp_prog = NULL; - return rc; + goto out; } } if (fp->type & QEDE_FASTPATH_TX) { rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0)); if (rc) - return rc; + goto out; } } /* Prepare and send the vport enable */ - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = start.vport_id; - vport_update_params.update_vport_active_flg = 1; - vport_update_params.vport_active_flg = 1; + vport_update_params->vport_id = start.vport_id; + vport_update_params->update_vport_active_flg = 1; + vport_update_params->vport_active_flg = 1; if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && qed_info->tx_switching) { - vport_update_params.update_tx_switching_flg = 1; - vport_update_params.tx_switching_flg = 1; + 
vport_update_params->update_tx_switching_flg = 1; + vport_update_params->tx_switching_flg = 1; } - /* Fill struct with RSS params */ - if (QEDE_RSS_COUNT(edev) > 1) { - vport_update_params.update_rss_flg = 1; - - /* Need to validate current RSS config uses valid entries */ - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - if (edev->rss_params.rss_ind_table[i] >= - QEDE_RSS_COUNT(edev)) { - reset_rss_indir = true; - break; - } - } - - if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || - reset_rss_indir) { - u16 val; + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - u16 indir_val; - - val = QEDE_RSS_COUNT(edev); - indir_val = ethtool_rxfh_indir_default(i, val); - edev->rss_params.rss_ind_table[i] = indir_val; - } - edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; - } - - if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { - netdev_rss_key_fill(edev->rss_params.rss_key, - sizeof(edev->rss_params.rss_key)); - edev->rss_params_inited |= QEDE_RSS_KEY_INITED; - } - - if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { - edev->rss_params.rss_caps = QED_RSS_IPV4 | - QED_RSS_IPV6 | - QED_RSS_IPV4_TCP | - QED_RSS_IPV6_TCP; - edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; - } - - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - } else { - memset(&vport_update_params.rss_params, 0, - sizeof(vport_update_params.rss_params)); - } - - rc = edev->ops->vport_update(cdev, &vport_update_params); - if (rc) { + rc = edev->ops->vport_update(cdev, vport_update_params); + if (rc) DP_ERR(edev, "Update V-PORT failed %d\n", rc); - return rc; - } - return 0; +out: + vfree(vport_update_params); + return rc; } - enum qede_unload_mode { QEDE_UNLOAD_NORMAL, }; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d91651ecfd26..3613d63cd5d0 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -77,7 +77,7 @@ struct qed_dev_eth_info { }; struct qed_update_vport_rss_params { - u16 rss_ind_table[128]; + void *rss_ind_table[128]; u32 rss_key[10]; u8 rss_caps; }; -- cgit v1.2.3 From f990c82c385b1d9ce6acadb668df313c693cf48f Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:08 +0200 Subject: qed*: Add support for ndo_set_vf_trust Trusted VFs would be allowed to receive promiscuous and multicast promiscuous data. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 128 +++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 9 ++ drivers/net/ethernet/qlogic/qede/qede_filter.c | 20 ++-- drivers/net/ethernet/qlogic/qede/qede_main.c | 11 +++ include/linux/qed/qed_iov_if.h | 2 + 5 files changed, 162 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b22baf5e5daf..b1213643bbfd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1225,6 +1225,9 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) /* Clear the VF mac */ memset(vf_info->mac, 0, ETH_ALEN); + + vf_info->rx_accept_mode = 0; + vf_info->tx_accept_mode = 0; } static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, @@ -2433,6 +2436,39 @@ qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; } +static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, + u8 vfid, + struct qed_sp_vport_update_params *params, + u16 *tlvs) +{ + u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; + struct qed_filter_accept_flags *flags = ¶ms->accept_flags; + struct qed_public_vf_info *vf_info; + + /* Untrusted VFs can't even be trusted to know that fact. + * Simply indicate everything is configured fine, and trace + * configuration 'behind their back'. + */ + if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) + return 0; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (flags->update_rx_mode_config) { + vf_info->rx_accept_mode = flags->rx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->rx_accept_filter &= ~mask; + } + + if (flags->update_tx_mode_config) { + vf_info->tx_accept_mode = flags->tx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->tx_accept_filter &= ~mask; + } + + return 0; +} + static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -2487,6 +2523,13 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params, mbx, &tlvs_mask, &tlvs_accepted); + if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, + ¶ms, &tlvs_accepted)) { + tlvs_accepted = 0; + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + if (!tlvs_accepted) { if (tlvs_mask) DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -3936,6 +3979,32 @@ static int qed_set_vf_rate(struct qed_dev *cdev, return 0; } +static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + if (!qed_iov_pf_sanity_check(hwfn, vfid)) { + DP_NOTICE(hwfn, + "SR-IOV sanity check failed, can't set trust\n"); + return -EINVAL; + } + + vf = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (vf->is_trusted_request == trust) + return 0; + vf->is_trusted_request = trust; + + qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); + } + + return 0; +} + static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; @@ -4040,6 +4109,61 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) +{ + struct qed_sp_vport_update_params params; + struct qed_filter_accept_flags *flags; + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 mask; + int i; + + mask = 
QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; + flags = ¶ms.accept_flags; + + qed_for_each_vf(hwfn, i) { + /* Need to make sure current requested configuration didn't + * flip so that we'll end up configuring something that's not + * needed. + */ + vf_info = qed_iov_get_public_vf_info(hwfn, i, true); + if (vf_info->is_trusted_configured == + vf_info->is_trusted_request) + continue; + vf_info->is_trusted_configured = vf_info->is_trusted_request; + + /* Validate that the VF has a configured vport */ + vf = qed_iov_get_vf_info(hwfn, i, true); + if (!vf->vport_instance) + continue; + + memset(¶ms, 0, sizeof(params)); + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + + if (vf_info->rx_accept_mode & mask) { + flags->update_rx_mode_config = 1; + flags->rx_accept_filter = vf_info->rx_accept_mode; + } + + if (vf_info->tx_accept_mode & mask) { + flags->update_tx_mode_config = 1; + flags->tx_accept_filter = vf_info->tx_accept_mode; + } + + /* Remove if needed; Otherwise this would set the mask */ + if (!vf_info->is_trusted_configured) { + flags->rx_accept_filter &= ~mask; + flags->tx_accept_filter &= ~mask; + } + + if (flags->update_rx_mode_config || + flags->update_tx_mode_config) + qed_sp_vport_update(hwfn, ¶ms, + QED_SPQ_MODE_EBLOCK, NULL); + } +} + static void qed_iov_pf_task(struct work_struct *work) { @@ -4075,6 +4199,9 @@ static void qed_iov_pf_task(struct work_struct *work) if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, &hwfn->iov_task_flags)) qed_handle_bulletin_post(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) + qed_iov_handle_trust_change(hwfn); } void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) @@ -4137,4 +4264,5 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = { .set_link_state = &qed_set_vf_link_state, .set_spoof = &qed_spoof_configure, .set_rate = &qed_set_vf_rate, + .set_trust = &qed_set_vf_trust, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 1738d5bd2e8e..0a2e3a36d2cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -80,6 +80,14 @@ struct qed_public_vf_info { /* Currently configured Tx rate in MB/sec. 0 if unconfigured */ int tx_rate; + + /* Trusted VFs can configure promiscuous mode. + * Also store shadow promisc configuration if needed. 
+ */ + bool is_trusted_configured; + bool is_trusted_request; + u8 rx_accept_mode; + u8 tx_accept_mode; }; struct qed_iov_vf_init_params { @@ -245,6 +253,7 @@ enum qed_iov_wq_flag { QED_IOV_WQ_BULLETIN_UPDATE_FLAG, QED_IOV_WQ_STOP_WQ_FLAG, QED_IOV_WQ_FLR_FLAG, + QED_IOV_WQ_TRUST_FLAG, }; #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 03e2a81b30c6..107c3fda4792 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -714,11 +714,13 @@ void qede_config_rx_mode(struct net_device *ndev) goto out; /* Check for promiscuous */ - if ((ndev->flags & IFF_PROMISC) || - (uc_count > edev->dev_info.num_mac_filters - 1)) { + if (ndev->flags & IFF_PROMISC) accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; - } else { - /* Add MAC filters according to the unicast secondary macs */ + else + accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR; + + /* Configure all filters regardless, in case promisc is rejected */ + if (uc_count < edev->dev_info.num_mac_filters) { int i; temp = uc_macs; @@ -731,12 +733,14 @@ void qede_config_rx_mode(struct net_device *ndev) temp += ETH_ALEN; } - - rc = qede_configure_mcast_filtering(ndev, &accept_flags); - if (rc) - goto out; + } else { + accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; } + rc = qede_configure_mcast_filtering(ndev, &accept_flags); + if (rc) + goto out; + /* take care of VLAN mode */ if (ndev->flags & IFF_PROMISC) { qede_config_accept_any_vlan(edev, true); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 88d47d6f35ac..b58509feecd5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -475,6 +475,16 @@ static int qede_set_vf_link_state(struct net_device *dev, int vfidx, return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); } + +static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops) + return -EINVAL; + + return edev->ops->iov->set_trust(edev->cdev, vfidx, setting); +} #endif static const struct net_device_ops qede_netdev_ops = { @@ -488,6 +498,7 @@ static const struct net_device_ops qede_netdev_ops = { #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, .ndo_set_vf_vlan = qede_set_vf_vlan, + .ndo_set_vf_trust = qede_set_vf_trust, #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index b1f979135f97..ac2e6a3199a3 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -53,6 +53,8 @@ struct qed_iov_hv_ops { int (*set_rate) (struct qed_dev *cdev, int vfid, u32 min_rate, u32 max_rate); + + int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust); }; #endif -- cgit v1.2.3 From 8806787609dea02c333b187f94c1556798ac8897 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:09 +0200 Subject: qed: Support Multicast on Tx-switching Currently multicast traffic wouldn't be routed internally to listener; Instead it would only be sent to network via the physical carrier. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index a35db6951147..c92a8506c1e1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2210,11 +2210,14 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST; - if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; - else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) + accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + } return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, QED_SPQ_MODE_CB, NULL); -- cgit v1.2.3 From 1fe582ecade714e90e71c67c32edb7b5b809d651 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Sun, 1 Jan 2017 13:57:10 +0200 Subject: qed: Conserve RDMA resources when !QEDR If qedr isn't part of the kernel then don't allocate RDMA resources for it in qed. Signed-off-by: Ram Amrani Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 256674131c47..c8a877594032 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1122,7 +1122,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: - if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) + if (!IS_ENABLED(CONFIG_QED_RDMA)) + *p_proto = QED_PCI_ETH; + else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); break; case FUNC_MF_CFG_PROTOCOL_ISCSI: -- cgit v1.2.3 From ce742922acdc4219509bf5b57e0a073990abfa28 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 1 Jan 2017 13:57:11 +0200 Subject: qed*: Advance driver versions to 8.10.10.20. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 2 +- drivers/net/ethernet/qlogic/qede/qede.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index f6e0ff439b1c..1f61cf3209e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -51,7 +51,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.10.9.20" +#define DRV_MODULE_VERSION "8.10.10.20" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index f4e9423cd90a..b4234066689b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -49,7 +49,7 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 9 +#define QEDE_REVISION_VERSION 10 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." 
\ -- cgit v1.2.3 From 57629915d568c522ac1422df7bba4bee5b5c7a7c Mon Sep 17 00:00:00 2001 From: Ilan peer Date: Mon, 26 Dec 2016 18:17:36 +0200 Subject: mac80211: Fix addition of mesh configuration element The code was setting the capabilities byte to zero, after it was already properly set previously. Fix it. The bug was found while debugging hwsim mesh tests failures that happened since the commit mentioned below. Fixes: 76f43b4c0a93 ("mac80211: Remove invalid flag operations in mesh TSF synchronization") Signed-off-by: Ilan Peer Reviewed-by: Masashi Honma Signed-off-by: Johannes Berg --- net/mac80211/mesh.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index cc2a63bd233f..9c23172feba0 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -279,8 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ *pos |= ifmsh->ps_peers_deep_sleep ? IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00; - *pos++ = 0x00; - return 0; } -- cgit v1.2.3 From b4c7a3023a29f78881e08cd9ba8c3f0ee198c10b Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 2 Jan 2017 08:28:28 +0100 Subject: cfg80211: update wireless-regdb repo url in Documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's maintained by Seth Forshe for a long time now. Signed-off-by: Rafał Miłecki Cc: Seth Forshee Signed-off-by: Johannes Berg --- Documentation/networking/regulatory.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt index 356f791af574..b4af93fbbff6 100644 --- a/Documentation/networking/regulatory.txt +++ b/Documentation/networking/regulatory.txt @@ -205,7 +205,7 @@ the data in regdb.c as an alternative to using CRDA. The file net/wireless/db.txt should be kept up-to-date with the db.txt file available in the git repository here: - git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git + git://git.kernel.org/pub/scm/linux/kernel/git/sforshee/wireless-regdb.git Again, most users in most situations should be using the CRDA package provided with their distribution, and in most other situations users -- cgit v1.2.3 From b7f98864de216dc450e6d535c67bd3d7680df2a3 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 2 Jan 2017 08:28:29 +0100 Subject: cfg80211: fix example REG_RULE usage in Documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's just an example, but lets make it look more real to don't confuse people about possible REG_RULE usage. Channels are 20 MHz wide, so start and end frequencies are 10 MHz away from the center one. 
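To make that arithmetic concrete (a minimal illustrative sketch, assuming only the REG_RULE() helper and struct ieee80211_regdomain from the cfg80211 regulatory headers, not code taken from the patch itself): channel 1 is centered on 2412 MHz and channel 14 on 2484 MHz, so a rule covering channels 1..14 with 20 MHz channel width starts at 2412 - 10 = 2402 MHz and ends at 2484 + 10 = 2494 MHz.

/*
 * Hypothetical 2.4 GHz-only regdomain written the same way as the
 * documentation example below; the max-bandwidth/gain/EIRP values
 * (40 MHz, 6 dBi, 20 dBm) are simply carried over from that example.
 */
static const struct ieee80211_regdomain example_2ghz_regdom = {
	.n_reg_rules = 1,
	.alpha2 = "99",	/* placeholder, as in the documentation example */
	.reg_rules = {
		/* IEEE 802.11b/g, channels 1..14 (centers 2412..2484 MHz) */
		REG_RULE(2412 - 10, 2484 + 10, 40, 6, 20, 0),
	},
};
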
Signed-off-by: Rafał Miłecki Signed-off-by: Johannes Berg --- Documentation/networking/regulatory.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt index b4af93fbbff6..7818b5fe448b 100644 --- a/Documentation/networking/regulatory.txt +++ b/Documentation/networking/regulatory.txt @@ -156,12 +156,12 @@ struct ieee80211_regdomain mydriver_jp_regdom = { //.alpha2 = "99", /* If I have no alpha2 to map it to */ .reg_rules = { /* IEEE 802.11b/g, channels 1..14 */ - REG_RULE(2412-20, 2484+20, 40, 6, 20, 0), + REG_RULE(2412-10, 2484+10, 40, 6, 20, 0), /* IEEE 802.11a, channels 34..48 */ - REG_RULE(5170-20, 5240+20, 40, 6, 20, + REG_RULE(5170-10, 5240+10, 40, 6, 20, NL80211_RRF_NO_IR), /* IEEE 802.11a, channels 52..64 */ - REG_RULE(5260-20, 5320+20, 40, 6, 20, + REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_NO_IR| NL80211_RRF_DFS), } -- cgit v1.2.3 From 31b95c9bdc20663a20b3261303c2a5fc34aae133 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Fri, 30 Dec 2016 13:56:46 +0100 Subject: net: stmmac: remove unused duplicate property snps,axi_all For core revision 3.x Address-Aligned Beats is available in two registers. The DT property snps,aal was created for AAL in the DMA bus register, which is a read/write bit. The DT property snps,axi_all was created for AXI_AAL in the AXI bus mode register, which is a read only bit that reflects the value of AAL in the DMA bus register. Since the value of snps,axi_all is never used in the driver, and since the property was created for a bit that is read only, it should be safe to remove the property. Acked-by: Giuseppe Cavallaro Signed-off-by: Niklas Cassel Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 1 - drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1 - include/linux/stmmac.h | 1 - 3 files changed, 3 deletions(-) diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index 128da752fec9..c3d2fd480a1b 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -65,7 +65,6 @@ Optional properties: - snps,wr_osr_lmt: max write outstanding req. limit - snps,rd_osr_lmt: max read outstanding req. limit - snps,kbbe: do not cross 1KiB boundary. - - snps,axi_all: align address - snps,blen: this is a vector of supported burst length. 
- snps,fb: fixed-burst - snps,mb: mixed-burst diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 082cd48db6a7..60ba8993c650 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -121,7 +121,6 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); - axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all"); axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 266dab9ad782..889e0e9a3f1c 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -103,7 +103,6 @@ struct stmmac_axi { u32 axi_wr_osr_lmt; u32 axi_rd_osr_lmt; bool axi_kbbe; - bool axi_axi_all; u32 axi_blen[AXI_BLEN]; bool axi_fb; bool axi_mb; -- cgit v1.2.3 From 7b13558f242747db90e52fdc510441a2ed0bafba Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Mon, 2 Jan 2017 11:37:38 +0200 Subject: net/mlx5: Fix offset naming for reserved fields in hca_cap_bits Fix offset for reserved fields. Fixes: 7486216b3a0b ("{net,IB}/mlx5: mlx5_ifc updates") Fixes: b4ff3a36d3e4 ("net/mlx5: Use offset based reserved field names in the IFC header file") Fixes: 7d5e14237a55 ("net/mlx5: Update mlx5_ifc hardware features") Signed-off-by: Max Gurtovoy Reviewed-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- include/linux/mlx5/mlx5_ifc.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 57bec544e20a..4792c856531e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -826,9 +826,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; u8 port_module_event[0x1]; - u8 reserved_at_1b0[0x1]; + u8 reserved_at_1b1[0x1]; u8 ports_check[0x1]; - u8 reserved_at_1b2[0x1]; + u8 reserved_at_1b3[0x1]; u8 disable_link_up[0x1]; u8 beacon_led[0x1]; u8 port_type[0x2]; @@ -858,7 +858,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 compact_address_vector[0x1]; u8 striding_rq[0x1]; - u8 reserved_at_201[0x2]; + u8 reserved_at_202[0x2]; u8 ipoib_basic_offloads[0x1]; u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; @@ -1009,10 +1009,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 rndv_offload_rc[0x1]; u8 rndv_offload_dc[0x1]; u8 log_tag_matching_list_sz[0x5]; - u8 reserved_at_5e8[0x3]; + u8 reserved_at_5f8[0x3]; u8 log_max_xrq[0x5]; - u8 reserved_at_5f0[0x200]; + u8 reserved_at_600[0x200]; }; enum mlx5_flow_destination_type { -- cgit v1.2.3 From 9f885201f274072c32a65f4a324da405d5980efd Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 2 Jan 2017 11:37:39 +0200 Subject: IB/mlx5: Reorder code in query device command The order of features exposed by private mlx5-abi.h file is CQE zipping, packet pacing and multi-packet WQE. The internal order implemented in mlx5_ib_query_device() is multi-packet WQE, CQE zipping and packet pacing. Such difference hurts code readability, so let's sync, while mlx5-abi.h (exposed to userspace) is the primary order. This commit doesn't change any functionality. 
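The reordering is safe because each optional capability in the response is gated independently: a field is written only when the caller's output buffer is large enough to hold it, and response_length grows field by field, so only readability depends on the order. A minimal sketch of that idiom follows; the struct, field names and FIELD_AVAIL macro are illustrative stand-ins loosely modelled on the driver's field_avail() check, not the actual mlx5 code.

#include <stddef.h>
#include <string.h>

#define FIELD_AVAIL(type, fld, sz) \
	(offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

struct example_resp {
	unsigned int response_length;	/* bytes actually filled in */
	unsigned int cap_a;		/* older optional field */
	unsigned int cap_b;		/* newer optional field, appended last */
};

/* outlen is the size of the output buffer the caller supplied */
static void fill_example_resp(struct example_resp *resp, size_t outlen)
{
	memset(resp, 0, sizeof(*resp));
	resp->response_length = offsetof(struct example_resp, cap_a);

	/* Each capability only checks its own offset, so reordering the
	 * blocks cannot change what an older caller receives; keeping them
	 * in ABI order is purely for readability.
	 */
	if (FIELD_AVAIL(struct example_resp, cap_a, outlen)) {
		resp->cap_a = 1;
		resp->response_length += sizeof(resp->cap_a);
	}
	if (FIELD_AVAIL(struct example_resp, cap_b, outlen)) {
		resp->cap_b = 1;
		resp->response_length += sizeof(resp->cap_b);
	}
}
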
Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d566f6738833..2ab4e3219c84 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -672,17 +672,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq); } - if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, - uhw->outlen)) { - resp.mlx5_ib_support_multi_pkt_send_wqes = - MLX5_CAP_ETH(mdev, multi_pkt_send_wqe); - resp.response_length += - sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); - } - - if (field_avail(typeof(resp), reserved, uhw->outlen)) - resp.response_length += sizeof(resp.reserved); - if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { resp.cqe_comp_caps.max_num = MLX5_CAP_GEN(dev->mdev, cqe_compression) ? @@ -706,6 +695,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.response_length += sizeof(resp.packet_pacing_caps); } + if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, + uhw->outlen)) { + resp.mlx5_ib_support_multi_pkt_send_wqes = + MLX5_CAP_ETH(mdev, multi_pkt_send_wqe); + resp.response_length += + sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); + } + + if (field_avail(typeof(resp), reserved, uhw->outlen)) + resp.response_length += sizeof(resp.reserved); + if (uhw->outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); -- cgit v1.2.3 From d5ea2df9cefa9c81a66021b5bb89562d02bbc2f7 Mon Sep 17 00:00:00 2001 From: Binoy Jayan Date: Mon, 2 Jan 2017 11:37:40 +0200 Subject: IB/mlx5: Add helper mlx5_ib_post_send_wait Clean up the following common code (to post a list of work requests to the send queue of the specified QP) at various places and add a helper function 'mlx5_ib_post_send_wait' to implement the same. - Initialize 'mlx5_ib_umr_context' on stack - Assign "mlx5_umr_wr:wr:wr_cqe to umr_context.cqe - Acquire the semaphore - call ib_post_send with a single ib_send_wr - wait_for_completion() - Check for umr_context.status - Release the semaphore Signed-off-by: Binoy Jayan Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/mr.c | 115 +++++++++++----------------------------- 1 file changed, 32 insertions(+), 83 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 8f608debe141..7ab9b67ce43b 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -891,16 +891,40 @@ static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) init_completion(&context->done); } +static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, + struct mlx5_umr_wr *umrwr) +{ + struct umr_common *umrc = &dev->umrc; + struct ib_send_wr *bad; + int err; + struct mlx5_ib_umr_context umr_context; + + mlx5_ib_init_umr_context(&umr_context); + umrwr->wr.wr_cqe = &umr_context.cqe; + + down(&umrc->sem); + err = ib_post_send(umrc->qp, &umrwr->wr, &bad); + if (err) { + mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); + } else { + wait_for_completion(&umr_context.done); + if (umr_context.status != IB_WC_SUCCESS) { + mlx5_ib_warn(dev, "reg umr failed (%u)\n", + umr_context.status); + err = -EFAULT; + } + } + up(&umrc->sem); + return err; +} + static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, u64 len, int npages, int page_shift, int order, int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct device *ddev = dev->ib_dev.dma_device; - struct umr_common *umrc = &dev->umrc; - struct mlx5_ib_umr_context umr_context; struct mlx5_umr_wr umrwr = {}; - struct ib_send_wr *bad; struct mlx5_ib_mr *mr; struct ib_sge sg; int size; @@ -929,24 +953,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, if (err) goto free_mr; - mlx5_ib_init_umr_context(&umr_context); - - umrwr.wr.wr_cqe = &umr_context.cqe; prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, page_shift, virt_addr, len, access_flags); - down(&umrc->sem); - err = ib_post_send(umrc->qp, &umrwr.wr, &bad); - if (err) { - mlx5_ib_warn(dev, "post send failed, err %d\n", err); + err = mlx5_ib_post_send_wait(dev, &umrwr); + if (err && err != -EFAULT) goto unmap_dma; - } else { - wait_for_completion(&umr_context.done); - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_warn(dev, "reg umr failed\n"); - err = -EFAULT; - } - } mr->mmkey.iova = virt_addr; mr->mmkey.size = len; @@ -955,7 +967,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, mr->live = 1; unmap_dma: - up(&umrc->sem); dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); kfree(mr_pas); @@ -975,13 +986,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, { struct mlx5_ib_dev *dev = mr->dev; struct device *ddev = dev->ib_dev.dma_device; - struct umr_common *umrc = &dev->umrc; - struct mlx5_ib_umr_context umr_context; struct ib_umem *umem = mr->umem; int size; __be64 *pas; dma_addr_t dma; - struct ib_send_wr *bad; struct mlx5_umr_wr wr; struct ib_sge sg; int err = 0; @@ -1046,10 +1054,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); - mlx5_ib_init_umr_context(&umr_context); - memset(&wr, 0, sizeof(wr)); - wr.wr.wr_cqe = &umr_context.cqe; sg.addr = dma; sg.length = ALIGN(npages * sizeof(u64), @@ -1066,19 +1071,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, wr.mkey = mr->mmkey.key; wr.target.offset = start_page_index; - down(&umrc->sem); - err = ib_post_send(umrc->qp, &wr.wr, &bad); - if (err) { - mlx5_ib_err(dev, "UMR post send 
failed, err %d\n", err); - } else { - wait_for_completion(&umr_context.done); - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_err(dev, "UMR completion failed, code %d\n", - umr_context.status); - err = -EFAULT; - } - } - up(&umrc->sem); + err = mlx5_ib_post_send_wait(dev, &wr); } dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); @@ -1248,39 +1241,14 @@ error: static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { struct mlx5_core_dev *mdev = dev->mdev; - struct umr_common *umrc = &dev->umrc; - struct mlx5_ib_umr_context umr_context; struct mlx5_umr_wr umrwr = {}; - struct ib_send_wr *bad; - int err; if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) return 0; - mlx5_ib_init_umr_context(&umr_context); - - umrwr.wr.wr_cqe = &umr_context.cqe; prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key); - down(&umrc->sem); - err = ib_post_send(umrc->qp, &umrwr.wr, &bad); - if (err) { - up(&umrc->sem); - mlx5_ib_dbg(dev, "err %d\n", err); - goto error; - } else { - wait_for_completion(&umr_context.done); - up(&umrc->sem); - } - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_warn(dev, "unreg umr failed\n"); - err = -EFAULT; - goto error; - } - return 0; - -error: - return err; + return mlx5_ib_post_send_wait(dev, &umrwr); } static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, @@ -1289,19 +1257,13 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct device *ddev = dev->ib_dev.dma_device; - struct mlx5_ib_umr_context umr_context; - struct ib_send_wr *bad; struct mlx5_umr_wr umrwr = {}; struct ib_sge sg; - struct umr_common *umrc = &dev->umrc; dma_addr_t dma = 0; __be64 *mr_pas = NULL; int size; int err; - mlx5_ib_init_umr_context(&umr_context); - - umrwr.wr.wr_cqe = &umr_context.cqe; umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; if (flags & IB_MR_REREG_TRANS) { @@ -1329,21 +1291,8 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, } /* post send request to UMR QP */ - down(&umrc->sem); - err = ib_post_send(umrc->qp, &umrwr.wr, &bad); + err = mlx5_ib_post_send_wait(dev, &umrwr); - if (err) { - mlx5_ib_warn(dev, "post send failed, err %d\n", err); - } else { - wait_for_completion(&umr_context.done); - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_warn(dev, "reg umr failed (%u)\n", - umr_context.status); - err = -EFAULT; - } - } - - up(&umrc->sem); if (flags & IB_MR_REREG_TRANS) { dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); kfree(mr_pas); -- cgit v1.2.3 From bcda1aca7712539439d53dd5351c6223e03bf6e1 Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:41 +0200 Subject: net/mlx5: Support new MR features This patch adds the following items to IFC file. 1. MLX5_MKC_ACCESS_MODE_KSM enum value for creating KSM memory keys. KSM access mode used when indirect MKey associated with fixed memory size entries. 2. null_mkey field that is used to indicate non-present KLM/KSM entries, where it causes the device to generate page fault event when trying to access it. 3. struct mlx5_ifc_cmd_hca_cap_bits capability bits indicating related value/field is supported: * fixed_buffer_size - MLX5_MKC_ACCESS_MODE_KSM * umr_extended_translation_offset - translation_offset_42_16 in UMR ctrl segment * null_mkey - null_mkey in QUERY_SPECIAL_CONTEXTS Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- include/linux/mlx5/mlx5_ifc.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 4792c856531e..7c760e580268 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -782,11 +782,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_eq[0x4]; u8 max_indirection[0x8]; - u8 reserved_at_108[0x1]; + u8 fixed_buffer_size[0x1]; u8 log_max_mrw_sz[0x7]; u8 reserved_at_110[0x2]; u8 log_max_bsf_list_size[0x6]; - u8 reserved_at_118[0x2]; + u8 umr_extended_translation_offset[0x1]; + u8 null_mkey[0x1]; u8 log_max_klm_list_size[0x6]; u8 reserved_at_120[0xa]; @@ -2569,6 +2570,7 @@ enum { MLX5_MKC_ACCESS_MODE_PA = 0x0, MLX5_MKC_ACCESS_MODE_MTT = 0x1, MLX5_MKC_ACCESS_MODE_KLMS = 0x2, + MLX5_MKC_ACCESS_MODE_KSM = 0x3, }; struct mlx5_ifc_mkc_bits { @@ -3677,6 +3679,10 @@ struct mlx5_ifc_query_special_contexts_out_bits { u8 dump_fill_mkey[0x20]; u8 resd_lkey[0x20]; + + u8 null_mkey[0x20]; + + u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_query_special_contexts_in_bits { -- cgit v1.2.3 From 3161625589c1d7c54e949d462f4d0c327664881a Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:42 +0200 Subject: IB/mlx5: Refactor UMR post send format * Update struct mlx5_wqe_umr_ctrl_seg. * Currenlty UMR send_flags aim only certain use cases: enabled/disable cached MR, modifying XLT for ODP. By making flags independent make UMR more flexible allowing arbitrary manipulations. * Since different UMR formats have different entry sizes UMR request should receive exact size of translation table update instead of number of entries. Rename field npages to xlt_size in struct mlx5_umr_wr and update relevant code accordingly. * Add support of length64 bit. Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 24 ++-- drivers/infiniband/hw/mlx5/mr.c | 50 +++++---- drivers/infiniband/hw/mlx5/odp.c | 3 +- drivers/infiniband/hw/mlx5/qp.c | 128 +++++++++------------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- include/linux/mlx5/qp.h | 14 ++- 6 files changed, 103 insertions(+), 118 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 6c6057eb60ea..d79580dcf20f 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -174,13 +174,12 @@ struct mlx5_ib_flow_db { * enum ib_send_flags and enum ib_qp_type for low-level driver */ -#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START -#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1) -#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2) - -#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 3) -#define MLX5_IB_SEND_UMR_UPDATE_PD (IB_SEND_RESERVED_START << 4) -#define MLX5_IB_SEND_UMR_UPDATE_ACCESS IB_SEND_RESERVED_END +#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0) +#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1) +#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2) +#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3) +#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4) +#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END #define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1 /* @@ -190,6 +189,9 @@ struct mlx5_ib_flow_db { #define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2 #define MLX5_IB_WR_UMR IB_WR_RESERVED1 +#define MLX5_IB_UMR_OCTOWORD 16 +#define MLX5_IB_UMR_XLT_ALIGNMENT 64 + /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags. * * These flags are intended for internal use by the mlx5_ib driver, and they @@ -414,13 +416,11 @@ enum mlx5_ib_qp_flags { struct mlx5_umr_wr { struct ib_send_wr wr; - union { - u64 virt_addr; - u64 offset; - } target; + u64 virt_addr; + u64 offset; struct ib_pd *pd; unsigned int page_shift; - unsigned int npages; + unsigned int xlt_size; u64 length; int access_flags; u32 mkey; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 7ab9b67ce43b..be8d38d7ca12 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -774,7 +774,7 @@ static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, * To avoid copying garbage after the pas array, we allocate * a little more. */ - *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); + *size = ALIGN(sizeof(struct mlx5_mtt) * npages, MLX5_UMR_MTT_ALIGNMENT); *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); if (!(*mr_pas)) return -ENOMEM; @@ -782,7 +782,7 @@ static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN); mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT); /* Clear padding after the actual pages. 
*/ - memset(pas + npages, 0, *size - npages * sizeof(u64)); + memset(pas + npages, 0, *size - npages * sizeof(struct mlx5_mtt)); *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE); if (dma_mapping_error(ddev, *dma)) { @@ -801,7 +801,8 @@ static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr, struct mlx5_umr_wr *umrwr = umr_wr(wr); sg->addr = dma; - sg->length = ALIGN(sizeof(u64) * n, 64); + sg->length = ALIGN(sizeof(struct mlx5_mtt) * n, + MLX5_IB_UMR_XLT_ALIGNMENT); sg->lkey = dev->umrc.pd->local_dma_lkey; wr->next = NULL; @@ -813,7 +814,7 @@ static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr, wr->opcode = MLX5_IB_WR_UMR; - umrwr->npages = n; + umrwr->xlt_size = sg->length; umrwr->page_shift = page_shift; umrwr->mkey = key; } @@ -827,9 +828,11 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift); - wr->send_flags = 0; + wr->send_flags = MLX5_IB_SEND_UMR_ENABLE_MR | + MLX5_IB_SEND_UMR_UPDATE_TRANSLATION | + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; - umrwr->target.virt_addr = virt_addr; + umrwr->virt_addr = virt_addr; umrwr->length = len; umrwr->access_flags = access_flags; umrwr->pd = pd; @@ -840,7 +843,8 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, { struct mlx5_umr_wr *umrwr = umr_wr(wr); - wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE; + wr->send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | + MLX5_IB_SEND_UMR_FAIL_IF_FREE; wr->opcode = MLX5_IB_WR_UMR; umrwr->mkey = key; } @@ -993,7 +997,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, struct mlx5_umr_wr wr; struct ib_sge sg; int err = 0; - const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64); + const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / + sizeof(struct mlx5_mtt); const int page_index_mask = page_index_alignment - 1; size_t pages_mapped = 0; size_t pages_to_map = 0; @@ -1012,7 +1017,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES) return -EINVAL; - size = sizeof(u64) * pages_to_map; + size = sizeof(struct mlx5_mtt) * pages_to_map; size = min_t(int, PAGE_SIZE, size); /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim * code, when we are called from an invalidation. The pas buffer must @@ -1026,7 +1031,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex); memset(pas, 0, size); } - pages_iter = size / sizeof(u64); + pages_iter = size / sizeof(struct mlx5_mtt); dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE); if (dma_mapping_error(ddev, dma)) { mlx5_ib_err(dev, "unable to map DMA during MTT update.\n"); @@ -1049,7 +1054,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, MLX5_IB_MTT_PRESENT); /* Clear padding after the pages brought from the * umem. 
*/ - memset(pas + npages, 0, size - npages * sizeof(u64)); + memset(pas + npages, 0, size - npages * + sizeof(struct mlx5_mtt)); } dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); @@ -1057,19 +1063,19 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, memset(&wr, 0, sizeof(wr)); sg.addr = dma; - sg.length = ALIGN(npages * sizeof(u64), + sg.length = ALIGN(npages * sizeof(struct mlx5_mtt), MLX5_UMR_MTT_ALIGNMENT); sg.lkey = dev->umrc.pd->local_dma_lkey; wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE | - MLX5_IB_SEND_UMR_UPDATE_MTT; + MLX5_IB_SEND_UMR_UPDATE_XLT; wr.wr.sg_list = &sg; wr.wr.num_sge = 1; wr.wr.opcode = MLX5_IB_WR_UMR; - wr.npages = sg.length / sizeof(u64); + wr.xlt_size = sg.length; wr.page_shift = PAGE_SHIFT; wr.mkey = mr->mmkey.key; - wr.target.offset = start_page_index; + wr.offset = start_page_index * sizeof(struct mlx5_mtt); err = mlx5_ib_post_send_wait(dev, &wr); } @@ -1272,7 +1278,7 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, if (err) return err; - umrwr.target.virt_addr = virt_addr; + umrwr.virt_addr = virt_addr; umrwr.length = length; umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; } @@ -1280,14 +1286,10 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, page_shift); - if (flags & IB_MR_REREG_PD) { + if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { umrwr.pd = pd; - umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD; - } - - if (flags & IB_MR_REREG_ACCESS) { umrwr.access_flags = access_flags; - umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS; + umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; } /* post send request to UMR QP */ @@ -1552,11 +1554,11 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT; MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); err = mlx5_alloc_priv_descs(pd->device, mr, - ndescs, sizeof(u64)); + ndescs, sizeof(struct mlx5_mtt)); if (err) goto err_free_in; - mr->desc_size = sizeof(u64); + mr->desc_size = sizeof(struct mlx5_mtt); mr->max_descs = ndescs; } else if (mr_type == IB_MR_TYPE_SG_GAPS) { mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index cacb631a7b0a..67651eca59c5 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -47,7 +47,8 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end) { struct mlx5_ib_mr *mr; - const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1; + const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / + sizeof(struct mlx5_mtt)) - 1; u64 idx = 0, blk_start_idx = 0; int in_block = 0; u64 addr; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index a1b3125f0a6e..ec2301ac0fde 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3080,9 +3080,10 @@ static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) dseg->addr = cpu_to_be64(sg->addr); } -static __be16 get_klm_octo(int npages) +static u64 get_xlt_octo(u64 bytes) { - return cpu_to_be16(ALIGN(npages, 8) / 2); + return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) / + MLX5_IB_UMR_OCTOWORD; } static __be64 frwr_mkey_mask(void) @@ -3127,18 +3128,14 @@ static __be64 sig_mkey_mask(void) } static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, - struct mlx5_ib_mr *mr) + struct 
mlx5_ib_mr *mr) { - int ndescs = mr->ndescs; + int size = mr->ndescs * mr->desc_size; memset(umr, 0, sizeof(*umr)); - if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) - /* KLMs take twice the size of MTTs */ - ndescs *= 2; - umr->flags = MLX5_UMR_CHECK_NOT_FREE; - umr->klm_octowords = get_klm_octo(ndescs); + umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); umr->mkey_mask = frwr_mkey_mask(); } @@ -3149,37 +3146,17 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) umr->flags = MLX5_UMR_INLINE; } -static __be64 get_umr_reg_mr_mask(int atomic) +static __be64 get_umr_enable_mr_mask(void) { u64 result; - result = MLX5_MKEY_MASK_LEN | - MLX5_MKEY_MASK_PAGE_SIZE | - MLX5_MKEY_MASK_START_ADDR | - MLX5_MKEY_MASK_PD | - MLX5_MKEY_MASK_LR | - MLX5_MKEY_MASK_LW | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_RR | - MLX5_MKEY_MASK_RW | + result = MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_FREE; - if (atomic) - result |= MLX5_MKEY_MASK_A; - - return cpu_to_be64(result); -} - -static __be64 get_umr_unreg_mr_mask(void) -{ - u64 result; - - result = MLX5_MKEY_MASK_FREE; - return cpu_to_be64(result); } -static __be64 get_umr_update_mtt_mask(void) +static __be64 get_umr_disable_mr_mask(void) { u64 result; @@ -3194,23 +3171,22 @@ static __be64 get_umr_update_translation_mask(void) result = MLX5_MKEY_MASK_LEN | MLX5_MKEY_MASK_PAGE_SIZE | - MLX5_MKEY_MASK_START_ADDR | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_FREE; + MLX5_MKEY_MASK_START_ADDR; return cpu_to_be64(result); } -static __be64 get_umr_update_access_mask(void) +static __be64 get_umr_update_access_mask(int atomic) { u64 result; - result = MLX5_MKEY_MASK_LW | + result = MLX5_MKEY_MASK_LR | + MLX5_MKEY_MASK_LW | MLX5_MKEY_MASK_RR | - MLX5_MKEY_MASK_RW | - MLX5_MKEY_MASK_A | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_FREE; + MLX5_MKEY_MASK_RW; + + if (atomic) + result |= MLX5_MKEY_MASK_A; return cpu_to_be64(result); } @@ -3219,9 +3195,7 @@ static __be64 get_umr_update_pd_mask(void) { u64 result; - result = MLX5_MKEY_MASK_PD | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_FREE; + result = MLX5_MKEY_MASK_PD; return cpu_to_be64(result); } @@ -3238,24 +3212,24 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, else umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ - if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { - umr->klm_octowords = get_klm_octo(umrwr->npages); - if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) { - umr->mkey_mask = get_umr_update_mtt_mask(); - umr->bsf_octowords = get_klm_octo(umrwr->target.offset); - umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; - } - if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) - umr->mkey_mask |= get_umr_update_translation_mask(); - if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS) - umr->mkey_mask |= get_umr_update_access_mask(); - if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD) - umr->mkey_mask |= get_umr_update_pd_mask(); - if (!umr->mkey_mask) - umr->mkey_mask = get_umr_reg_mr_mask(atomic); - } else { - umr->mkey_mask = get_umr_unreg_mr_mask(); + umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { + u64 offset = get_xlt_octo(umrwr->offset); + + umr->xlt_offset = cpu_to_be16(offset & 0xffff); + umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16); + umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; } + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) + umr->mkey_mask |= get_umr_update_translation_mask(); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) { + umr->mkey_mask |= 
get_umr_update_access_mask(atomic); + umr->mkey_mask |= get_umr_update_pd_mask(); + } + if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR) + umr->mkey_mask |= get_umr_enable_mr_mask(); + if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) + umr->mkey_mask |= get_umr_disable_mr_mask(); if (!wr->num_sge) umr->flags |= MLX5_UMR_INLINE; @@ -3303,17 +3277,17 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w struct mlx5_umr_wr *umrwr = umr_wr(wr); memset(seg, 0, sizeof(*seg)); - if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { + if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) seg->status = MLX5_MKEY_STATUS_FREE; - return; - } seg->flags = convert_access(umrwr->access_flags); - if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) { - if (umrwr->pd) - seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); - seg->start_addr = cpu_to_be64(umrwr->target.virt_addr); - } + if (umrwr->pd) + seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && + !umrwr->length) + seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64); + + seg->start_addr = cpu_to_be64(umrwr->virt_addr); seg->len = cpu_to_be64(umrwr->length); seg->log2_page_size = umrwr->page_shift; seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | @@ -3611,7 +3585,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr, } static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, - struct ib_sig_handover_wr *wr, u32 nelements, + struct ib_sig_handover_wr *wr, u32 size, u32 length, u32 pdn) { struct ib_mr *sig_mr = wr->sig_mr; @@ -3626,17 +3600,17 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | MLX5_MKEY_BSF_EN | pdn); seg->len = cpu_to_be64(length); - seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements))); + seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size)); seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); } static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, - u32 nelements) + u32 size) { memset(umr, 0, sizeof(*umr)); umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; - umr->klm_octowords = get_klm_octo(nelements); + umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); umr->mkey_mask = sig_mkey_mask(); } @@ -3648,7 +3622,7 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); u32 pdn = get_pd(qp)->pdn; - u32 klm_oct_size; + u32 xlt_size; int region_len, ret; if (unlikely(wr->wr.num_sge != 1) || @@ -3670,15 +3644,15 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, * then we use strided block format (3 octowords), * else we use single KLM (1 octoword) **/ - klm_oct_size = wr->prot ? 3 : 1; + xlt_size = wr->prot ? 
0x30 : sizeof(struct mlx5_klm); - set_sig_umr_segment(*seg, klm_oct_size); + set_sig_umr_segment(*seg, xlt_size); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); - set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); + set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn); *seg += sizeof(struct mlx5_mkey_seg); *size += sizeof(struct mlx5_mkey_seg) / 16; if (unlikely((*seg == qp->sq.qend))) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cbfa38fc72c0..5ff86f0ecb7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -396,7 +396,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq, cseg->imm = rq->mkey_be; ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; - ucseg->klm_octowords = + ucseg->xlt_octowords = cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); ucseg->bsf_octowords = cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 0aacb2a7480d..693811e0cb24 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -292,10 +292,14 @@ struct mlx5_wqe_data_seg { struct mlx5_wqe_umr_ctrl_seg { u8 flags; u8 rsvd0[3]; - __be16 klm_octowords; - __be16 bsf_octowords; + __be16 xlt_octowords; + union { + __be16 xlt_offset; + __be16 bsf_octowords; + }; __be64 mkey_mask; - u8 rsvd1[32]; + __be32 xlt_offset_47_16; + u8 rsvd1[28]; }; struct mlx5_seg_set_psv { @@ -389,6 +393,10 @@ struct mlx5_bsf { struct mlx5_bsf_inl m_inl; }; +struct mlx5_mtt { + __be64 ptag; +}; + struct mlx5_klm { __be32 bcount; __be32 key; -- cgit v1.2.3 From c438fde1c288a754aa5d22e3668f03a1dde18335 Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:43 +0200 Subject: IB/mlx5: Add support for big MRs Make use of extended UMR translation offset. Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + drivers/infiniband/hw/mlx5/mr.c | 8 +++++--- drivers/infiniband/hw/mlx5/odp.c | 5 +++++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d79580dcf20f..73bff77ab20c 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -634,6 +634,7 @@ struct mlx5_ib_dev { int fill_delay; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING struct ib_odp_caps odp_caps; + u64 odp_max_size; /* * Sleepable RCU that prevents destruction of MRs while they are still * being used by a page fault handler. diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index be8d38d7ca12..4d40fe0556bd 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1121,8 +1121,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, goto err_1; } pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); - mlx5_ib_populate_pas(dev, umem, page_shift, pas, - pg_cap ? MLX5_IB_MTT_PRESENT : 0); + if (!(access_flags & IB_ACCESS_ON_DEMAND)) + mlx5_ib_populate_pas(dev, umem, page_shift, pas, + pg_cap ? MLX5_IB_MTT_PRESENT : 0); /* The pg_access bit allows setting the access flags * in the page list submitted with the command. 
*/ @@ -1210,7 +1211,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mlx5_ib_dbg(dev, "cache empty for order %d", order); mr = NULL; } - } else if (access_flags & IB_ACCESS_ON_DEMAND) { + } else if (access_flags & IB_ACCESS_ON_DEMAND && + !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { err = -EINVAL; pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB"); goto error; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 67651eca59c5..1e73c127feb7 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -121,6 +121,11 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) caps->general_caps = IB_ODP_SUPPORT; + if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) + dev->odp_max_size = U64_MAX; + else + dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT); + if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send)) caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND; -- cgit v1.2.3 From 7d0cc6edcc7011133c45f62a7796a98b8cb5da0f Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:44 +0200 Subject: IB/mlx5: Add MR cache for large UMR regions In this change we turn mlx5_ib_update_mtt() into generic mlx5_ib_update_xlt() to perfrom HCA translation table modifiactions supporting both atomic and process contexts and not limited by number of modified entries. Using this function we increase preallocated MRs up to 16GB. Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 14 +- drivers/infiniband/hw/mlx5/mem.c | 32 +- drivers/infiniband/hw/mlx5/mlx5_ib.h | 15 +- drivers/infiniband/hw/mlx5/mr.c | 386 ++++++++++--------------- drivers/infiniband/hw/mlx5/odp.c | 19 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 20 ++ include/linux/mlx5/driver.h | 2 +- 7 files changed, 240 insertions(+), 248 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 2ab4e3219c84..b87127206ef2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1112,11 +1112,18 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; #endif + context->upd_xlt_page = __get_free_page(GFP_KERNEL); + if (!context->upd_xlt_page) { + err = -ENOMEM; + goto out_uars; + } + mutex_init(&context->upd_xlt_page_mutex); + if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) { err = mlx5_core_alloc_transport_domain(dev->mdev, &context->tdn); if (err) - goto out_uars; + goto out_page; } INIT_LIST_HEAD(&context->vma_private_list); @@ -1168,6 +1175,9 @@ out_td: if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn); +out_page: + free_page(context->upd_xlt_page); + out_uars: for (i--; i >= 0; i--) mlx5_cmd_free_uar(dev->mdev, uars[i].index); @@ -1195,6 +1205,8 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn); + free_page(context->upd_xlt_page); + for (i = 0; i < uuari->num_uars; i++) { if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 
6851357c16f4..778d8a18925f 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -159,7 +159,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, unsigned long umem_page_shift = ilog2(umem->page_size); int shift = page_shift - umem_page_shift; int mask = (1 << shift) - 1; - int i, k; + int i, k, idx; u64 cur = 0; u64 base; int len; @@ -185,18 +185,36 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { len = sg_dma_len(sg) >> umem_page_shift; base = sg_dma_address(sg); - for (k = 0; k < len; k++) { + + /* Skip elements below offset */ + if (i + len < offset << shift) { + i += len; + continue; + } + + /* Skip pages below offset */ + if (i < offset << shift) { + k = (offset << shift) - i; + i = offset << shift; + } else { + k = 0; + } + + for (; k < len; k++) { if (!(i & mask)) { cur = base + (k << umem_page_shift); cur |= access_flags; + idx = (i >> shift) - offset; - pas[i >> shift] = cpu_to_be64(cur); + pas[idx] = cpu_to_be64(cur); mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n", - i >> shift, be64_to_cpu(pas[i >> shift])); - } else - mlx5_ib_dbg(dev, "=====> 0x%llx\n", - base + (k << umem_page_shift)); + i >> shift, be64_to_cpu(pas[idx])); + } i++; + + /* Stop after num_pages reached */ + if (i >> shift >= offset + num_pages) + return; } } } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 73bff77ab20c..02d925573945 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -125,6 +125,10 @@ struct mlx5_ib_ucontext { /* Transport Domain number */ u32 tdn; struct list_head vma_private_list; + + unsigned long upd_xlt_page; + /* protect ODP/KSM */ + struct mutex upd_xlt_page_mutex; }; static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) @@ -192,6 +196,13 @@ struct mlx5_ib_flow_db { #define MLX5_IB_UMR_OCTOWORD 16 #define MLX5_IB_UMR_XLT_ALIGNMENT 64 +#define MLX5_IB_UPD_XLT_ZAP BIT(0) +#define MLX5_IB_UPD_XLT_ENABLE BIT(1) +#define MLX5_IB_UPD_XLT_ATOMIC BIT(2) +#define MLX5_IB_UPD_XLT_ADDR BIT(3) +#define MLX5_IB_UPD_XLT_PD BIT(4) +#define MLX5_IB_UPD_XLT_ACCESS BIT(5) + /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags. 
* * These flags are intended for internal use by the mlx5_ib driver, and they @@ -788,8 +799,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata); int mlx5_ib_dealloc_mw(struct ib_mw *mw); -int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, - int npages, int zap); +int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, + int page_shift, int flags); int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_pd *pd, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 4d40fe0556bd..f4ecc10cbbba 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -46,14 +46,9 @@ enum { }; #define MLX5_UMR_ALIGN 2048 -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -static __be64 mlx5_ib_update_mtt_emergency_buffer[ - MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)] - __aligned(MLX5_UMR_ALIGN); -static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex); -#endif static int clean_mr(struct mlx5_ib_mr *mr); +static int use_umr(struct mlx5_ib_dev *dev, int order); static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { @@ -629,7 +624,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->dev = dev; if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && - (mlx5_core_is_pf(dev->mdev))) + mlx5_core_is_pf(dev->mdev) && + use_umr(dev, ent->order)) limit = dev->mdev->profile->mr_cache[i].limit; else limit = 0; @@ -757,98 +753,13 @@ static int get_octo_len(u64 addr, u64 len, int page_size) return (npages + 1) / 2; } -static int use_umr(int order) +static int use_umr(struct mlx5_ib_dev *dev, int order) { + if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) + return order < MAX_MR_CACHE_ENTRIES + 2; return order <= MLX5_MAX_UMR_SHIFT; } -static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, - int npages, int page_shift, int *size, - __be64 **mr_pas, dma_addr_t *dma) -{ - __be64 *pas; - struct device *ddev = dev->ib_dev.dma_device; - - /* - * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. - * To avoid copying garbage after the pas array, we allocate - * a little more. - */ - *size = ALIGN(sizeof(struct mlx5_mtt) * npages, MLX5_UMR_MTT_ALIGNMENT); - *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); - if (!(*mr_pas)) - return -ENOMEM; - - pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN); - mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT); - /* Clear padding after the actual pages. 
*/ - memset(pas + npages, 0, *size - npages * sizeof(struct mlx5_mtt)); - - *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE); - if (dma_mapping_error(ddev, *dma)) { - kfree(*mr_pas); - return -ENOMEM; - } - - return 0; -} - -static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr, - struct ib_sge *sg, u64 dma, int n, u32 key, - int page_shift) -{ - struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_umr_wr *umrwr = umr_wr(wr); - - sg->addr = dma; - sg->length = ALIGN(sizeof(struct mlx5_mtt) * n, - MLX5_IB_UMR_XLT_ALIGNMENT); - sg->lkey = dev->umrc.pd->local_dma_lkey; - - wr->next = NULL; - wr->sg_list = sg; - if (n) - wr->num_sge = 1; - else - wr->num_sge = 0; - - wr->opcode = MLX5_IB_WR_UMR; - - umrwr->xlt_size = sg->length; - umrwr->page_shift = page_shift; - umrwr->mkey = key; -} - -static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, - struct ib_sge *sg, u64 dma, int n, u32 key, - int page_shift, u64 virt_addr, u64 len, - int access_flags) -{ - struct mlx5_umr_wr *umrwr = umr_wr(wr); - - prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift); - - wr->send_flags = MLX5_IB_SEND_UMR_ENABLE_MR | - MLX5_IB_SEND_UMR_UPDATE_TRANSLATION | - MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; - - umrwr->virt_addr = virt_addr; - umrwr->length = len; - umrwr->access_flags = access_flags; - umrwr->pd = pd; -} - -static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, - struct ib_send_wr *wr, u32 key) -{ - struct mlx5_umr_wr *umrwr = umr_wr(wr); - - wr->send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | - MLX5_IB_SEND_UMR_FAIL_IF_FREE; - wr->opcode = MLX5_IB_WR_UMR; - umrwr->mkey = key; -} - static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, int access_flags, struct ib_umem **umem, int *npages, int *page_shift, int *ncont, @@ -927,13 +838,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, int page_shift, int order, int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct device *ddev = dev->ib_dev.dma_device; - struct mlx5_umr_wr umrwr = {}; struct mlx5_ib_mr *mr; - struct ib_sge sg; - int size; - __be64 *mr_pas; - dma_addr_t dma; int err = 0; int i; @@ -952,144 +857,174 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, if (!mr) return ERR_PTR(-EAGAIN); - err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas, - &dma); - if (err) - goto free_mr; - - prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, - page_shift, virt_addr, len, access_flags); - - err = mlx5_ib_post_send_wait(dev, &umrwr); - if (err && err != -EFAULT) - goto unmap_dma; - + mr->ibmr.pd = pd; + mr->umem = umem; + mr->access_flags = access_flags; + mr->desc_size = sizeof(struct mlx5_mtt); mr->mmkey.iova = virt_addr; mr->mmkey.size = len; mr->mmkey.pd = to_mpd(pd)->pdn; - mr->live = 1; - -unmap_dma: - dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); + err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, + MLX5_IB_UPD_XLT_ENABLE); - kfree(mr_pas); - -free_mr: if (err) { free_cached_mr(dev, mr); return ERR_PTR(err); } + mr->live = 1; + return mr; } -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, - int zap) +static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages, + void *xlt, int page_shift, size_t size, + int flags) { struct mlx5_ib_dev *dev = mr->dev; - struct device *ddev = dev->ib_dev.dma_device; struct ib_umem *umem = mr->umem; + + npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx); + + if 
(!(flags & MLX5_IB_UPD_XLT_ZAP)) { + __mlx5_ib_populate_pas(dev, umem, page_shift, + idx, npages, xlt, + MLX5_IB_MTT_PRESENT); + /* Clear padding after the pages + * brought from the umem. + */ + memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0, + size - npages * sizeof(struct mlx5_mtt)); + } + + return npages; +} + +#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \ + MLX5_UMR_MTT_ALIGNMENT) +#define MLX5_SPARE_UMR_CHUNK 0x10000 + +int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, + int page_shift, int flags) +{ + struct mlx5_ib_dev *dev = mr->dev; + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_ib_ucontext *uctx = NULL; int size; - __be64 *pas; + void *xlt; dma_addr_t dma; struct mlx5_umr_wr wr; struct ib_sge sg; int err = 0; - const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / - sizeof(struct mlx5_mtt); - const int page_index_mask = page_index_alignment - 1; + int desc_size = sizeof(struct mlx5_mtt); + const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size; + const int page_mask = page_align - 1; size_t pages_mapped = 0; size_t pages_to_map = 0; size_t pages_iter = 0; - int use_emergency_buf = 0; + gfp_t gfp; /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, - * so we need to align the offset and length accordingly */ - if (start_page_index & page_index_mask) { - npages += start_page_index & page_index_mask; - start_page_index &= ~page_index_mask; + * so we need to align the offset and length accordingly + */ + if (idx & page_mask) { + npages += idx & page_mask; + idx &= ~page_mask; } - pages_to_map = ALIGN(npages, page_index_alignment); + gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL; + gfp |= __GFP_ZERO | __GFP_NOWARN; - if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES) - return -EINVAL; + pages_to_map = ALIGN(npages, page_align); + size = desc_size * pages_to_map; + size = min_t(int, size, MLX5_MAX_UMR_CHUNK); - size = sizeof(struct mlx5_mtt) * pages_to_map; - size = min_t(int, PAGE_SIZE, size); - /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim - * code, when we are called from an invalidation. The pas buffer must - * be 2k-aligned for Connect-IB. */ - pas = (__be64 *)get_zeroed_page(GFP_ATOMIC); - if (!pas) { - mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n"); - pas = mlx5_ib_update_mtt_emergency_buffer; - size = MLX5_UMR_MTT_MIN_CHUNK_SIZE; - use_emergency_buf = 1; - mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex); - memset(pas, 0, size); + xlt = (void *)__get_free_pages(gfp, get_order(size)); + if (!xlt && size > MLX5_SPARE_UMR_CHUNK) { + mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. 
fallback to spare UMR allocation od %d bytes\n", + size, get_order(size), MLX5_SPARE_UMR_CHUNK); + + size = MLX5_SPARE_UMR_CHUNK; + xlt = (void *)__get_free_pages(gfp, get_order(size)); } - pages_iter = size / sizeof(struct mlx5_mtt); - dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE); + + if (!xlt) { + uctx = to_mucontext(mr->ibmr.uobject->context); + mlx5_ib_warn(dev, "Using XLT emergency buffer\n"); + size = PAGE_SIZE; + xlt = (void *)uctx->upd_xlt_page; + mutex_lock(&uctx->upd_xlt_page_mutex); + memset(xlt, 0, size); + } + pages_iter = size / desc_size; + dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE); if (dma_mapping_error(ddev, dma)) { - mlx5_ib_err(dev, "unable to map DMA during MTT update.\n"); + mlx5_ib_err(dev, "unable to map DMA during XLT update.\n"); err = -ENOMEM; - goto free_pas; + goto free_xlt; } + sg.addr = dma; + sg.lkey = dev->umrc.pd->local_dma_lkey; + + memset(&wr, 0, sizeof(wr)); + wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT; + if (!(flags & MLX5_IB_UPD_XLT_ENABLE)) + wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE; + wr.wr.sg_list = &sg; + wr.wr.num_sge = 1; + wr.wr.opcode = MLX5_IB_WR_UMR; + + wr.pd = mr->ibmr.pd; + wr.mkey = mr->mmkey.key; + wr.length = mr->mmkey.size; + wr.virt_addr = mr->mmkey.iova; + wr.access_flags = mr->access_flags; + wr.page_shift = page_shift; + for (pages_mapped = 0; pages_mapped < pages_to_map && !err; - pages_mapped += pages_iter, start_page_index += pages_iter) { + pages_mapped += pages_iter, idx += pages_iter) { dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE); - - npages = min_t(size_t, - pages_iter, - ib_umem_num_pages(umem) - start_page_index); - - if (!zap) { - __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT, - start_page_index, npages, pas, - MLX5_IB_MTT_PRESENT); - /* Clear padding after the pages brought from the - * umem. */ - memset(pas + npages, 0, size - npages * - sizeof(struct mlx5_mtt)); - } + npages = populate_xlt(mr, idx, pages_iter, xlt, + page_shift, size, flags); dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); - memset(&wr, 0, sizeof(wr)); - - sg.addr = dma; - sg.length = ALIGN(npages * sizeof(struct mlx5_mtt), - MLX5_UMR_MTT_ALIGNMENT); - sg.lkey = dev->umrc.pd->local_dma_lkey; + sg.length = ALIGN(npages * desc_size, + MLX5_UMR_MTT_ALIGNMENT); + + if (pages_mapped + pages_iter >= pages_to_map) { + if (flags & MLX5_IB_UPD_XLT_ENABLE) + wr.wr.send_flags |= + MLX5_IB_SEND_UMR_ENABLE_MR | + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS | + MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; + if (flags & MLX5_IB_UPD_XLT_PD || + flags & MLX5_IB_UPD_XLT_ACCESS) + wr.wr.send_flags |= + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; + if (flags & MLX5_IB_UPD_XLT_ADDR) + wr.wr.send_flags |= + MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; + } - wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE | - MLX5_IB_SEND_UMR_UPDATE_XLT; - wr.wr.sg_list = &sg; - wr.wr.num_sge = 1; - wr.wr.opcode = MLX5_IB_WR_UMR; + wr.offset = idx * desc_size; wr.xlt_size = sg.length; - wr.page_shift = PAGE_SHIFT; - wr.mkey = mr->mmkey.key; - wr.offset = start_page_index * sizeof(struct mlx5_mtt); err = mlx5_ib_post_send_wait(dev, &wr); } dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); -free_pas: - if (!use_emergency_buf) - free_page((unsigned long)pas); +free_xlt: + if (uctx) + mutex_unlock(&uctx->upd_xlt_page_mutex); else - mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex); + free_pages((unsigned long)xlt, get_order(size)); return err; } -#endif /* * If ibmr is NULL it will be allocated by reg_create. 
@@ -1204,7 +1139,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - if (use_umr(order)) { + if (use_umr(dev, order)) { mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { @@ -1254,39 +1189,25 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) return 0; - prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key); + umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | + MLX5_IB_SEND_UMR_FAIL_IF_FREE; + umrwr.wr.opcode = MLX5_IB_WR_UMR; + umrwr.mkey = mr->mmkey.key; return mlx5_ib_post_send_wait(dev, &umrwr); } -static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, - u64 length, int npages, int page_shift, int order, +static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, int access_flags, int flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct device *ddev = dev->ib_dev.dma_device; struct mlx5_umr_wr umrwr = {}; - struct ib_sge sg; - dma_addr_t dma = 0; - __be64 *mr_pas = NULL; - int size; int err; umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; - if (flags & IB_MR_REREG_TRANS) { - err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size, - &mr_pas, &dma); - if (err) - return err; - - umrwr.virt_addr = virt_addr; - umrwr.length = length; - umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; - } - - prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, - page_shift); + umrwr.wr.opcode = MLX5_IB_WR_UMR; + umrwr.mkey = mr->mmkey.key; if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { umrwr.pd = pd; @@ -1294,13 +1215,8 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; } - /* post send request to UMR QP */ err = mlx5_ib_post_send_wait(dev, &umrwr); - if (flags & IB_MR_REREG_TRANS) { - dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); - kfree(mr_pas); - } return err; } @@ -1317,6 +1233,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; int page_shift = 0; + int upd_flags = 0; int npages = 0; int ncont = 0; int order = 0; @@ -1325,6 +1242,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", start, virt_addr, length, access_flags); + atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); + if (flags != IB_MR_REREG_PD) { /* * Replace umem. 
This needs to be done whether or not UMR is @@ -1335,7 +1254,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err < 0) { - mr->umem = NULL; + clean_mr(mr); return err; } } @@ -1367,32 +1286,37 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, /* * Send a UMR WQE */ - err = rereg_umr(pd, mr, addr, len, npages, page_shift, - order, access_flags, flags); + mr->ibmr.pd = pd; + mr->access_flags = access_flags; + mr->mmkey.iova = addr; + mr->mmkey.size = len; + mr->mmkey.pd = to_mpd(pd)->pdn; + + if (flags & IB_MR_REREG_TRANS) { + upd_flags = MLX5_IB_UPD_XLT_ADDR; + if (flags & IB_MR_REREG_PD) + upd_flags |= MLX5_IB_UPD_XLT_PD; + if (flags & IB_MR_REREG_ACCESS) + upd_flags |= MLX5_IB_UPD_XLT_ACCESS; + err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, + upd_flags); + } else { + err = rereg_umr(pd, mr, access_flags, flags); + } + if (err) { mlx5_ib_warn(dev, "Failed to rereg UMR\n"); + ib_umem_release(mr->umem); + clean_mr(mr); return err; } } - if (flags & IB_MR_REREG_PD) { - ib_mr->pd = pd; - mr->mmkey.pd = to_mpd(pd)->pdn; - } + set_mr_fileds(dev, mr, npages, len, access_flags); - if (flags & IB_MR_REREG_ACCESS) - mr->access_flags = access_flags; - - if (flags & IB_MR_REREG_TRANS) { - atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); - set_mr_fileds(dev, mr, npages, len, access_flags); - mr->mmkey.iova = addr; - mr->mmkey.size = len; - } #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING update_odp_mr(mr); #endif - return 0; } diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 1e73c127feb7..cfd7ee500c47 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -91,16 +91,21 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, u64 umr_offset = idx & umr_block_mask; if (in_block && umr_offset == 0) { - mlx5_ib_update_mtt(mr, blk_start_idx, - idx - blk_start_idx, 1); + mlx5_ib_update_xlt(mr, blk_start_idx, + idx - blk_start_idx, + PAGE_SHIFT, + MLX5_IB_UPD_XLT_ZAP | + MLX5_IB_UPD_XLT_ATOMIC); in_block = 0; } } } if (in_block) - mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1, - 1); - + mlx5_ib_update_xlt(mr, blk_start_idx, + idx - blk_start_idx + 1, + PAGE_SHIFT, + MLX5_IB_UPD_XLT_ZAP | + MLX5_IB_UPD_XLT_ATOMIC); /* * We are now sure that the device will not access the * memory. We can safely unmap it, and mark it as dirty if @@ -257,7 +262,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, * this MR, since ib_umem_odp_map_dma_pages already * checks this. 
*/ - ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0); + ret = mlx5_ib_update_xlt(mr, start_idx, npages, + PAGE_SHIFT, + MLX5_IB_UPD_XLT_ATOMIC); } else { ret = -EAGAIN; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 54e5a786f191..1713bd8d44a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -152,6 +152,26 @@ static struct mlx5_profile profile[] = { .size = 8, .limit = 4 }, + .mr_cache[16] = { + .size = 8, + .limit = 4 + }, + .mr_cache[17] = { + .size = 8, + .limit = 4 + }, + .mr_cache[18] = { + .size = 8, + .limit = 4 + }, + .mr_cache[19] = { + .size = 4, + .limit = 2 + }, + .mr_cache[20] = { + .size = 4, + .limit = 2 + }, }, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 0ae55361e674..ec52f3b50bf5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -959,7 +959,7 @@ enum { }; enum { - MAX_MR_CACHE_ENTRIES = 16, + MAX_MR_CACHE_ENTRIES = 21, }; enum { -- cgit v1.2.3 From 223cdc72429079aaf72511d2677b5d6584866313 Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:45 +0200 Subject: net/mlx5: Update PAGE_FAULT_RESUME layout Update PAGE_FAULT_RESUME command layout. Three bit fields describing page fault: rdma, rdma_write, req_res gave 8 possible combinations, while only a few were legal. Now they are interpreted as three-bit type field, where former legal combinations turns into corresponding types and unused were added as new types. Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 10 ++-------- include/linux/mlx5/mlx5_ifc.h | 9 ++++----- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index d0a4005fe63a..5378a5f74bdc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -515,14 +515,8 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME); - MLX5_SET(page_fault_resume_in, in, qpn, qpn); - - if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR) - MLX5_SET(page_fault_resume_in, in, req_res, 1); - if (flags & MLX5_PAGE_FAULT_RESUME_WRITE) - MLX5_SET(page_fault_resume_in, in, read_write, 1); - if (flags & MLX5_PAGE_FAULT_RESUME_RDMA) - MLX5_SET(page_fault_resume_in, in, rdma, 1); + MLX5_SET(page_fault_resume_in, in, wq_number, qpn); + MLX5_SET(page_fault_resume_in, in, page_fault_type, flags); if (error) MLX5_SET(page_fault_resume_in, in, error, 1); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 7c760e580268..608dc988b3d6 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4775,12 +4775,11 @@ struct mlx5_ifc_page_fault_resume_in_bits { u8 error[0x1]; u8 reserved_at_41[0x4]; - u8 rdma[0x1]; - u8 read_write[0x1]; - u8 req_res[0x1]; - u8 qpn[0x18]; + u8 page_fault_type[0x3]; + u8 wq_number[0x18]; - u8 reserved_at_60[0x20]; + u8 reserved_at_60[0x8]; + u8 token[0x18]; }; struct mlx5_ifc_nop_out_bits { -- cgit v1.2.3 From d9aaed838765e28234cb700c7d1ac975cadf28c9 Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:46 +0200 Subject: {net,IB}/mlx5: Refactor page fault handling * Update page fault event according to last specification. 
* Separate code path for page fault EQ, completion EQ and async EQ. * Move page fault handling work queue from mlx5_ib static variable into mlx5_core page fault EQ. * Allocate memory to store ODP event dynamically as the events arrive, since in atomic context - use mempool. * Make mlx5_ib page fault handler run in process context. Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 14 +- drivers/infiniband/hw/mlx5/mlx5_ib.h | 49 +--- drivers/infiniband/hw/mlx5/odp.c | 300 ++++++++------------- drivers/infiniband/hw/mlx5/qp.c | 26 -- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 33 +++ drivers/net/ethernet/mellanox/mlx5/core/eq.c | 290 +++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/main.c | 21 +- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2 + drivers/net/ethernet/mellanox/mlx5/core/qp.c | 108 -------- include/linux/mlx5/device.h | 6 +- include/linux/mlx5/driver.h | 97 ++++++- include/linux/mlx5/qp.h | 44 --- 12 files changed, 522 insertions(+), 468 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index b87127206ef2..86c61e73780e 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3319,6 +3319,9 @@ static struct mlx5_interface mlx5_ib_interface = { .add = mlx5_ib_add, .remove = mlx5_ib_remove, .event = mlx5_ib_event, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + .pfault = mlx5_ib_pfault, +#endif .protocol = MLX5_INTERFACE_PROTOCOL_IB, }; @@ -3329,25 +3332,14 @@ static int __init mlx5_ib_init(void) if (deprecated_prof_sel != 2) pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n"); - err = mlx5_ib_odp_init(); - if (err) - return err; - err = mlx5_register_interface(&mlx5_ib_interface); - if (err) - goto clean_odp; - - return err; -clean_odp: - mlx5_ib_odp_cleanup(); return err; } static void __exit mlx5_ib_cleanup(void) { mlx5_unregister_interface(&mlx5_ib_interface); - mlx5_ib_odp_cleanup(); } module_init(mlx5_ib_init); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 02d925573945..a51c8051aeb2 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -277,29 +277,6 @@ struct mlx5_ib_rwq_ind_table { u32 rqtn; }; -/* - * Connect-IB can trigger up to four concurrent pagefaults - * per-QP. - */ -enum mlx5_ib_pagefault_context { - MLX5_IB_PAGEFAULT_RESPONDER_READ, - MLX5_IB_PAGEFAULT_REQUESTOR_READ, - MLX5_IB_PAGEFAULT_RESPONDER_WRITE, - MLX5_IB_PAGEFAULT_REQUESTOR_WRITE, - MLX5_IB_PAGEFAULT_CONTEXTS -}; - -static inline enum mlx5_ib_pagefault_context - mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault) -{ - return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE); -} - -struct mlx5_ib_pfault { - struct work_struct work; - struct mlx5_pagefault mpfault; -}; - struct mlx5_ib_ubuffer { struct ib_umem *umem; int buf_size; @@ -385,20 +362,6 @@ struct mlx5_ib_qp { /* Store signature errors */ bool signature_en; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - /* - * A flag that is true for QP's that are in a state that doesn't - * allow page faults, and shouldn't schedule any more faults. - */ - int disable_page_faults; - /* - * The disable_page_faults_lock protects a QP's disable_page_faults - * field, allowing for a thread to atomically check whether the QP - * allows page faults, and if so schedule a page fault. 
- */ - spinlock_t disable_page_faults_lock; - struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS]; -#endif struct list_head qps_list; struct list_head cq_recv_list; struct list_head cq_send_list; @@ -869,18 +832,13 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -extern struct workqueue_struct *mlx5_ib_page_fault_wq; - void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev); -void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault); -void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp); +void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context, + struct mlx5_pagefault *pfault); int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev); void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev); int __init mlx5_ib_odp_init(void); void mlx5_ib_odp_cleanup(void); -void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp); -void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end); #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ @@ -889,13 +847,10 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) return; } -static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {} static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; } static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {} static inline int mlx5_ib_odp_init(void) { return 0; } static inline void mlx5_ib_odp_cleanup(void) {} -static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {} -static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {} #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index cfd7ee500c47..26f96c79a45a 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -41,8 +41,6 @@ * a pagefault. */ #define MMU_NOTIFIER_TIMEOUT 1000 -struct workqueue_struct *mlx5_ib_page_fault_wq; - void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end) { @@ -162,38 +160,38 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev, return container_of(mmkey, struct mlx5_ib_mr, mmkey); } -static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault, +static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault, int error) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); - u32 qpn = qp->trans_qp.base.mqp.qpn; + int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ? + pfault->wqe.wq_num : pfault->token; int ret = mlx5_core_page_fault_resume(dev->mdev, - qpn, - pfault->mpfault.flags, + pfault->token, + wq_num, + pfault->type, error); if (ret) - pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn); + mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n", + wq_num); } /* - * Handle a single data segment in a page-fault WQE. + * Handle a single data segment in a page-fault WQE or RDMA region. * - * Returns number of pages retrieved on success. The caller will continue to + * Returns number of pages retrieved on success. The caller may continue to * the next data segment. * Can return the following error codes: * -EAGAIN to designate a temporary error. 
The caller will abort handling the * page fault and resolve it. * -EFAULT when there's an error mapping the requested pages. The caller will - * abort the page fault handling and possibly move the QP to an error state. - * On other errors the QP should also be closed with an error. + * abort the page fault handling. */ -static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault, +static int pagefault_single_data_segment(struct mlx5_ib_dev *mib_dev, u32 key, u64 io_virt, size_t bcnt, + u32 *bytes_committed, u32 *bytes_mapped) { - struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device); int srcu_key; unsigned int current_seq; u64 start_idx; @@ -219,12 +217,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, key); if (bytes_mapped) *bytes_mapped += - (bcnt - pfault->mpfault.bytes_committed); - goto srcu_unlock; - } - if (mr->ibmr.pd != qp->ibqp.pd) { - pr_err("Page-fault with different PDs for QP and MR.\n"); - ret = -EFAULT; + (bcnt - *bytes_committed); goto srcu_unlock; } @@ -240,8 +233,8 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, * in all iterations (in iteration 2 and above, * bytes_committed == 0). */ - io_virt += pfault->mpfault.bytes_committed; - bcnt -= pfault->mpfault.bytes_committed; + io_virt += *bytes_committed; + bcnt -= *bytes_committed; start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT; @@ -300,7 +293,7 @@ srcu_unlock: } } srcu_read_unlock(&mib_dev->mr_srcu, srcu_key); - pfault->mpfault.bytes_committed = 0; + *bytes_committed = 0; return ret ? ret : npages; } @@ -322,8 +315,9 @@ srcu_unlock: * Returns the number of pages loaded if positive, zero for an empty WQE, or a * negative error code. */ -static int pagefault_data_segments(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault, void *wqe, +static int pagefault_data_segments(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault, + struct mlx5_ib_qp *qp, void *wqe, void *wqe_end, u32 *bytes_mapped, u32 *total_wqe_bytes, int receive_queue) { @@ -367,22 +361,23 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp, if (!inline_segment && total_wqe_bytes) { *total_wqe_bytes += bcnt - min_t(size_t, bcnt, - pfault->mpfault.bytes_committed); + pfault->bytes_committed); } /* A zero length data segment designates a length of 2GB. */ if (bcnt == 0) bcnt = 1U << 31; - if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) { - pfault->mpfault.bytes_committed -= + if (inline_segment || bcnt <= pfault->bytes_committed) { + pfault->bytes_committed -= min_t(size_t, bcnt, - pfault->mpfault.bytes_committed); + pfault->bytes_committed); continue; } - ret = pagefault_single_data_segment(qp, pfault, key, io_virt, - bcnt, bytes_mapped); + ret = pagefault_single_data_segment(dev, key, io_virt, bcnt, + &pfault->bytes_committed, + bytes_mapped); if (ret < 0) break; npages += ret; @@ -396,12 +391,11 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp, * scatter-gather list, and set wqe_end to the end of the WQE. 
*/ static int mlx5_ib_mr_initiator_pfault_handler( - struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, - void **wqe, void **wqe_end, int wqe_length) + struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, + struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); struct mlx5_wqe_ctrl_seg *ctrl = *wqe; - u16 wqe_index = pfault->mpfault.wqe.wqe_index; + u16 wqe_index = pfault->wqe.wqe_index; unsigned ds, opcode; #if defined(DEBUG) u32 ctrl_wqe_index, ctrl_qpn; @@ -502,10 +496,9 @@ invalid_transport_or_opcode: * scatter-gather list, and set wqe_end to the end of the WQE. */ static int mlx5_ib_mr_responder_pfault_handler( - struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, - void **wqe, void **wqe_end, int wqe_length) + struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, + struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); struct mlx5_ib_wq *wq = &qp->rq; int wqe_size = 1 << wq->wqe_shift; @@ -542,70 +535,83 @@ invalid_transport_or_opcode: return 0; } -static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault) +static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev, + u32 wq_num) +{ + struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num); + + if (!mqp) { + mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num); + return NULL; + } + + return to_mibqp(mqp); +} + +static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); int ret; void *wqe, *wqe_end; u32 bytes_mapped, total_wqe_bytes; char *buffer = NULL; - int resume_with_error = 0; - u16 wqe_index = pfault->mpfault.wqe.wqe_index; - int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR; - u32 qpn = qp->trans_qp.base.mqp.qpn; + int resume_with_error = 1; + u16 wqe_index = pfault->wqe.wqe_index; + int requestor = pfault->type & MLX5_PFAULT_REQUESTOR; + struct mlx5_ib_qp *qp; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) { mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); - resume_with_error = 1; goto resolve_page_fault; } + qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num); + if (!qp) + goto resolve_page_fault; + ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer, PAGE_SIZE, &qp->trans_qp.base); if (ret < 0) { - mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n", - -ret, wqe_index, qpn); - resume_with_error = 1; + mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n", + ret, wqe_index, pfault->token); goto resolve_page_fault; } wqe = buffer; if (requestor) - ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe, + ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe, &wqe_end, ret); else - ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe, + ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe, &wqe_end, ret); - if (ret < 0) { - resume_with_error = 1; + if (ret < 0) goto resolve_page_fault; - } if (wqe >= wqe_end) { mlx5_ib_err(dev, "ODP fault on invalid WQE.\n"); - resume_with_error = 1; goto resolve_page_fault; } - ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped, - &total_wqe_bytes, !requestor); + ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end, + &bytes_mapped, &total_wqe_bytes, + !requestor); if (ret == -EAGAIN) { + 
resume_with_error = 0; goto resolve_page_fault; } else if (ret < 0 || total_wqe_bytes > bytes_mapped) { - mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n", - -ret); - resume_with_error = 1; + if (ret != -ENOENT) + mlx5_ib_err(dev, "Error getting user pages for page fault. Error: %d\n", + ret); goto resolve_page_fault; } + resume_with_error = 0; resolve_page_fault: - mlx5_ib_page_fault_resume(qp, pfault, resume_with_error); - mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n", - qpn, resume_with_error, - pfault->mpfault.flags); - + mlx5_ib_page_fault_resume(dev, pfault, resume_with_error); + mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n", + pfault->token, resume_with_error, + pfault->type); free_page((unsigned long)buffer); } @@ -615,15 +621,14 @@ static int pages_in_range(u64 address, u32 length) (address & PAGE_MASK)) >> PAGE_SHIFT; } -static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault) +static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault) { - struct mlx5_pagefault *mpfault = &pfault->mpfault; u64 address; u32 length; - u32 prefetch_len = mpfault->bytes_committed; + u32 prefetch_len = pfault->bytes_committed; int prefetch_activated = 0; - u32 rkey = mpfault->rdma.r_key; + u32 rkey = pfault->rdma.r_key; int ret; /* The RDMA responder handler handles the page fault in two parts. @@ -632,38 +637,40 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, * prefetches more pages. The second operation cannot use the pfault * context and therefore uses the dummy_pfault context allocated on * the stack */ - struct mlx5_ib_pfault dummy_pfault = {}; + pfault->rdma.rdma_va += pfault->bytes_committed; + pfault->rdma.rdma_op_len -= min(pfault->bytes_committed, + pfault->rdma.rdma_op_len); + pfault->bytes_committed = 0; - dummy_pfault.mpfault.bytes_committed = 0; - - mpfault->rdma.rdma_va += mpfault->bytes_committed; - mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed, - mpfault->rdma.rdma_op_len); - mpfault->bytes_committed = 0; - - address = mpfault->rdma.rdma_va; - length = mpfault->rdma.rdma_op_len; + address = pfault->rdma.rdma_va; + length = pfault->rdma.rdma_op_len; /* For some operations, the hardware cannot tell the exact message * length, and in those cases it reports zero. Use prefetch * logic. */ if (length == 0) { prefetch_activated = 1; - length = mpfault->rdma.packet_size; + length = pfault->rdma.packet_size; prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len); } - ret = pagefault_single_data_segment(qp, pfault, rkey, address, length, - NULL); + ret = pagefault_single_data_segment(dev, rkey, address, length, + &pfault->bytes_committed, NULL); if (ret == -EAGAIN) { /* We're racing with an invalidation, don't prefetch */ prefetch_activated = 0; } else if (ret < 0 || pages_in_range(address, length) > ret) { - mlx5_ib_page_fault_resume(qp, pfault, 1); + mlx5_ib_page_fault_resume(dev, pfault, 1); + if (ret != -ENOENT) + mlx5_ib_warn(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n", + ret, pfault->token, pfault->type); return; } - mlx5_ib_page_fault_resume(qp, pfault, 0); + mlx5_ib_page_fault_resume(dev, pfault, 0); + mlx5_ib_dbg(dev, "PAGE FAULT completed. 
QP 0x%x, type: 0x%x, prefetch_activated: %d\n", + pfault->token, pfault->type, + prefetch_activated); /* At this point, there might be a new pagefault already arriving in * the eq, switch to the dummy pagefault for the rest of the @@ -671,112 +678,39 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, * work-queue is being fenced. */ if (prefetch_activated) { - ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey, - address, + u32 bytes_committed = 0; + + ret = pagefault_single_data_segment(dev, rkey, address, prefetch_len, - NULL); + &bytes_committed, NULL); if (ret < 0) { - pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n", - ret, prefetch_activated, - qp->ibqp.qp_num, address, prefetch_len); + mlx5_ib_warn(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n", + ret, pfault->token, address, + prefetch_len); } } } -void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault) +void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context, + struct mlx5_pagefault *pfault) { - u8 event_subtype = pfault->mpfault.event_subtype; + struct mlx5_ib_dev *dev = context; + u8 event_subtype = pfault->event_subtype; switch (event_subtype) { case MLX5_PFAULT_SUBTYPE_WQE: - mlx5_ib_mr_wqe_pfault_handler(qp, pfault); + mlx5_ib_mr_wqe_pfault_handler(dev, pfault); break; case MLX5_PFAULT_SUBTYPE_RDMA: - mlx5_ib_mr_rdma_pfault_handler(qp, pfault); + mlx5_ib_mr_rdma_pfault_handler(dev, pfault); break; default: - pr_warn("Invalid page fault event subtype: 0x%x\n", - event_subtype); - mlx5_ib_page_fault_resume(qp, pfault, 1); - break; + mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n", + event_subtype); + mlx5_ib_page_fault_resume(dev, pfault, 1); } } -static void mlx5_ib_qp_pfault_action(struct work_struct *work) -{ - struct mlx5_ib_pfault *pfault = container_of(work, - struct mlx5_ib_pfault, - work); - enum mlx5_ib_pagefault_context context = - mlx5_ib_get_pagefault_context(&pfault->mpfault); - struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp, - pagefaults[context]); - mlx5_ib_mr_pfault_handler(qp, pfault); -} - -void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) -{ - unsigned long flags; - - spin_lock_irqsave(&qp->disable_page_faults_lock, flags); - qp->disable_page_faults = 1; - spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); - - /* - * Note that at this point, we are guarenteed that no more - * work queue elements will be posted to the work queue with - * the QP we are closing. - */ - flush_workqueue(mlx5_ib_page_fault_wq); -} - -void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) -{ - unsigned long flags; - - spin_lock_irqsave(&qp->disable_page_faults_lock, flags); - qp->disable_page_faults = 0; - spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); -} - -static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp, - struct mlx5_pagefault *pfault) -{ - /* - * Note that we will only get one fault event per QP per context - * (responder/initiator, read/write), until we resolve the page fault - * with the mlx5_ib_page_fault_resume command. Since this function is - * called from within the work element, there is no risk of missing - * events. 
- */ - struct mlx5_ib_qp *mibqp = to_mibqp(qp); - enum mlx5_ib_pagefault_context context = - mlx5_ib_get_pagefault_context(pfault); - struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context]; - - qp_pfault->mpfault = *pfault; - - /* No need to stop interrupts here since we are in an interrupt */ - spin_lock(&mibqp->disable_page_faults_lock); - if (!mibqp->disable_page_faults) - queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work); - spin_unlock(&mibqp->disable_page_faults_lock); -} - -void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) -{ - int i; - - qp->disable_page_faults = 1; - spin_lock_init(&qp->disable_page_faults_lock); - - qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler; - - for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i) - INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action); -} - int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { int ret; @@ -793,17 +727,3 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) cleanup_srcu_struct(&ibdev->mr_srcu); } -int __init mlx5_ib_odp_init(void) -{ - mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults", - WQ_MEM_RECLAIM); - if (!mlx5_ib_page_fault_wq) - return -ENOMEM; - - return 0; -} - -void mlx5_ib_odp_cleanup(void) -{ - destroy_workqueue(mlx5_ib_page_fault_wq); -} diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ec2301ac0fde..53f4dd32f956 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1526,9 +1526,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, &qp->raw_packet_qp.rq.base : &qp->trans_qp.base; - if (init_attr->qp_type != IB_QPT_RAW_PACKET) - mlx5_ib_odp_create_qp(qp); - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -1923,7 +1920,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) if (qp->state != IB_QPS_RESET) { if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) { - mlx5_ib_qp_disable_pagefaults(qp); err = mlx5_core_qp_modify(dev->mdev, MLX5_CMD_OP_2RST_QP, 0, NULL, &base->mqp); @@ -2823,16 +2819,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (mlx5_st < 0) goto out; - /* If moving to a reset or error state, we must disable page faults on - * this QP and flush all current page faults. Otherwise a stale page - * fault may attempt to work on this QP after it is reset and moved - * again to RTS, and may cause the driver and the device to get out of - * sync. */ - if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && - (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) && - (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) - mlx5_ib_qp_disable_pagefaults(qp); - if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || !optab[mlx5_cur][mlx5_new]) goto out; @@ -2864,10 +2850,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (err) goto out; - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT && - (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) - mlx5_ib_qp_enable_pagefaults(qp); - qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) @@ -4533,14 +4515,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - /* - * Wait for any outstanding page faults, in case the user frees memory - * based upon this query's result. 
- */ - flush_workqueue(mlx5_ib_page_fault_wq); -#endif - mutex_lock(&qp->mutex); if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index a9dbc28f6b97..a62f4b6a21a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -71,6 +71,16 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (dev_ctx->intf->pfault) { + if (priv->pfault) { + mlx5_core_err(dev, "multiple page fault handlers not supported"); + } else { + priv->pfault_ctx = dev_ctx->context; + priv->pfault = dev_ctx->intf->pfault; + } + } +#endif spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); @@ -97,6 +107,15 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (!dev_ctx) return; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + spin_lock_irq(&priv->ctx_lock); + if (priv->pfault == dev_ctx->intf->pfault) + priv->pfault = NULL; + spin_unlock_irq(&priv->ctx_lock); + + synchronize_srcu(&priv->pfault_srcu); +#endif + spin_lock_irq(&priv->ctx_lock); list_del(&dev_ctx->list); spin_unlock_irq(&priv->ctx_lock); @@ -329,6 +348,20 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, spin_unlock_irqrestore(&priv->ctx_lock, flags); } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +void mlx5_core_page_fault(struct mlx5_core_dev *dev, + struct mlx5_pagefault *pfault) +{ + struct mlx5_priv *priv = &dev->priv; + int srcu_idx; + + srcu_idx = srcu_read_lock(&priv->pfault_srcu); + if (priv->pfault) + priv->pfault(dev, priv->pfault_ctx, pfault); + srcu_read_unlock(&priv->pfault_srcu, srcu_idx); +} +#endif + void mlx5_dev_list_lock(void) { mutex_lock(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 8ffcc8808e50..4aff8ac68e14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -54,6 +54,7 @@ enum { MLX5_NUM_SPARE_EQE = 0x80, MLX5_NUM_ASYNC_EQE = 0x100, MLX5_NUM_CMD_EQE = 32, + MLX5_NUM_PF_DRAIN = 64, }; enum { @@ -188,10 +189,193 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm) mb(); } -static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +static void eqe_pf_action(struct work_struct *work) +{ + struct mlx5_pagefault *pfault = container_of(work, + struct mlx5_pagefault, + work); + struct mlx5_eq *eq = pfault->eq; + + mlx5_core_page_fault(eq->dev, pfault); + mempool_free(pfault, eq->pf_ctx.pool); +} + +static void eq_pf_process(struct mlx5_eq *eq) +{ + struct mlx5_core_dev *dev = eq->dev; + struct mlx5_eqe_page_fault *pf_eqe; + struct mlx5_pagefault *pfault; + struct mlx5_eqe *eqe; + int set_ci = 0; + + while ((eqe = next_eqe_sw(eq))) { + pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC); + if (!pfault) { + schedule_work(&eq->pf_ctx.work); + break; + } + + dma_rmb(); + pf_eqe = &eqe->data.page_fault; + pfault->event_subtype = eqe->sub_type; + pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed); + + mlx5_core_dbg(dev, + "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n", + eqe->sub_type, pfault->bytes_committed); + + switch (eqe->sub_type) { + case MLX5_PFAULT_SUBTYPE_RDMA: + /* RDMA based event */ + pfault->type = + 
be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; + pfault->token = + be32_to_cpu(pf_eqe->rdma.pftype_token) & + MLX5_24BIT_MASK; + pfault->rdma.r_key = + be32_to_cpu(pf_eqe->rdma.r_key); + pfault->rdma.packet_size = + be16_to_cpu(pf_eqe->rdma.packet_length); + pfault->rdma.rdma_op_len = + be32_to_cpu(pf_eqe->rdma.rdma_op_len); + pfault->rdma.rdma_va = + be64_to_cpu(pf_eqe->rdma.rdma_va); + mlx5_core_dbg(dev, + "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n", + pfault->type, pfault->token, + pfault->rdma.r_key); + mlx5_core_dbg(dev, + "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", + pfault->rdma.rdma_op_len, + pfault->rdma.rdma_va); + break; + + case MLX5_PFAULT_SUBTYPE_WQE: + /* WQE based event */ + pfault->type = + be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24; + pfault->token = + be32_to_cpu(pf_eqe->wqe.token); + pfault->wqe.wq_num = + be32_to_cpu(pf_eqe->wqe.pftype_wq) & + MLX5_24BIT_MASK; + pfault->wqe.wqe_index = + be16_to_cpu(pf_eqe->wqe.wqe_index); + pfault->wqe.packet_size = + be16_to_cpu(pf_eqe->wqe.packet_length); + mlx5_core_dbg(dev, + "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n", + pfault->type, pfault->token, + pfault->wqe.wq_num, + pfault->wqe.wqe_index); + break; + + default: + mlx5_core_warn(dev, + "Unsupported page fault event sub-type: 0x%02hhx\n", + eqe->sub_type); + /* Unsupported page faults should still be + * resolved by the page fault handler + */ + } + + pfault->eq = eq; + INIT_WORK(&pfault->work, eqe_pf_action); + queue_work(eq->pf_ctx.wq, &pfault->work); + + ++eq->cons_index; + ++set_ci; + + if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); +} + +static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + unsigned long flags; + + if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) { + eq_pf_process(eq); + spin_unlock_irqrestore(&eq->pf_ctx.lock, flags); + } else { + schedule_work(&eq->pf_ctx.work); + } + + return IRQ_HANDLED; +} + +/* mempool_refill() was proposed but unfortunately wasn't accepted + * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html + * Chip workaround. 
+ */ +static void mempool_refill(mempool_t *pool) +{ + while (pool->curr_nr < pool->min_nr) + mempool_free(mempool_alloc(pool, GFP_KERNEL), pool); +} + +static void eq_pf_action(struct work_struct *work) +{ + struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work); + + mempool_refill(eq->pf_ctx.pool); + + spin_lock_irq(&eq->pf_ctx.lock); + eq_pf_process(eq); + spin_unlock_irq(&eq->pf_ctx.lock); +} + +static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name) { + spin_lock_init(&pf_ctx->lock); + INIT_WORK(&pf_ctx->work, eq_pf_action); + + pf_ctx->wq = alloc_ordered_workqueue(name, + WQ_MEM_RECLAIM); + if (!pf_ctx->wq) + return -ENOMEM; + + pf_ctx->pool = mempool_create_kmalloc_pool + (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault)); + if (!pf_ctx->pool) + goto err_wq; + + return 0; +err_wq: + destroy_workqueue(pf_ctx->wq); + return -ENOMEM; +} + +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, + u32 wq_num, u8 type, int error) +{ + u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; + + MLX5_SET(page_fault_resume_in, in, opcode, + MLX5_CMD_OP_PAGE_FAULT_RESUME); + MLX5_SET(page_fault_resume_in, in, error, !!error); + MLX5_SET(page_fault_resume_in, in, page_fault_type, type); + MLX5_SET(page_fault_resume_in, in, wq_number, wq_num); + MLX5_SET(page_fault_resume_in, in, token, token); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} +EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); +#endif + +static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + struct mlx5_core_dev *dev = eq->dev; struct mlx5_eqe *eqe; - int eqes_found = 0; int set_ci = 0; u32 cqn = -1; u32 rsn; @@ -276,12 +460,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } break; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - case MLX5_EVENT_TYPE_PAGE_FAULT: - mlx5_eq_pagefault(dev, eqe); - break; -#endif - #ifdef CONFIG_MLX5_CORE_EN case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); @@ -299,7 +477,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } ++eq->cons_index; - eqes_found = 1; ++set_ci; /* The HCA will think the queue has overflowed if we @@ -319,17 +496,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (cqn != -1) tasklet_schedule(&eq->tasklet_ctx.task); - return eqes_found; -} - -static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) -{ - struct mlx5_eq *eq = eq_ptr; - struct mlx5_core_dev *dev = eq->dev; - - mlx5_eq_int(dev, eq); - - /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } @@ -345,22 +511,32 @@ static void init_eq_buf(struct mlx5_eq *eq) } int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, struct mlx5_uar *uar) + int nent, u64 mask, const char *name, + struct mlx5_uar *uar, enum mlx5_eq_type type) { u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; + irq_handler_t handler; __be64 *pas; void *eqc; int inlen; u32 *in; int err; + eq->type = type; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); if (err) return err; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (type == MLX5_EQ_TYPE_PF) + handler = mlx5_eq_pf_int; + else +#endif + handler = mlx5_eq_int; + init_eq_buf(eq); inlen = MLX5_ST_SZ_BYTES(create_eq_in) + @@ -396,7 +572,7 @@ int 
mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, eq->irqn = priv->msix_arr[vecidx].vector; eq->dev = dev; eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = request_irq(eq->irqn, mlx5_msix_handler, 0, + err = request_irq(eq->irqn, handler, 0, priv->irq_info[vecidx].name, eq); if (err) goto err_eq; @@ -405,11 +581,20 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, if (err) goto err_irq; - INIT_LIST_HEAD(&eq->tasklet_ctx.list); - INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); - spin_lock_init(&eq->tasklet_ctx.lock); - tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, - (unsigned long)&eq->tasklet_ctx); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (type == MLX5_EQ_TYPE_PF) { + err = init_pf_ctx(&eq->pf_ctx, name); + if (err) + goto err_irq; + } else +#endif + { + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, + (unsigned long)&eq->tasklet_ctx); + } /* EQs are created in ARMED state */ @@ -444,7 +629,16 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); synchronize_irq(eq->irqn); - tasklet_disable(&eq->tasklet_ctx.task); + + if (eq->type == MLX5_EQ_TYPE_COMP) { + tasklet_disable(&eq->tasklet_ctx.task); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + } else if (eq->type == MLX5_EQ_TYPE_PF) { + cancel_work_sync(&eq->pf_ctx.work); + destroy_workqueue(eq->pf_ctx.wq); + mempool_destroy(eq->pf_ctx.pool); +#endif + } mlx5_buf_free(dev, &eq->buf); return err; @@ -479,8 +673,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (MLX5_CAP_GEN(dev, pg)) - async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && MLX5_CAP_GEN(dev, vport_group_manager) && @@ -494,7 +686,8 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, - "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); + "mlx5_cmd_eq", &dev->priv.uuari.uars[0], + MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); return err; @@ -504,7 +697,8 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, MLX5_NUM_ASYNC_EQE, async_event_mask, - "mlx5_async_eq", &dev->priv.uuari.uars[0]); + "mlx5_async_eq", &dev->priv.uuari.uars[0], + MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create async EQ %d\n", err); goto err1; @@ -514,13 +708,35 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) MLX5_EQ_VEC_PAGES, /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", - &dev->priv.uuari.uars[0]); + &dev->priv.uuari.uars[0], + MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); goto err2; } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_create_map_eq(dev, &table->pfault_eq, + MLX5_EQ_VEC_PFAULT, + MLX5_NUM_ASYNC_EQE, + 1 << MLX5_EVENT_TYPE_PAGE_FAULT, + "mlx5_page_fault_eq", + &dev->priv.uuari.uars[0], + MLX5_EQ_TYPE_PF); + if (err) { + mlx5_core_warn(dev, "failed to create page fault EQ %d\n", + err); + goto err3; + } + } + return err; +err3: + mlx5_destroy_unmap_eq(dev, &table->pages_eq); +#else + return err; +#endif 
err2: mlx5_destroy_unmap_eq(dev, &table->async_eq); @@ -536,6 +752,14 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = &dev->priv.eq_table; int err; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); + if (err) + return err; + } +#endif + err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 1713bd8d44a4..f4115135e30b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -753,7 +753,8 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, - name, &dev->priv.uuari.uars[0]); + name, &dev->priv.uuari.uars[0], + MLX5_EQ_TYPE_COMP); if (err) { kfree(eq); goto clean; @@ -1295,10 +1296,19 @@ static int init_one(struct pci_dev *pdev, spin_lock_init(&priv->ctx_lock); mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + err = init_srcu_struct(&priv->pfault_srcu); + if (err) { + dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n", + err); + goto clean_dev; + } +#endif err = mlx5_pci_init(dev, priv); if (err) { dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err); - goto clean_dev; + goto clean_srcu; } err = mlx5_health_init(dev); @@ -1332,7 +1342,11 @@ clean_health: mlx5_health_cleanup(dev); close_pci: mlx5_pci_close(dev, priv); +clean_srcu: +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + cleanup_srcu_struct(&priv->pfault_srcu); clean_dev: +#endif pci_set_drvdata(pdev, NULL); devlink_free(devlink); @@ -1357,6 +1371,9 @@ static void remove_one(struct pci_dev *pdev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_pci_close(dev, priv); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + cleanup_srcu_struct(&priv->pfault_srcu); +#endif pci_set_drvdata(pdev, NULL); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index d4a99c9757cb..74241e82de63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -86,6 +86,8 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); +void mlx5_core_page_fault(struct mlx5_core_dev *dev, + struct mlx5_pagefault *pfault); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 5378a5f74bdc..cbbcef2884be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -143,95 +143,6 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) mlx5_core_put_rsc(common); } -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) -{ - struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault; - int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; - struct mlx5_core_rsc_common *common = 
mlx5_get_rsc(dev, qpn); - struct mlx5_core_qp *qp = - container_of(common, struct mlx5_core_qp, common); - struct mlx5_pagefault pfault; - - if (!qp) { - mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n", - qpn); - return; - } - - pfault.event_subtype = eqe->sub_type; - pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) & - (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA); - pfault.bytes_committed = be32_to_cpu( - pf_eqe->bytes_committed); - - mlx5_core_dbg(dev, - "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n", - eqe->sub_type, pfault.flags); - - switch (eqe->sub_type) { - case MLX5_PFAULT_SUBTYPE_RDMA: - /* RDMA based event */ - pfault.rdma.r_key = - be32_to_cpu(pf_eqe->rdma.r_key); - pfault.rdma.packet_size = - be16_to_cpu(pf_eqe->rdma.packet_length); - pfault.rdma.rdma_op_len = - be32_to_cpu(pf_eqe->rdma.rdma_op_len); - pfault.rdma.rdma_va = - be64_to_cpu(pf_eqe->rdma.rdma_va); - mlx5_core_dbg(dev, - "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n", - qpn, pfault.rdma.r_key); - mlx5_core_dbg(dev, - "PAGE_FAULT: rdma_op_len: 0x%08x,\n", - pfault.rdma.rdma_op_len); - mlx5_core_dbg(dev, - "PAGE_FAULT: rdma_va: 0x%016llx,\n", - pfault.rdma.rdma_va); - mlx5_core_dbg(dev, - "PAGE_FAULT: bytes_committed: 0x%06x\n", - pfault.bytes_committed); - break; - - case MLX5_PFAULT_SUBTYPE_WQE: - /* WQE based event */ - pfault.wqe.wqe_index = - be16_to_cpu(pf_eqe->wqe.wqe_index); - pfault.wqe.packet_size = - be16_to_cpu(pf_eqe->wqe.packet_length); - mlx5_core_dbg(dev, - "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n", - qpn, pfault.wqe.wqe_index); - mlx5_core_dbg(dev, - "PAGE_FAULT: bytes_committed: 0x%06x\n", - pfault.bytes_committed); - break; - - default: - mlx5_core_warn(dev, - "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n", - eqe->sub_type, qpn); - /* Unsupported page faults should still be resolved by the - * page fault handler - */ - } - - if (qp->pfault_handler) { - qp->pfault_handler(qp, &pfault); - } else { - mlx5_core_err(dev, - "ODP event for QP %08x, without a fault handler in QP\n", - qpn); - /* Page fault will remain unresolved. 
QP will hang until it is - * destroyed - */ - } - - mlx5_core_put_rsc(common); -} -#endif - static int create_qprqsq_common(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int rsc_type) @@ -506,25 +417,6 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, - u8 flags, int error) -{ - u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; - - MLX5_SET(page_fault_resume_in, in, opcode, - MLX5_CMD_OP_PAGE_FAULT_RESUME); - MLX5_SET(page_fault_resume_in, in, wq_number, qpn); - MLX5_SET(page_fault_resume_in, in, page_fault_type, flags); - if (error) - MLX5_SET(page_fault_resume_in, in, error, 1); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} -EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); -#endif - int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq) { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 9f489365b3d3..3ccaeff15a80 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -534,7 +534,9 @@ struct mlx5_eqe_page_fault { __be16 wqe_index; u16 reserved2; __be16 packet_length; - u8 reserved3[12]; + __be32 token; + u8 reserved4[8]; + __be32 pftype_wq; } __packed wqe; struct { __be32 r_key; @@ -542,9 +544,9 @@ struct mlx5_eqe_page_fault { __be16 packet_length; __be32 rdma_op_len; __be64 rdma_va; + __be32 pftype_token; } __packed rdma; } __packed; - __be32 flags_qpn; } __packed; struct mlx5_eqe_vport_change { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ec52f3b50bf5..b52d07491fe7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -83,6 +84,7 @@ enum { MLX5_EQ_VEC_PAGES = 0, MLX5_EQ_VEC_CMD = 1, MLX5_EQ_VEC_ASYNC = 2, + MLX5_EQ_VEC_PFAULT = 3, MLX5_EQ_VEC_COMP_BASE, }; @@ -178,6 +180,14 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; +enum mlx5_eq_type { + MLX5_EQ_TYPE_COMP, + MLX5_EQ_TYPE_ASYNC, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + MLX5_EQ_TYPE_PF, +#endif +}; + struct mlx5_uuar_info { struct mlx5_uar *uars; int num_uars; @@ -333,6 +343,14 @@ struct mlx5_eq_tasklet { spinlock_t lock; }; +struct mlx5_eq_pagefault { + struct work_struct work; + /* Pagefaults lock */ + spinlock_t lock; + struct workqueue_struct *wq; + mempool_t *pool; +}; + struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem *doorbell; @@ -346,7 +364,13 @@ struct mlx5_eq { struct list_head list; int index; struct mlx5_rsc_debug *dbg; - struct mlx5_eq_tasklet tasklet_ctx; + enum mlx5_eq_type type; + union { + struct mlx5_eq_tasklet tasklet_ctx; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + struct mlx5_eq_pagefault pf_ctx; +#endif + }; }; struct mlx5_core_psv { @@ -377,6 +401,8 @@ struct mlx5_core_mkey { u32 pd; }; +#define MLX5_24BIT_MASK ((1 << 24) - 1) + enum mlx5_res_type { MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, @@ -411,6 +437,9 @@ struct mlx5_eq_table { struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + struct mlx5_eq pfault_eq; +#endif int num_comp_vectors; /* protect EQs list */ @@ -497,6 +526,7 @@ struct mlx5_fc_stats { struct mlx5_eswitch; struct mlx5_lag; +struct mlx5_pagefault; struct mlx5_rl_entry { u32 rate; @@ -601,6 
+631,14 @@ struct mlx5_priv { struct mlx5_rl_table rl_table; struct mlx5_port_module_event_stats pme_stats; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + void (*pfault)(struct mlx5_core_dev *dev, + void *context, + struct mlx5_pagefault *pfault); + void *pfault_ctx; + struct srcu_struct pfault_srcu; +#endif }; enum mlx5_device_state { @@ -619,6 +657,50 @@ enum mlx5_pci_status { MLX5_PCI_STATUS_ENABLED, }; +enum mlx5_pagefault_type_flags { + MLX5_PFAULT_REQUESTOR = 1 << 0, + MLX5_PFAULT_WRITE = 1 << 1, + MLX5_PFAULT_RDMA = 1 << 2, +}; + +/* Contains the details of a pagefault. */ +struct mlx5_pagefault { + u32 bytes_committed; + u32 token; + u8 event_subtype; + u8 type; + union { + /* Initiator or send message responder pagefault details. */ + struct { + /* Received packet size, only valid for responders. */ + u32 packet_size; + /* + * Number of resource holding WQE, depends on type. + */ + u32 wq_num; + /* + * WQE index. Refers to either the send queue or + * receive queue, according to event_subtype. + */ + u16 wqe_index; + } wqe; + /* RDMA responder pagefault details */ + struct { + u32 r_key; + /* + * Received packet size, minimal size page fault + * resolution required for forward progress. + */ + u32 packet_size; + u32 rdma_op_len; + u64 rdma_va; + } rdma; + }; + + struct mlx5_eq *eq; + struct work_struct work; +}; + struct mlx5_td { struct list_head tirs_list; u32 tdn; @@ -879,15 +961,13 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); -#endif void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, struct mlx5_uar *uar); + int nent, u64 mask, const char *name, + struct mlx5_uar *uar, enum mlx5_eq_type type); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); @@ -926,6 +1006,10 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *odp_caps); int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, size_t sz); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, + u32 wq_num, u8 type, int error); +#endif int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); @@ -974,6 +1058,9 @@ struct mlx5_interface { void (*detach)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param); + void (*pfault)(struct mlx5_core_dev *dev, + void *context, + struct mlx5_pagefault *pfault); void * (*get_dev)(void *context); int protocol; struct list_head list; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 693811e0cb24..9ed775f5cb66 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -50,9 +50,6 @@ #define MLX5_BSF_APPTAG_ESCAPE 
0x1 #define MLX5_BSF_APPREF_ESCAPE 0x2 -#define MLX5_QPN_BITS 24 -#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1) - enum mlx5_qp_optpar { MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MLX5_QP_OPTPAR_RRE = 1 << 1, @@ -418,46 +415,9 @@ struct mlx5_stride_block_ctrl_seg { __be16 num_entries; }; -enum mlx5_pagefault_flags { - MLX5_PFAULT_REQUESTOR = 1 << 0, - MLX5_PFAULT_WRITE = 1 << 1, - MLX5_PFAULT_RDMA = 1 << 2, -}; - -/* Contains the details of a pagefault. */ -struct mlx5_pagefault { - u32 bytes_committed; - u8 event_subtype; - enum mlx5_pagefault_flags flags; - union { - /* Initiator or send message responder pagefault details. */ - struct { - /* Received packet size, only valid for responders. */ - u32 packet_size; - /* - * WQE index. Refers to either the send queue or - * receive queue, according to event_subtype. - */ - u16 wqe_index; - } wqe; - /* RDMA responder pagefault details */ - struct { - u32 r_key; - /* - * Received packet size, minimal size page fault - * resolution required for forward progress. - */ - u32 packet_size; - u32 rdma_op_len; - u64 rdma_va; - } rdma; - }; -}; - struct mlx5_core_qp { struct mlx5_core_rsc_common common; /* must be first */ void (*event) (struct mlx5_core_qp *, int); - void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *); int qpn; struct mlx5_rsc_debug *dbg; int pid; @@ -557,10 +517,6 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev); void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, - u8 context, int error); -#endif int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq); void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, -- cgit v1.2.3 From 17d2f88f92ce39b348f125f6b2e6eeb6b0906ac7 Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:47 +0200 Subject: IB/mlx5: Add ODP atomics support Handle ODP atomic operations. When initiator of RDMA atomic operation use ODP MR to provide source data handle pagefault properly. Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/odp.c | 88 +++++++++++++++++++++++----------------- include/linux/mlx5/mlx5_ifc.h | 2 +- include/linux/mlx5/qp.h | 18 ++++++++ 3 files changed, 69 insertions(+), 39 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 26f96c79a45a..971b2885f474 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -144,6 +144,9 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ; + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC; + return; } @@ -386,6 +389,17 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev, return ret < 0 ? 
ret : npages; } +static const u32 mlx5_ib_odp_opcode_cap[] = { + [MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND, + [MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND, + [MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND, + [MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE, + [MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE, + [MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ, + [MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC, + [MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC, +}; + /* * Parse initiator WQE. Advances the wqe pointer to point at the * scatter-gather list, and set wqe_end to the end of the WQE. @@ -396,6 +410,8 @@ static int mlx5_ib_mr_initiator_pfault_handler( { struct mlx5_wqe_ctrl_seg *ctrl = *wqe; u16 wqe_index = pfault->wqe.wqe_index; + u32 transport_caps; + struct mlx5_base_av *av; unsigned ds, opcode; #if defined(DEBUG) u32 ctrl_wqe_index, ctrl_qpn; @@ -441,53 +457,49 @@ static int mlx5_ib_mr_initiator_pfault_handler( opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & MLX5_WQE_CTRL_OPCODE_MASK; + switch (qp->ibqp.qp_type) { case IB_QPT_RC: - switch (opcode) { - case MLX5_OPCODE_SEND: - case MLX5_OPCODE_SEND_IMM: - case MLX5_OPCODE_SEND_INVAL: - if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & - IB_ODP_SUPPORT_SEND)) - goto invalid_transport_or_opcode; - break; - case MLX5_OPCODE_RDMA_WRITE: - case MLX5_OPCODE_RDMA_WRITE_IMM: - if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & - IB_ODP_SUPPORT_WRITE)) - goto invalid_transport_or_opcode; - *wqe += sizeof(struct mlx5_wqe_raddr_seg); - break; - case MLX5_OPCODE_RDMA_READ: - if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & - IB_ODP_SUPPORT_READ)) - goto invalid_transport_or_opcode; - *wqe += sizeof(struct mlx5_wqe_raddr_seg); - break; - default: - goto invalid_transport_or_opcode; - } + transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps; break; case IB_QPT_UD: - switch (opcode) { - case MLX5_OPCODE_SEND: - case MLX5_OPCODE_SEND_IMM: - if (!(dev->odp_caps.per_transport_caps.ud_odp_caps & - IB_ODP_SUPPORT_SEND)) - goto invalid_transport_or_opcode; - *wqe += sizeof(struct mlx5_wqe_datagram_seg); - break; - default: - goto invalid_transport_or_opcode; - } + transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps; break; default: -invalid_transport_or_opcode: - mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. 
transport: 0x%x opcode: 0x%x.\n", - qp->ibqp.qp_type, opcode); + mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n", + qp->ibqp.qp_type); + return -EFAULT; + } + + if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) / + sizeof(mlx5_ib_odp_opcode_cap[0]) || + !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) { + mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n", + opcode); return -EFAULT; } + if (qp->ibqp.qp_type != IB_QPT_RC) { + av = *wqe; + if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) + *wqe += sizeof(struct mlx5_av); + else + *wqe += sizeof(struct mlx5_base_av); + } + + switch (opcode) { + case MLX5_OPCODE_RDMA_WRITE: + case MLX5_OPCODE_RDMA_WRITE_IMM: + case MLX5_OPCODE_RDMA_READ: + *wqe += sizeof(struct mlx5_wqe_raddr_seg); + break; + case MLX5_OPCODE_ATOMIC_CS: + case MLX5_OPCODE_ATOMIC_FA: + *wqe += sizeof(struct mlx5_wqe_raddr_seg); + *wqe += sizeof(struct mlx5_wqe_atomic_seg); + break; + } + return 0; } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 608dc988b3d6..15f896781966 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -328,7 +328,7 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 receive[0x1]; u8 write[0x1]; u8 read[0x1]; - u8 reserved_at_4[0x1]; + u8 atomic[0x1]; u8 srq_receive[0x1]; u8 reserved_at_6[0x1a]; }; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 9ed775f5cb66..219c699c17b7 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -212,6 +212,7 @@ struct mlx5_wqe_ctrl_seg { #define MLX5_WQE_CTRL_OPCODE_MASK 0xff #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 +#define MLX5_WQE_AV_EXT 0x80000000 enum { MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, @@ -242,6 +243,23 @@ struct mlx5_wqe_masked_atomic_seg { __be64 compare_mask; }; +struct mlx5_base_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + union { + __be16 rlid; + __be16 udp_sport; + }; +}; + struct mlx5_av { union { struct { -- cgit v1.2.3 From aa8e08d2f523501c40b0e70f1c4ecacb97195931 Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Mon, 2 Jan 2017 11:37:48 +0200 Subject: IB/mlx5: Improve MR check Add "type" field to mlx5_core MKEY struct. Check whether page fault happens on MKEY corresponding to MR. Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/mr.c | 5 +++++ drivers/infiniband/hw/mlx5/odp.c | 9 +++++++-- include/linux/mlx5/driver.h | 6 ++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index f4ecc10cbbba..8cf2a67f9fb0 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -129,6 +129,7 @@ static void reg_mr_callback(int status, void *context) return; } + mr->mmkey.type = MLX5_MKEY_MR; spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); key = dev->mdev->priv.mkey_key++; spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); @@ -728,6 +729,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) goto err_in; kfree(in); + mr->mmkey.type = MLX5_MKEY_MR; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; mr->umem = NULL; @@ -1088,6 +1090,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, mlx5_ib_warn(dev, "create mkey failed\n"); goto err_2; } + mr->mmkey.type = MLX5_MKEY_MR; mr->umem = umem; mr->dev = dev; mr->live = 1; @@ -1533,6 +1536,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, if (err) goto err_destroy_psv; + mr->mmkey.type = MLX5_MKEY_MR; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; mr->umem = NULL; @@ -1613,6 +1617,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, if (err) goto free; + mw->mmkey.type = MLX5_MKEY_MW; mw->ibmw.rkey = mw->mmkey.key; resp.response_length = min(offsetof(typeof(resp), response_length) + diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 971b2885f474..e5bc267aca73 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -155,9 +155,14 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev, { u32 base_key = mlx5_base_mkey(key); struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key); - struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); + struct mlx5_ib_mr *mr; + + if (!mmkey || mmkey->key != key || mmkey->type != MLX5_MKEY_MR) + return NULL; + + mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); - if (!mmkey || mmkey->key != key || !mr->live) + if (!mr->live) return NULL; return container_of(mmkey, struct mlx5_ib_mr, mmkey); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index b52d07491fe7..cfa49bca009c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -394,11 +394,17 @@ struct mlx5_core_sig_ctx { u32 sigerr_count; }; +enum { + MLX5_MKEY_MR = 1, + MLX5_MKEY_MW, +}; + struct mlx5_core_mkey { u64 iova; u64 size; u32 key; u32 pd; + u32 type; }; #define MLX5_24BIT_MASK ((1 << 24) - 1) -- cgit v1.2.3 From 8e4881aa1d5d2f9c7ebfd0fe5e138f0cc345832c Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 Jan 2017 19:02:45 +0100 Subject: net: mdio: add mdio45_ethtool_ksettings_get There is a function in mdio for the old ethtool api gset. We add a new function mdio45_ethtool_ksettings_get for the new ethtool api glinksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/mdio.c | 178 +++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mdio.h | 21 ++++++ 2 files changed, 199 insertions(+) diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index 3e027ed0b3bb..077364cbf439 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c @@ -341,6 +341,184 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, } EXPORT_SYMBOL(mdio45_ethtool_gset_npage); +/** + * mdio45_ethtool_ksettings_get_npage - get settings for ETHTOOL_GLINKSETTINGS + * @mdio: MDIO interface + * @cmd: Ethtool request structure + * @npage_adv: Modes currently advertised on next pages + * @npage_lpa: Modes advertised by link partner on next pages + * + * The @cmd parameter is expected to have been cleared before calling + * mdio45_ethtool_ksettings_get_npage(). + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. The + * caller must pass them in. + */ +void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd, + u32 npage_adv, u32 npage_lpa) +{ + int reg; + u32 speed, supported = 0, advertising = 0, lp_advertising = 0; + + BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22); + BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45); + + cmd->base.phy_address = mdio->prtad; + cmd->base.mdio_support = + mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22); + + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_CTRL2); + switch (reg & MDIO_PMA_CTRL2_TYPE) { + case MDIO_PMA_CTRL2_10GBT: + case MDIO_PMA_CTRL2_1000BT: + case MDIO_PMA_CTRL2_100BTX: + case MDIO_PMA_CTRL2_10BT: + cmd->base.port = PORT_TP; + supported = SUPPORTED_TP; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_SPEED); + if (reg & MDIO_SPEED_10G) + supported |= SUPPORTED_10000baseT_Full; + if (reg & MDIO_PMA_SPEED_1000) + supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half); + if (reg & MDIO_PMA_SPEED_100) + supported |= (SUPPORTED_100baseT_Full | + SUPPORTED_100baseT_Half); + if (reg & MDIO_PMA_SPEED_10) + supported |= (SUPPORTED_10baseT_Full | + SUPPORTED_10baseT_Half); + advertising = ADVERTISED_TP; + break; + + case MDIO_PMA_CTRL2_10GBCX4: + cmd->base.port = PORT_OTHER; + supported = 0; + advertising = 0; + break; + + case MDIO_PMA_CTRL2_10GBKX4: + case MDIO_PMA_CTRL2_10GBKR: + case MDIO_PMA_CTRL2_1000BKX: + cmd->base.port = PORT_OTHER; + supported = SUPPORTED_Backplane; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_PMA_EXTABLE); + if (reg & MDIO_PMA_EXTABLE_10GBKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (reg & MDIO_PMA_EXTABLE_10GBKR) + supported |= SUPPORTED_10000baseKR_Full; + if (reg & MDIO_PMA_EXTABLE_1000BKX) + supported |= SUPPORTED_1000baseKX_Full; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBR_FECABLE); + if (reg & MDIO_PMA_10GBR_FECABLE_ABLE) + supported |= SUPPORTED_10000baseR_FEC; + advertising = ADVERTISED_Backplane; + break; + + /* All the other defined modes are flavours of optical */ + default: + cmd->base.port = PORT_FIBRE; + supported = SUPPORTED_FIBRE; + advertising = ADVERTISED_FIBRE; + break; + } + + if (mdio->mmds & MDIO_DEVS_AN) { + supported |= SUPPORTED_Autoneg; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, + MDIO_CTRL1); + if (reg & MDIO_AN_CTRL1_ENABLE) { + cmd->base.autoneg = AUTONEG_ENABLE; + advertising |= + ADVERTISED_Autoneg | + mdio45_get_an(mdio, 
MDIO_AN_ADVERTISE) | + npage_adv; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + if (cmd->base.autoneg) { + u32 modes = 0; + int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad, + MDIO_MMD_AN, MDIO_STAT1); + + /* If AN is complete and successful, report best common + * mode, otherwise report best advertised mode. + */ + if (an_stat & MDIO_AN_STAT1_COMPLETE) { + lp_advertising = + mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa; + if (an_stat & MDIO_AN_STAT1_LPABLE) + lp_advertising |= ADVERTISED_Autoneg; + modes = advertising & lp_advertising; + } + if ((modes & ~ADVERTISED_Autoneg) == 0) + modes = advertising; + + if (modes & (ADVERTISED_10000baseT_Full | + ADVERTISED_10000baseKX4_Full | + ADVERTISED_10000baseKR_Full)) { + speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; + } else if (modes & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseKX_Full)) { + speed = SPEED_1000; + cmd->base.duplex = !(modes & ADVERTISED_1000baseT_Half); + } else if (modes & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { + speed = SPEED_100; + cmd->base.duplex = !!(modes & ADVERTISED_100baseT_Full); + } else { + speed = SPEED_10; + cmd->base.duplex = !!(modes & ADVERTISED_10baseT_Full); + } + } else { + /* Report forced settings */ + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_CTRL1); + speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) + * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10)); + cmd->base.duplex = (reg & MDIO_CTRL1_FULLDPLX || + speed == SPEED_10000); + } + + cmd->base.speed = speed; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + + /* 10GBASE-T MDI/MDI-X */ + if (cmd->base.port == PORT_TP && (cmd->base.speed == SPEED_10000)) { + switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBT_SWAPPOL)) { + case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: + cmd->base.eth_tp_mdix = ETH_TP_MDI; + break; + case 0: + cmd->base.eth_tp_mdix = ETH_TP_MDI_X; + break; + default: + /* It's complicated... */ + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + break; + } + } +} +EXPORT_SYMBOL(mdio45_ethtool_ksettings_get_npage); + /** * mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs * @mdio: MDIO interface diff --git a/include/linux/mdio.h b/include/linux/mdio.h index bf9d1d750693..b6587a4b32e7 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -130,6 +130,10 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio); extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, struct ethtool_cmd *ecmd, u32 npage_adv, u32 npage_lpa); +extern void +mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd, + u32 npage_adv, u32 npage_lpa); /** * mdio45_ethtool_gset - get settings for ETHTOOL_GSET @@ -147,6 +151,23 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); } +/** + * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS + * @mdio: MDIO interface + * @cmd: Ethtool request structure + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. 
Use + * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits + * from next pages. + */ +static inline void +mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd) +{ + mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0); +} + extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, struct mii_ioctl_data *mii_data, int cmd); -- cgit v1.2.3 From e938ed150f1ed9410ff50d19ac32efdd26c77903 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 Jan 2017 19:02:46 +0100 Subject: net: sfc: falcon: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/falcon/efx.c | 2 +- drivers/net/ethernet/sfc/falcon/ethtool.c | 29 +++++++++-------- drivers/net/ethernet/sfc/falcon/mdio_10g.c | 44 ++++++++++++++++---------- drivers/net/ethernet/sfc/falcon/mdio_10g.h | 3 +- drivers/net/ethernet/sfc/falcon/net_driver.h | 12 +++---- drivers/net/ethernet/sfc/falcon/qt202x_phy.c | 9 +++--- drivers/net/ethernet/sfc/falcon/tenxpress.c | 22 ++++++------- drivers/net/ethernet/sfc/falcon/txc43128_phy.c | 9 +++--- 8 files changed, 74 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 5c5cb3c4c12e..438ef9e7a10d 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -986,7 +986,7 @@ void ef4_mac_reconfigure(struct ef4_nic *efx) /* Push loopback/power/transmit disable settings to the PHY, and reconfigure * the MAC appropriately. All other PHY configuration changes are pushed - * through phy_op->set_settings(), and pushed asynchronously to the MAC + * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC * through ef4_monitor(). * * Callers must hold the mac_lock diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index 8e1929b01a32..56049157a5af 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -115,44 +115,47 @@ static int ef4_ethtool_phys_id(struct net_device *net_dev, } /* This must be called with rtnl_lock held. */ -static int ef4_ethtool_get_settings(struct net_device *net_dev, - struct ethtool_cmd *ecmd) +static int +ef4_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { struct ef4_nic *efx = netdev_priv(net_dev); struct ef4_link_state *link_state = &efx->link_state; mutex_lock(&efx->mac_lock); - efx->phy_op->get_settings(efx, ecmd); + efx->phy_op->get_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); /* Both MACs support pause frames (bidirectional and respond-only) */ - ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); if (LOOPBACK_INTERNAL(efx)) { - ethtool_cmd_speed_set(ecmd, link_state->speed); - ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.speed = link_state->speed; + cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; } return 0; } /* This must be called with rtnl_lock held. 
*/ -static int ef4_ethtool_set_settings(struct net_device *net_dev, - struct ethtool_cmd *ecmd) +static int +ef4_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { struct ef4_nic *efx = netdev_priv(net_dev); int rc; /* GMAC does not support 1000Mbps HD */ - if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && - (ecmd->duplex != DUPLEX_FULL)) { + if ((cmd->base.speed == SPEED_1000) && + (cmd->base.duplex != DUPLEX_FULL)) { netif_dbg(efx, drv, efx->net_dev, "rejecting unsupported 1000Mbps HD setting\n"); return -EINVAL; } mutex_lock(&efx->mac_lock); - rc = efx->phy_op->set_settings(efx, ecmd); + rc = efx->phy_op->set_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); return rc; } @@ -1310,8 +1313,6 @@ static int ef4_ethtool_get_module_info(struct net_device *net_dev, } const struct ethtool_ops ef4_ethtool_ops = { - .get_settings = ef4_ethtool_get_settings, - .set_settings = ef4_ethtool_set_settings, .get_drvinfo = ef4_ethtool_get_drvinfo, .get_regs_len = ef4_ethtool_get_regs_len, .get_regs = ef4_ethtool_get_regs, @@ -1340,4 +1341,6 @@ const struct ethtool_ops ef4_ethtool_ops = { .set_rxfh = ef4_ethtool_set_rxfh, .get_module_info = ef4_ethtool_get_module_info, .get_module_eeprom = ef4_ethtool_get_module_eeprom, + .get_link_ksettings = ef4_ethtool_get_link_ksettings, + .set_link_ksettings = ef4_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.c b/drivers/net/ethernet/sfc/falcon/mdio_10g.c index e7d7c09296aa..ee0713f03d01 100644 --- a/drivers/net/ethernet/sfc/falcon/mdio_10g.c +++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.c @@ -226,33 +226,45 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, } /** - * ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO. + * ef4_mdio_set_link_ksettings - Set (some of) the PHY settings over MDIO. 
* @efx: Efx NIC - * @ecmd: New settings + * @cmd: New settings */ -int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +int ef4_mdio_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd) { - struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET }; - - efx->phy_op->get_settings(efx, &prev); - - if (ecmd->advertising == prev.advertising && - ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) && - ecmd->duplex == prev.duplex && - ecmd->port == prev.port && - ecmd->autoneg == prev.autoneg) + struct ethtool_link_ksettings prev = { + .base.cmd = ETHTOOL_GLINKSETTINGS + }; + u32 prev_advertising, advertising; + u32 prev_supported; + + efx->phy_op->get_link_ksettings(efx, &prev); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32(&prev_advertising, + prev.link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32(&prev_supported, + prev.link_modes.supported); + + if (advertising == prev_advertising && + cmd->base.speed == prev.base.speed && + cmd->base.duplex == prev.base.duplex && + cmd->base.port == prev.base.port && + cmd->base.autoneg == prev.base.autoneg) return 0; /* We can only change these settings for -T PHYs */ - if (prev.port != PORT_TP || ecmd->port != PORT_TP) + if (prev.base.port != PORT_TP || cmd->base.port != PORT_TP) return -EINVAL; /* Check that PHY supports these settings */ - if (!ecmd->autoneg || - (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported) + if (!cmd->base.autoneg || + (advertising | SUPPORTED_Autoneg) & ~prev_supported) return -EINVAL; - ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg); + ef4_link_set_advertising(efx, advertising | ADVERTISED_Autoneg); ef4_mdio_an_reconfigure(efx); return 0; } diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.h b/drivers/net/ethernet/sfc/falcon/mdio_10g.h index 885cf7a834a6..53cb5cc4ad37 100644 --- a/drivers/net/ethernet/sfc/falcon/mdio_10g.h +++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.h @@ -83,7 +83,8 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power, unsigned int mmd_mask); /* Set (some of) the PHY settings over MDIO */ -int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd); +int ef4_mdio_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd); /* Push advertising flags and restart autonegotiation */ void ef4_mdio_an_reconfigure(struct ef4_nic *efx); diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h index 210b28f7d2a1..fe59dd67f918 100644 --- a/drivers/net/ethernet/sfc/falcon/net_driver.h +++ b/drivers/net/ethernet/sfc/falcon/net_driver.h @@ -684,8 +684,8 @@ static inline bool ef4_link_state_equal(const struct ef4_link_state *left, * @reconfigure: Reconfigure PHY (e.g. for new link parameters) * @poll: Update @link_state and report whether it changed. * Serialised by the mac_lock. - * @get_settings: Get ethtool settings. Serialised by the mac_lock. - * @set_settings: Set ethtool settings. Serialised by the mac_lock. + * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock. + * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock. 
* @set_npage_adv: Set abilities advertised in (Extended) Next Page * (only needed where AN bit is set in mmds) * @test_alive: Test that PHY is 'alive' (online) @@ -700,10 +700,10 @@ struct ef4_phy_operations { void (*remove) (struct ef4_nic *efx); int (*reconfigure) (struct ef4_nic *efx); bool (*poll) (struct ef4_nic *efx); - void (*get_settings) (struct ef4_nic *efx, - struct ethtool_cmd *ecmd); - int (*set_settings) (struct ef4_nic *efx, - struct ethtool_cmd *ecmd); + void (*get_link_ksettings)(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd); + int (*set_link_ksettings)(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd); void (*set_npage_adv) (struct ef4_nic *efx, u32); int (*test_alive) (struct ef4_nic *efx); const char *(*test_name) (struct ef4_nic *efx, unsigned int index); diff --git a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c index d29331652548..f5e0f18d4ea8 100644 --- a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c +++ b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c @@ -437,9 +437,10 @@ static int qt202x_phy_reconfigure(struct ef4_nic *efx) return 0; } -static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static void qt202x_phy_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { - mdio45_ethtool_gset(&efx->mdio, ecmd); + mdio45_ethtool_ksettings_get(&efx->mdio, cmd); } static void qt202x_phy_remove(struct ef4_nic *efx) @@ -487,8 +488,8 @@ const struct ef4_phy_operations falcon_qt202x_phy_ops = { .poll = qt202x_phy_poll, .fini = ef4_port_dummy_op_void, .remove = qt202x_phy_remove, - .get_settings = qt202x_phy_get_settings, - .set_settings = ef4_mdio_set_settings, + .get_link_ksettings = qt202x_phy_get_link_ksettings, + .set_link_ksettings = ef4_mdio_set_link_ksettings, .test_alive = ef4_mdio_test_alive, .get_module_eeprom = qt202x_phy_get_module_eeprom, .get_module_info = qt202x_phy_get_module_info, diff --git a/drivers/net/ethernet/sfc/falcon/tenxpress.c b/drivers/net/ethernet/sfc/falcon/tenxpress.c index acc548a1c4d6..ff9b4e2b590c 100644 --- a/drivers/net/ethernet/sfc/falcon/tenxpress.c +++ b/drivers/net/ethernet/sfc/falcon/tenxpress.c @@ -351,9 +351,6 @@ static int tenxpress_phy_reconfigure(struct ef4_nic *efx) return 0; } -static void -tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd); - /* Poll for link state changes */ static bool tenxpress_phy_poll(struct ef4_nic *efx) { @@ -443,7 +440,8 @@ sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags) } static void -tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +tenxpress_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { u32 adv = 0, lpa = 0; int reg; @@ -455,20 +453,22 @@ tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) if (reg & MDIO_AN_10GBT_STAT_LP10G) lpa |= ADVERTISED_10000baseT_Full; - mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); + mdio45_ethtool_ksettings_get_npage(&efx->mdio, cmd, adv, lpa); /* In loopback, the PHY automatically brings up the correct interface, * but doesn't advertise the correct speed. 
So override it */ if (LOOPBACK_EXTERNAL(efx)) - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; } -static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static int +tenxpress_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd) { - if (!ecmd->autoneg) + if (!cmd->base.autoneg) return -EINVAL; - return ef4_mdio_set_settings(efx, ecmd); + return ef4_mdio_set_link_ksettings(efx, cmd); } static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising) @@ -485,8 +485,8 @@ const struct ef4_phy_operations falcon_sfx7101_phy_ops = { .poll = tenxpress_phy_poll, .fini = sfx7101_phy_fini, .remove = tenxpress_phy_remove, - .get_settings = tenxpress_get_settings, - .set_settings = tenxpress_set_settings, + .get_link_ksettings = tenxpress_get_link_ksettings, + .set_link_ksettings = tenxpress_set_link_ksettings, .set_npage_adv = sfx7101_set_npage_adv, .test_alive = ef4_mdio_test_alive, .test_name = sfx7101_test_name, diff --git a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c index 18421f5e880f..3c55fd23c271 100644 --- a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c @@ -540,9 +540,10 @@ static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags) return rc; } -static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static void txc43128_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { - mdio45_ethtool_gset(&efx->mdio, ecmd); + mdio45_ethtool_ksettings_get(&efx->mdio, cmd); } const struct ef4_phy_operations falcon_txc_phy_ops = { @@ -552,8 +553,8 @@ const struct ef4_phy_operations falcon_txc_phy_ops = { .poll = txc43128_phy_poll, .fini = txc43128_phy_fini, .remove = txc43128_phy_remove, - .get_settings = txc43128_get_settings, - .set_settings = ef4_mdio_set_settings, + .get_link_ksettings = txc43128_get_link_ksettings, + .set_link_ksettings = ef4_mdio_set_link_ksettings, .test_alive = ef4_mdio_test_alive, .run_tests = txc43128_run_tests, .test_name = txc43128_test_name, -- cgit v1.2.3 From 41a65f7040d1a65557b3ba42fe4105e209a66ad8 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 Jan 2017 19:05:38 +0100 Subject: net: dec: de2104x: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/de2104x.c | 91 ++++++++++++++++++-------------- 1 file changed, 51 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 57c17e797ae3..127ce9707378 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -1485,95 +1485,104 @@ static void __de_get_regs(struct de_private *de, u8 *buf) de_rx_missed(de, rbuf[8]); } -static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) +static int __de_get_link_ksettings(struct de_private *de, + struct ethtool_link_ksettings *cmd) { - ecmd->supported = de->media_supported; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->phy_address = 0; - ecmd->advertising = de->media_advertise; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + de->media_supported); + cmd->base.phy_address = 0; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + de->media_advertise); switch (de->media_type) { case DE_MEDIA_AUI: - ecmd->port = PORT_AUI; + cmd->base.port = PORT_AUI; break; case DE_MEDIA_BNC: - ecmd->port = PORT_BNC; + cmd->base.port = PORT_BNC; break; default: - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; break; } - ethtool_cmd_speed_set(ecmd, 10); + cmd->base.speed = 10; if (dr32(MacMode) & FullDuplex) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; if (de->media_lock) - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; else - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; /* ignore maxtxpkt, maxrxpkt for now */ return 0; } -static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) +static int __de_set_link_ksettings(struct de_private *de, + const struct ethtool_link_ksettings *cmd) { u32 new_media; unsigned int media_lock; + u8 duplex = cmd->base.duplex; + u8 port = cmd->base.port; + u8 autoneg = cmd->base.autoneg; + u32 advertising; - if (ethtool_cmd_speed(ecmd) != 10) - return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.speed != 10) return -EINVAL; - if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC) + if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) return -EINVAL; - if (de->de21040 && ecmd->port == PORT_BNC) + if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC) return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL) + if (de->de21040 && port == PORT_BNC) return -EINVAL; - if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE) return -EINVAL; - if (ecmd->advertising & ~de->media_supported) + if (advertising & ~de->media_supported) return -EINVAL; - if (ecmd->autoneg == AUTONEG_ENABLE && - (!(ecmd->advertising & ADVERTISED_Autoneg))) + if (autoneg == AUTONEG_ENABLE && + (!(advertising & ADVERTISED_Autoneg))) return -EINVAL; - switch (ecmd->port) { + switch (port) { case PORT_AUI: new_media = DE_MEDIA_AUI; - if (!(ecmd->advertising & ADVERTISED_AUI)) + if (!(advertising & ADVERTISED_AUI)) return -EINVAL; break; case PORT_BNC: new_media = DE_MEDIA_BNC; - if (!(ecmd->advertising & ADVERTISED_BNC)) + if (!(advertising & ADVERTISED_BNC)) return -EINVAL; break; default: - if (ecmd->autoneg == AUTONEG_ENABLE) + if (autoneg == AUTONEG_ENABLE) 
new_media = DE_MEDIA_TP_AUTO; - else if (ecmd->duplex == DUPLEX_FULL) + else if (duplex == DUPLEX_FULL) new_media = DE_MEDIA_TP_FD; else new_media = DE_MEDIA_TP; - if (!(ecmd->advertising & ADVERTISED_TP)) + if (!(advertising & ADVERTISED_TP)) return -EINVAL; - if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half))) + if (!(advertising & (ADVERTISED_10baseT_Full | + ADVERTISED_10baseT_Half))) return -EINVAL; break; } - media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1; + media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1; if ((new_media == de->media_type) && (media_lock == de->media_lock) && - (ecmd->advertising == de->media_advertise)) + (advertising == de->media_advertise)) return 0; /* nothing to change */ de_link_down(de); @@ -1582,7 +1591,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) de->media_type = new_media; de->media_lock = media_lock; - de->media_advertise = ecmd->advertising; + de->media_advertise = advertising; de_set_media(de); if (netif_running(de->dev)) de_start_rxtx(de); @@ -1604,25 +1613,27 @@ static int de_get_regs_len(struct net_device *dev) return DE_REGS_SIZE; } -static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int de_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); - rc = __de_get_settings(de, ecmd); + rc = __de_get_link_ksettings(de, cmd); spin_unlock_irq(&de->lock); return rc; } -static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int de_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); - rc = __de_set_settings(de, ecmd); + rc = __de_set_link_ksettings(de, cmd); spin_unlock_irq(&de->lock); return rc; @@ -1690,13 +1701,13 @@ static const struct ethtool_ops de_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = de_get_drvinfo, .get_regs_len = de_get_regs_len, - .get_settings = de_get_settings, - .set_settings = de_set_settings, .get_msglevel = de_get_msglevel, .set_msglevel = de_set_msglevel, .get_eeprom = de_get_eeprom, .nway_reset = de_nway_reset, .get_regs = de_get_regs, + .get_link_ksettings = de_get_link_ksettings, + .set_link_ksettings = de_set_link_ksettings, }; static void de21040_get_mac_address(struct de_private *de) -- cgit v1.2.3 From 6711a87a9b43331356dd12cb7bd9788ae434b67a Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 Jan 2017 19:11:06 +0100 Subject: net: dec: uli526x: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/uli526x.c | 41 +++++++++++++++++++------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index f82ebe5d89ee..8d98b259d1ba 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -926,48 +926,53 @@ static void uli526x_set_filter_mode(struct net_device * dev) } static void -ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) +ULi_ethtool_get_link_ksettings(struct uli526x_board_info *db, + struct ethtool_link_ksettings *cmd) { - ecmd->supported = (SUPPORTED_10baseT_Half | + u32 supported, advertising; + + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII); - ecmd->advertising = (ADVERTISED_10baseT_Half | + advertising = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_Autoneg | ADVERTISED_MII); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - ecmd->port = PORT_MII; - ecmd->phy_address = db->phy_addr; - - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.port = PORT_MII; + cmd->base.phy_address = db->phy_addr; - ethtool_cmd_speed_set(ecmd, SPEED_10); - ecmd->duplex = DUPLEX_HALF; + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) { - ethtool_cmd_speed_set(ecmd, SPEED_100); + cmd->base.speed = SPEED_100; } if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) { - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; } if(db->link_failed) { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } if (db->media_mode & ULI526X_AUTO) { - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; } } @@ -981,10 +986,12 @@ static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ struct uli526x_board_info *np = netdev_priv(dev); - ULi_ethtool_gset(np, cmd); + ULi_ethtool_get_link_ksettings(np, cmd); return 0; } @@ -1006,9 +1013,9 @@ static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, .get_link = netdev_get_link, .get_wol = uli526x_get_wol, + .get_link_ksettings = netdev_get_link_ksettings, }; /* -- cgit v1.2.3 From 453e5f3dfc35668b590b528e9cefc2c84fd6bd81 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 Jan 2017 20:47:01 +0100 Subject: net: dec: winbond-840: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/winbond-840.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index bc9bf88e5831..d1f2f3cc7cfa 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1391,25 +1391,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; @@ -1439,12 +1441,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -- cgit v1.2.3 From 9022255006cd9cbf6be78954ed6217372b885f6a Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 Jan 2017 20:49:26 +0100 Subject: net: dlink: dl2k: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. The previous implementation of set_settings was modifying the value of speed and duplex, but with the new API, it's not possible. The structure ethtool_link_ksettings is defined as const. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dlink/dl2k.c | 71 ++++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 8c95a8a81e3c..1e350135f11d 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1256,52 +1256,63 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } -static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rio_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); + u32 supported, advertising; + if (np->phy_media) { /* fiber device */ - cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE; - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_INTERNAL; + supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; + advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; } else { /* copper device */ - cmd->supported = SUPPORTED_10baseT_Half | + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII; - cmd->advertising = ADVERTISED_10baseT_Half | + advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full| + ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_MII; - cmd->port = PORT_MII; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.port = PORT_MII; } - if ( np->link_status ) { - ethtool_cmd_speed_set(cmd, np->speed); - cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + if (np->link_status) { + cmd->base.speed = np->speed; + cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; } else { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - if ( np->an_enable) - cmd->autoneg = AUTONEG_ENABLE; + if (np->an_enable) + cmd->base.autoneg = AUTONEG_ENABLE; else - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; + + cmd->base.phy_address = np->phy_addr; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - cmd->phy_address = np->phy_addr; return 0; } -static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rio_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); + u32 speed = cmd->base.speed; + u8 duplex = cmd->base.duplex; + netif_carrier_off(dev); - if (cmd->autoneg == AUTONEG_ENABLE) { - if (np->an_enable) + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (np->an_enable) { return 0; - else { + } else { np->an_enable = 1; mii_set_media(dev); return 0; @@ -1309,18 +1320,18 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } else { np->an_enable = 0; if (np->speed == 1000) { - ethtool_cmd_speed_set(cmd, SPEED_100); - cmd->duplex = DUPLEX_FULL; + speed = SPEED_100; + duplex = DUPLEX_FULL; printk("Warning!! 
Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); } - switch (ethtool_cmd_speed(cmd)) { + switch (speed) { case SPEED_10: np->speed = 10; - np->full_duplex = (cmd->duplex == DUPLEX_FULL); + np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_100: np->speed = 100; - np->full_duplex = (cmd->duplex == DUPLEX_FULL); + np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_1000: /* not supported */ default: @@ -1339,9 +1350,9 @@ static u32 rio_get_link(struct net_device *dev) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = rio_get_drvinfo, - .get_settings = rio_get_settings, - .set_settings = rio_set_settings, .get_link = rio_get_link, + .get_link_ksettings = rio_get_link_ksettings, + .set_link_ksettings = rio_set_link_ksettings, }; static int -- cgit v1.2.3 From 88c513d478def1d40593481de45f0d73108edc5c Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 Jan 2017 20:52:12 +0100 Subject: net: dlink: sundance: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/dlink/sundance.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 2e5b66762e15..2704bcf023be 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1664,21 +1664,23 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); - mii_ethtool_gset(&np->mii_if, ecmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return 0; } -static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); - res = mii_ethtool_sset(&np->mii_if, ecmd); + res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return res; } @@ -1800,8 +1802,6 @@ static int sundance_set_wol(struct net_device *dev, static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = get_drvinfo, - .get_settings = get_settings, - .set_settings = set_settings, .nway_reset = nway_reset, .get_link = get_link, .get_wol = sundance_get_wol, @@ -1811,6 +1811,8 @@ static const struct ethtool_ops ethtool_ops = { .get_strings = get_strings, .get_sset_count = get_sset_count, .get_ethtool_stats = get_ethtool_stats, + .get_link_ksettings = get_link_ksettings, + .set_link_ksettings = set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -- cgit v1.2.3 From fd90095db59612de89da3439032fb3944cccea80 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 2 Jan 2017 17:42:14 +0100 Subject: net: emulex: benet: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_ethtool.c | 73 ++++++++++++-------------- 1 file changed, 34 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 0a48a31225e6..7d1819c9e8cc 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -606,7 +606,8 @@ bool be_pause_supported(struct be_adapter *adapter) false : true; } -static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +static int be_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct be_adapter *adapter = netdev_priv(netdev); u8 link_status; @@ -614,13 +615,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) int status; u32 auto_speeds; u32 fixed_speeds; + u32 supported = 0, advertising = 0; if (adapter->phy.link_speed < 0) { status = be_cmd_link_status_query(adapter, &link_speed, &link_status, 0); if (!status) be_link_status_update(adapter, link_status); - ethtool_cmd_speed_set(ecmd, link_speed); + cmd->base.speed = link_speed; status = be_cmd_get_phy_info(adapter); if (!status) { @@ -629,58 +631,51 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) be_cmd_query_cable_type(adapter); - ecmd->supported = + supported = convert_to_et_setting(adapter, auto_speeds | fixed_speeds); - ecmd->advertising = + advertising = convert_to_et_setting(adapter, auto_speeds); - ecmd->port = be_get_port_type(adapter); + cmd->base.port = be_get_port_type(adapter); if (adapter->phy.auto_speeds_supported) { - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; + advertising |= ADVERTISED_Autoneg; } - ecmd->supported |= SUPPORTED_Pause; + supported |= SUPPORTED_Pause; if (be_pause_supported(adapter)) - ecmd->advertising |= ADVERTISED_Pause; - - switch (adapter->phy.interface_type) { - case PHY_TYPE_KR_10GB: - case PHY_TYPE_KX4_10GB: - ecmd->transceiver = XCVR_INTERNAL; - break; - default: - ecmd->transceiver = XCVR_EXTERNAL; - break; - } + advertising |= ADVERTISED_Pause; } else { - ecmd->port = PORT_OTHER; - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_DUMMY1; + cmd->base.port = PORT_OTHER; + cmd->base.autoneg = AUTONEG_DISABLE; } /* Save for future use */ - adapter->phy.link_speed = ethtool_cmd_speed(ecmd); - adapter->phy.port_type = ecmd->port; - adapter->phy.transceiver = ecmd->transceiver; - adapter->phy.autoneg = ecmd->autoneg; - adapter->phy.advertising = ecmd->advertising; - adapter->phy.supported = ecmd->supported; + adapter->phy.link_speed = cmd->base.speed; + adapter->phy.port_type = cmd->base.port; + adapter->phy.autoneg = cmd->base.autoneg; + adapter->phy.advertising = advertising; + adapter->phy.supported = supported; } else { - ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed); - ecmd->port = adapter->phy.port_type; - ecmd->transceiver = adapter->phy.transceiver; - ecmd->autoneg = adapter->phy.autoneg; - ecmd->advertising = adapter->phy.advertising; - ecmd->supported = adapter->phy.supported; + cmd->base.speed = adapter->phy.link_speed; + cmd->base.port = adapter->phy.port_type; + cmd->base.autoneg = adapter->phy.autoneg; + advertising = adapter->phy.advertising; + supported = adapter->phy.supported; } - ecmd->duplex = netif_carrier_ok(netdev) ? 
DUPLEX_FULL : DUPLEX_UNKNOWN; - ecmd->phy_address = adapter->port_num; + cmd->base.duplex = netif_carrier_ok(netdev) ? + DUPLEX_FULL : DUPLEX_UNKNOWN; + cmd->base.phy_address = adapter->port_num; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } @@ -1399,7 +1394,6 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags) } const struct ethtool_ops be_ethtool_ops = { - .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, .get_wol = be_get_wol, .set_wol = be_set_wol, @@ -1433,5 +1427,6 @@ const struct ethtool_ops be_ethtool_ops = { .get_channels = be_get_channels, .set_channels = be_set_channels, .get_module_info = be_get_module_info, - .get_module_eeprom = be_get_module_eeprom + .get_module_eeprom = be_get_module_eeprom, + .get_link_ksettings = be_get_link_ksettings, }; -- cgit v1.2.3 From 9da12b6454c4a762077fc33b56bdfd49acdae3ae Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 2 Jan 2017 19:53:11 +0100 Subject: net: faraday: ftmac100: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/faraday/ftmac100.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index dce5f7b7f772..c0ddbbe6c226 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -825,16 +825,18 @@ static void ftmac100_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } -static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ftmac100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ftmac100 *priv = netdev_priv(netdev); - return mii_ethtool_gset(&priv->mii, cmd); + return mii_ethtool_get_link_ksettings(&priv->mii, cmd); } -static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ftmac100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ftmac100 *priv = netdev_priv(netdev); - return mii_ethtool_sset(&priv->mii, cmd); + return mii_ethtool_set_link_ksettings(&priv->mii, cmd); } static int ftmac100_nway_reset(struct net_device *netdev) @@ -850,11 +852,11 @@ static u32 ftmac100_get_link(struct net_device *netdev) } static const struct ethtool_ops ftmac100_ethtool_ops = { - .set_settings = ftmac100_set_settings, - .get_settings = ftmac100_get_settings, .get_drvinfo = ftmac100_get_drvinfo, .nway_reset = ftmac100_nway_reset, .get_link = ftmac100_get_link, + .get_link_ksettings = ftmac100_get_link_ksettings, + .set_link_ksettings = ftmac100_set_link_ksettings, }; /****************************************************************************** -- cgit v1.2.3 From 0a0a8d6b0e88d947d7ab3198b325e31f677bebc2 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 2 Jan 2017 20:47:27 +0100 Subject: net: fealnx: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/fealnx.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 9cb436cb3745..766636a7c25e 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1817,25 +1817,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii, cmd); + rc = mii_ethtool_get_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii, cmd); + rc = mii_ethtool_set_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; @@ -1865,12 +1867,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -- cgit v1.2.3 From f69b22e65ecfcb3648304e0e32a1df6f0d421375 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 4 Nov 2015 13:42:39 -0800 Subject: RDS: log the address on bind failure It's useful to know the IP address when RDS fails to bind a connection. Thus, adding it to the error message. Orabug: 21894138 Reviewed-by: Wei Lin Guay Signed-off-by: Santosh Shilimkar --- net/rds/bind.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/rds/bind.c b/net/rds/bind.c index 095f6ce583fe..3a915bedb76c 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -176,8 +176,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (!trans) { ret = -EADDRNOTAVAIL; rds_remove_bound(rs); - printk_ratelimited(KERN_INFO "RDS: rds_bind() could not find a transport, " - "load rds_tcp or rds_rdma?\n"); + pr_info_ratelimited("RDS: %s could not find a transport for %pI4, load rds_tcp or rds_rdma?\n", + __func__, &sin->sin_addr.s_addr); goto out; } -- cgit v1.2.3 From bb7897631d2379ec198635cc24bf1e8c629d0bda Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 4 Dec 2016 16:41:29 -0800 Subject: RDS: mark few internal functions static to make sparse build happy Fixes below warnings: warning: symbol 'rds_send_probe' was not declared. Should it be static? warning: symbol 'rds_send_ping' was not declared. Should it be static? warning: symbol 'rds_tcp_accept_one_path' was not declared. Should it be static? warning: symbol 'rds_walk_conn_path_info' was not declared. Should it be static? 
Signed-off-by: Santosh Shilimkar --- net/rds/connection.c | 10 +++++----- net/rds/send.c | 4 ++-- net/rds/tcp_listen.c | 1 + 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/net/rds/connection.c b/net/rds/connection.c index fe9d31c0b22d..0e04dcceb1d4 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -545,11 +545,11 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len, } EXPORT_SYMBOL_GPL(rds_for_each_conn_info); -void rds_walk_conn_path_info(struct socket *sock, unsigned int len, - struct rds_info_iterator *iter, - struct rds_info_lengths *lens, - int (*visitor)(struct rds_conn_path *, void *), - size_t item_len) +static void rds_walk_conn_path_info(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens, + int (*visitor)(struct rds_conn_path *, void *), + size_t item_len) { u64 buffer[(item_len + 7) / 8]; struct hlist_head *head; diff --git a/net/rds/send.c b/net/rds/send.c index 77c8c6e613ad..bb13c56fc2f8 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1169,7 +1169,7 @@ out: * or * RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED */ -int +static int rds_send_probe(struct rds_conn_path *cp, __be16 sport, __be16 dport, u8 h_flags) { @@ -1238,7 +1238,7 @@ rds_send_pong(struct rds_conn_path *cp, __be16 dport) return rds_send_probe(cp, 0, dport, 0); } -void +static void rds_send_ping(struct rds_connection *conn) { unsigned long flags; diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index f74bab3ecdca..67d0929c7d3d 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -79,6 +79,7 @@ bail: * smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side * by moving them to CONNECTING in this function. */ +static struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn) { int i; -- cgit v1.2.3 From ff3f19a2f608ee406331e8c7b60d7376e75c2157 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 14 Mar 2016 07:43:55 -0700 Subject: RDS: IB: include faddr in connection log Also use pr_* for it. Signed-off-by: Santosh Shilimkar --- net/rds/ib_cm.c | 19 +++++++++---------- net/rds/ib_recv.c | 4 ++-- net/rds/ib_send.c | 4 ++-- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 5b2ab95afa07..b9da1e59ecc1 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -113,19 +113,18 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even } if (conn->c_version < RDS_PROTOCOL(3, 1)) { - printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed," - " no longer supported\n", - &conn->c_faddr, - RDS_PROTOCOL_MAJOR(conn->c_version), - RDS_PROTOCOL_MINOR(conn->c_version)); + pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n", + &conn->c_laddr, &conn->c_faddr, + RDS_PROTOCOL_MAJOR(conn->c_version), + RDS_PROTOCOL_MINOR(conn->c_version)); rds_conn_destroy(conn); return; } else { - printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n", - &conn->c_faddr, - RDS_PROTOCOL_MAJOR(conn->c_version), - RDS_PROTOCOL_MINOR(conn->c_version), - ic->i_flowctl ? ", flow control" : ""); + pr_notice("RDS/IB: connected <%pI4,%pI4> version %u.%u%s\n", + &conn->c_laddr, &conn->c_faddr, + RDS_PROTOCOL_MAJOR(conn->c_version), + RDS_PROTOCOL_MINOR(conn->c_version), + ic->i_flowctl ? 
", flow control" : ""); } /* diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 606a11f681d2..6803b75eb8bd 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -980,8 +980,8 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, } else { /* We expect errors as the qp is drained during shutdown */ if (rds_conn_up(conn) || rds_conn_connecting(conn)) - rds_ib_conn_error(conn, "recv completion on %pI4 had status %u (%s), disconnecting and reconnecting\n", - &conn->c_faddr, + rds_ib_conn_error(conn, "recv completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n", + &conn->c_laddr, &conn->c_faddr, wc->status, ib_wc_status_msg(wc->status)); } diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 84d90c97332f..19eca5c4c00c 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -300,8 +300,8 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) /* We expect errors as the qp is drained during shutdown */ if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) { - rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n", - &conn->c_faddr, wc->status, + rds_ib_conn_error(conn, "send completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n", + &conn->c_laddr, &conn->c_faddr, wc->status, ib_wc_status_msg(wc->status)); } } -- cgit v1.2.3 From fab8688d7185a1fe01ee9e0930fc59c0f161ee93 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 4 Jul 2016 15:31:21 -0700 Subject: RDS: IB: make the transport retry count smallest Transport retry is not much useful since it indicate packet loss in fabric so its better to failover fast rather than longer retry. Signed-off-by: Santosh Shilimkar --- net/rds/ib.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/ib.h b/net/rds/ib.h index 45ac8e8e58f4..f4e81214e70a 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -16,7 +16,7 @@ #define RDS_IB_DEFAULT_SEND_WR 256 #define RDS_IB_DEFAULT_FR_WR 512 -#define RDS_IB_DEFAULT_RETRY_COUNT 2 +#define RDS_IB_DEFAULT_RETRY_COUNT 1 #define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */ -- cgit v1.2.3 From 3e56c2f856d7aba6a03feea834d68f9c05f7d0b6 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 4 Dec 2016 16:25:43 -0800 Subject: RDS: RDMA: fix the ib_map_mr_sg_zbva() argument Fixes warning: Using plain integer as NULL pointer Signed-off-by: Santosh Shilimkar --- net/rds/ib_frmr.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c index d921adc62765..66b3d6228a15 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -104,14 +104,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) struct rds_ib_frmr *frmr = &ibmr->u.frmr; struct ib_send_wr *failed_wr; struct ib_reg_wr reg_wr; - int ret; + int ret, off = 0; while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { atomic_inc(&ibmr->ic->i_fastreg_wrs); cpu_relax(); } - ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 0, PAGE_SIZE); + ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, + &off, PAGE_SIZE); if (unlikely(ret != ibmr->sg_len)) return ret < 0 ? ret : -EINVAL; -- cgit v1.2.3 From 8d5d8a5fd7f9337b2eff689df14ff3ae617f3ae6 Mon Sep 17 00:00:00 2001 From: Qing Huang Date: Mon, 4 Jul 2016 16:29:13 -0700 Subject: RDS: RDMA: start rdma listening after init This prevents RDS from handling incoming rdma packets before RDS completes initializing its recv/send components. 
Signed-off-by: Qing Huang Signed-off-by: Santosh Shilimkar --- net/rds/rdma_transport.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index d5f311767157..fc59821f0a27 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -206,18 +206,13 @@ static int rds_rdma_init(void) { int ret; - ret = rds_rdma_listen_init(); + ret = rds_ib_init(); if (ret) goto out; - ret = rds_ib_init(); + ret = rds_rdma_listen_init(); if (ret) - goto err_ib_init; - - goto out; - -err_ib_init: - rds_rdma_listen_stop(); + rds_ib_exit(); out: return ret; } -- cgit v1.2.3 From 584a8279a44a800dea5a5c1e9d53a002e03016b4 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 4 Jul 2016 17:04:37 -0700 Subject: RDS: RDMA: return appropriate error on rdma map failures The first message to a remote node should prompt a new connection even if it is RDMA operation. For RDMA operation the MR mapping can fail because connections is not yet up. Since the connection establishment is asynchronous, we make sure the map failure because of unavailable connection reach to the user by appropriate error code. Before returning to the user, lets trigger the connection so that its ready for the next retry. Signed-off-by: Santosh Shilimkar --- net/rds/send.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/net/rds/send.c b/net/rds/send.c index bb13c56fc2f8..0a6f38b1c8a5 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -945,6 +945,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, ret = rds_cmsg_rdma_map(rs, rm, cmsg); if (!ret) *allocated_mr = 1; + else if (ret == -ENODEV) + /* Accommodate the get_mr() case which can fail + * if connection isn't established yet. + */ + ret = -EAGAIN; break; case RDS_CMSG_ATOMIC_CSWP: case RDS_CMSG_ATOMIC_FADD: @@ -1082,8 +1087,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) /* Parse any control messages the user may have included. */ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); - if (ret) + if (ret) { + /* Trigger connection so that its ready for the next retry */ + if (ret == -EAGAIN) + rds_conn_connect_if_down(conn); goto out; + } if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", -- cgit v1.2.3 From 56012459310a1dbcc55c2dbf5500a9f7571402cb Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 8 Mar 2016 09:19:01 -0800 Subject: RDS: IB: split the mr registration and invalidation path MR invalidation in RDS is done in background thread and not in data path like registration. So break the dependency between them which helps to remove the performance bottleneck. 
Signed-off-by: Santosh Shilimkar --- net/rds/ib.h | 4 +++- net/rds/ib_cm.c | 9 +++++++-- net/rds/ib_frmr.c | 11 ++++++----- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/net/rds/ib.h b/net/rds/ib.h index f4e81214e70a..f14c26d22b27 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -14,7 +14,8 @@ #define RDS_IB_DEFAULT_RECV_WR 1024 #define RDS_IB_DEFAULT_SEND_WR 256 -#define RDS_IB_DEFAULT_FR_WR 512 +#define RDS_IB_DEFAULT_FR_WR 256 +#define RDS_IB_DEFAULT_FR_INV_WR 256 #define RDS_IB_DEFAULT_RETRY_COUNT 1 @@ -125,6 +126,7 @@ struct rds_ib_connection { /* To control the number of wrs from fastreg */ atomic_t i_fastreg_wrs; + atomic_t i_fastunreg_wrs; /* interrupt handling */ struct tasklet_struct i_send_tasklet; diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index b9da1e59ecc1..3002acf75766 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -382,7 +382,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn) * completion queue and send queue. This extra space is used for FRMR * registration and invalidation work requests */ - fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0); + fr_queue_space = rds_ibdev->use_fastreg ? + (RDS_IB_DEFAULT_FR_WR + 1) + + (RDS_IB_DEFAULT_FR_INV_WR + 1) + : 0; /* add the conn now so that connection establishment has the dev */ rds_ib_add_conn(rds_ibdev, conn); @@ -444,6 +447,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) attr.send_cq = ic->i_send_cq; attr.recv_cq = ic->i_recv_cq; atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR); + atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR); /* * XXX this can fail if max_*_wr is too large? Are we supposed @@ -766,7 +770,8 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp) wait_event(rds_ib_ring_empty_wait, rds_ib_ring_empty(&ic->i_recv_ring) && (atomic_read(&ic->i_signaled_sends) == 0) && - (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR)); + (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) && + (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR)); tasklet_kill(&ic->i_send_tasklet); tasklet_kill(&ic->i_recv_tasklet); diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c index 66b3d6228a15..48332a6ed738 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -241,8 +241,8 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr) if (frmr->fr_state != FRMR_IS_INUSE) goto out; - while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { - atomic_inc(&ibmr->ic->i_fastreg_wrs); + while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) { + atomic_inc(&ibmr->ic->i_fastunreg_wrs); cpu_relax(); } @@ -261,7 +261,7 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr) if (unlikely(ret)) { frmr->fr_state = FRMR_IS_STALE; frmr->fr_inv = false; - atomic_inc(&ibmr->ic->i_fastreg_wrs); + atomic_inc(&ibmr->ic->i_fastunreg_wrs); pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret); goto out; } @@ -289,9 +289,10 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) if (frmr->fr_inv) { frmr->fr_state = FRMR_IS_FREE; frmr->fr_inv = false; + atomic_inc(&ic->i_fastreg_wrs); + } else { + atomic_inc(&ic->i_fastunreg_wrs); } - - atomic_inc(&ic->i_fastreg_wrs); } void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed, -- cgit v1.2.3 From c536a068870a08fb7b35482e701a6b72e294b493 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 3 Jul 2016 19:14:10 -0700 Subject: RDS: RDMA: silence the use_once mr log flood In absence of extension headers, message log will keep flooding the console. 
As such even without use_once we can clean up the MRs so its not really an error case message so make it debug message Signed-off-by: Santosh Shilimkar --- net/rds/rdma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/rds/rdma.c b/net/rds/rdma.c index ea961144084f..4297f3f337d7 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -415,7 +415,8 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force) spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) { - printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key); + pr_debug("rds: trying to unuse MR with unknown r_key %u!\n", + r_key); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); return; } -- cgit v1.2.3 From 581d53c91cbf7b31415a9ed5e9a8b89d6af609b3 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sat, 9 Jul 2016 18:31:38 -0700 Subject: RDS: IB: track and log active side endpoint in connection Useful to know the active and passive end points in a RDS IB connection. Signed-off-by: Santosh Shilimkar --- net/rds/ib.h | 3 +++ net/rds/ib_cm.c | 11 +++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/net/rds/ib.h b/net/rds/ib.h index f14c26d22b27..5f02b4d8f10c 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -181,6 +181,9 @@ struct rds_ib_connection { /* Batched completions */ unsigned int i_unsignaled_wrs; + + /* Endpoint role in connection */ + bool i_active_side; }; /* This assumes that atomic_t is at least 32 bits */ diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 3002acf75766..4d1bf04b06b5 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -120,16 +120,17 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even rds_conn_destroy(conn); return; } else { - pr_notice("RDS/IB: connected <%pI4,%pI4> version %u.%u%s\n", + pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n", + ic->i_active_side ? "Active" : "Passive", &conn->c_laddr, &conn->c_faddr, RDS_PROTOCOL_MAJOR(conn->c_version), RDS_PROTOCOL_MINOR(conn->c_version), ic->i_flowctl ? ", flow control" : ""); } - /* - * Init rings and fill recv. this needs to wait until protocol negotiation - * is complete, since ring layout is different from 3.0 to 3.1. + /* Init rings and fill recv. this needs to wait until protocol + * negotiation is complete, since ring layout is different + * from 3.1 to 4.1. */ rds_ib_send_init_ring(ic); rds_ib_recv_init_ring(ic); @@ -685,6 +686,7 @@ out: if (ic->i_cm_id == cm_id) ret = 0; } + ic->i_active_side = true; return ret; } @@ -859,6 +861,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp) ic->i_sends = NULL; vfree(ic->i_recvs); ic->i_recvs = NULL; + ic->i_active_side = false; } int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp) -- cgit v1.2.3 From 09b2b8f52895addd9bf28dc5ac98ff5cc750cf9a Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sat, 9 Jul 2016 17:14:02 -0700 Subject: RDS: IB: add few useful cache stasts Tracks the ib receive cache total, incoming and frag allocations. 
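The new rds_ib_stats_add() macro rides on the same per-CPU statistics scheme as rds_ib_stats_inc(): the hot path bumps only the local CPU's copy and readers fold all copies together when the stats are dumped. A rough userspace analogue of that pattern, with made-up names and a fixed CPU count rather than the kernel's per-CPU machinery:

#include <stdint.h>
#include <stddef.h>

#define NR_CPUS 64

struct ib_stats {
	uint64_t rx_total_incs;
	uint64_t rx_total_frags;
};

static struct ib_stats percpu_stats[NR_CPUS];

/* Hot path: touch only this CPU's copy, so no locks and no shared cache line. */
#define stats_add(cpu, member, count) \
	(percpu_stats[(cpu)].member += (count))

/* Slow path (stat dumps): fold all copies together for one member. */
static uint64_t stats_sum(size_t member_off)
{
	uint64_t sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += *(const uint64_t *)((const char *)&percpu_stats[cpu] +
					   member_off);
	return sum;
}

A reader would call, for example, stats_sum(offsetof(struct ib_stats, rx_total_frags)); in the kernel the equivalent folding happens when the counters are copied out through rds_ib_stats_info_copy().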
Signed-off-by: Santosh Shilimkar --- net/rds/ib.h | 7 +++++++ net/rds/ib_recv.c | 6 ++++++ net/rds/ib_stats.c | 2 ++ 3 files changed, 15 insertions(+) diff --git a/net/rds/ib.h b/net/rds/ib.h index 5f02b4d8f10c..c62e5513d306 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -151,6 +151,7 @@ struct rds_ib_connection { u64 i_ack_recv; /* last ACK received */ struct rds_ib_refill_cache i_cache_incs; struct rds_ib_refill_cache i_cache_frags; + atomic_t i_cache_allocs; /* sending acks */ unsigned long i_ack_flags; @@ -254,6 +255,8 @@ struct rds_ib_statistics { uint64_t s_ib_rx_refill_from_cq; uint64_t s_ib_rx_refill_from_thread; uint64_t s_ib_rx_alloc_limit; + uint64_t s_ib_rx_total_frags; + uint64_t s_ib_rx_total_incs; uint64_t s_ib_rx_credit_updates; uint64_t s_ib_ack_sent; uint64_t s_ib_ack_send_failure; @@ -276,6 +279,8 @@ struct rds_ib_statistics { uint64_t s_ib_rdma_mr_1m_reused; uint64_t s_ib_atomic_cswp; uint64_t s_ib_atomic_fadd; + uint64_t s_ib_recv_added_to_cache; + uint64_t s_ib_recv_removed_from_cache; }; extern struct workqueue_struct *rds_ib_wq; @@ -406,6 +411,8 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); /* ib_stats.c */ DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) +#define rds_ib_stats_add(member, count) \ + rds_stats_add_which(rds_ib_stats, member, count) unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail); diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 6803b75eb8bd..4b0f12679219 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -194,6 +194,8 @@ static void rds_ib_frag_free(struct rds_ib_connection *ic, rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg)); rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags); + atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs); + rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE); } /* Recycle inc after freeing attached frags */ @@ -261,6 +263,7 @@ static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *i atomic_dec(&rds_ib_allocation); return NULL; } + rds_ib_stats_inc(s_ib_rx_total_incs); } INIT_LIST_HEAD(&ibinc->ii_frags); rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr); @@ -278,6 +281,8 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags); if (cache_item) { frag = container_of(cache_item, struct rds_page_frag, f_cache_entry); + atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs); + rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE); } else { frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask); if (!frag) @@ -290,6 +295,7 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic kmem_cache_free(rds_ib_frag_slab, frag); return NULL; } + rds_ib_stats_inc(s_ib_rx_total_frags); } INIT_LIST_HEAD(&frag->f_item); diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c index 7e78dca1f252..9252ad126335 100644 --- a/net/rds/ib_stats.c +++ b/net/rds/ib_stats.c @@ -55,6 +55,8 @@ static const char *const rds_ib_stat_names[] = { "ib_rx_refill_from_cq", "ib_rx_refill_from_thread", "ib_rx_alloc_limit", + "ib_rx_total_frags", + "ib_rx_total_incs", "ib_rx_credit_updates", "ib_ack_sent", "ib_ack_send_failure", -- cgit v1.2.3 From be2f76eacc278c272f26d46e4168efe5a55f5383 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 4 Jul 2016 16:16:36 -0700 Subject: RDS: IB: Add vector spreading for cqs Based on 
available device vectors, allocate cqs accordingly to get better spread of completion vectors which helps performace great deal.. Signed-off-by: Santosh Shilimkar --- net/rds/ib.c | 11 +++++++++++ net/rds/ib.h | 5 +++++ net/rds/ib_cm.c | 40 +++++++++++++++++++++++++++++++++++++--- 3 files changed, 53 insertions(+), 3 deletions(-) diff --git a/net/rds/ib.c b/net/rds/ib.c index 5680d90b0b77..8d70884d7bb6 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -111,6 +111,9 @@ static void rds_ib_dev_free(struct work_struct *work) kfree(i_ipaddr); } + if (rds_ibdev->vector_load) + kfree(rds_ibdev->vector_load); + kfree(rds_ibdev); } @@ -159,6 +162,14 @@ static void rds_ib_add_one(struct ib_device *device) rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom; rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom; + rds_ibdev->vector_load = kzalloc(sizeof(int) * device->num_comp_vectors, + GFP_KERNEL); + if (!rds_ibdev->vector_load) { + pr_err("RDS/IB: %s failed to allocate vector memory\n", + __func__); + goto put_dev; + } + rds_ibdev->dev = device; rds_ibdev->pd = ib_alloc_pd(device, 0); if (IS_ERR(rds_ibdev->pd)) { diff --git a/net/rds/ib.h b/net/rds/ib.h index c62e5513d306..1fe9f79fead5 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -185,6 +185,10 @@ struct rds_ib_connection { /* Endpoint role in connection */ bool i_active_side; + + /* Send/Recv vectors */ + int i_scq_vector; + int i_rcq_vector; }; /* This assumes that atomic_t is at least 32 bits */ @@ -227,6 +231,7 @@ struct rds_ib_device { spinlock_t spinlock; /* protect the above */ atomic_t refcount; struct work_struct free_work; + int *vector_load; }; #define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device) diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 4d1bf04b06b5..33c8584ada1f 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -358,6 +358,28 @@ static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context) tasklet_schedule(&ic->i_send_tasklet); } +static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev) +{ + int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1]; + int index = rds_ibdev->dev->num_comp_vectors - 1; + int i; + + for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) { + if (rds_ibdev->vector_load[i] < min) { + index = i; + min = rds_ibdev->vector_load[i]; + } + } + + rds_ibdev->vector_load[index]++; + return index; +} + +static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index) +{ + rds_ibdev->vector_load[index]--; +} + /* * This needs to be very careful to not leave IS_ERR pointers around for * cleanup to trip over. 
@@ -399,25 +421,30 @@ static int rds_ib_setup_qp(struct rds_connection *conn) /* Protection domain and memory range */ ic->i_pd = rds_ibdev->pd; + ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev); cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1; - + cq_attr.comp_vector = ic->i_scq_vector; ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send, rds_ib_cq_event_handler, conn, &cq_attr); if (IS_ERR(ic->i_send_cq)) { ret = PTR_ERR(ic->i_send_cq); ic->i_send_cq = NULL; + ibdev_put_vector(rds_ibdev, ic->i_scq_vector); rdsdebug("ib_create_cq send failed: %d\n", ret); goto out; } + ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev); cq_attr.cqe = ic->i_recv_ring.w_nr; + cq_attr.comp_vector = ic->i_rcq_vector; ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv, rds_ib_cq_event_handler, conn, &cq_attr); if (IS_ERR(ic->i_recv_cq)) { ret = PTR_ERR(ic->i_recv_cq); ic->i_recv_cq = NULL; + ibdev_put_vector(rds_ibdev, ic->i_rcq_vector); rdsdebug("ib_create_cq recv failed: %d\n", ret); goto out; } @@ -780,10 +807,17 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp) /* first destroy the ib state that generates callbacks */ if (ic->i_cm_id->qp) rdma_destroy_qp(ic->i_cm_id); - if (ic->i_send_cq) + if (ic->i_send_cq) { + if (ic->rds_ibdev) + ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector); ib_destroy_cq(ic->i_send_cq); - if (ic->i_recv_cq) + } + + if (ic->i_recv_cq) { + if (ic->rds_ibdev) + ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector); ib_destroy_cq(ic->i_recv_cq); + } /* then free the resources that ib callbacks use */ if (ic->i_send_hdrs) -- cgit v1.2.3 From 941f8d55f6d613a460a5e080d25a38509f45eb75 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 18 Feb 2016 20:06:47 -0800 Subject: RDS: RDMA: Fix the composite message user notification When application sends an RDS RDMA composite message consist of RDMA transfer to be followed up by non RDMA payload, it expect to be notified *only* when the full message gets delivered. RDS RDMA notification doesn't behave this way though. Thanks to Venkat for debug and root casuing the issue where only first part of the message(RDMA) was successfully delivered but remainder payload delivery failed. In that case, application should not be notified with a false positive of message delivery success. Fix this case by making sure the user gets notified only after the full message delivery. 
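For context, the composite case being fixed is driven from user space as a single sendmsg() that carries both an RDMA request (as a control message) and a trailing data payload. A hypothetical sketch of that sender, assuming the uapi definitions from include/uapi/linux/rds.h and eliding the MR/cookie and remote vector setup:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rds.h>

/* Send one composite RDS message: RDMA args in a cmsg plus a data payload. */
static int send_composite(int fd, struct sockaddr *dst, socklen_t dlen,
			  struct rds_rdma_args *rdma, void *payload, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(*rdma))] = { 0 };
	struct iovec iov = { .iov_base = payload, .iov_len = len };
	struct msghdr msg = {
		.msg_name = dst, .msg_namelen = dlen,
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	rdma->flags |= RDS_RDMA_NOTIFY_ME;	/* ask to be told when it is delivered */

	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_RDMA_ARGS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*rdma));
	memcpy(CMSG_DATA(cmsg), rdma, sizeof(*rdma));

	return sendmsg(fd, &msg, 0);
}

With this fix, the completion notification keyed by rdma->user_token is generated only once the trailing data payload has been delivered as well, instead of after the RDMA transfer alone.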
Reviewed-by: Venkat Venkatsubra Signed-off-by: Santosh Shilimkar --- net/rds/ib_send.c | 25 +++++++++++++++---------- net/rds/rdma.c | 10 ++++++++++ net/rds/rds.h | 1 + net/rds/send.c | 4 +++- 4 files changed, 29 insertions(+), 11 deletions(-) diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 19eca5c4c00c..5e72de10c484 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -69,16 +69,6 @@ static void rds_ib_send_complete(struct rds_message *rm, complete(rm, notify_status); } -static void rds_ib_send_unmap_data(struct rds_ib_connection *ic, - struct rm_data_op *op, - int wc_status) -{ - if (op->op_nents) - ib_dma_unmap_sg(ic->i_cm_id->device, - op->op_sg, op->op_nents, - DMA_TO_DEVICE); -} - static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic, struct rm_rdma_op *op, int wc_status) @@ -139,6 +129,21 @@ static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic, rds_ib_stats_inc(s_ib_atomic_fadd); } +static void rds_ib_send_unmap_data(struct rds_ib_connection *ic, + struct rm_data_op *op, + int wc_status) +{ + struct rds_message *rm = container_of(op, struct rds_message, data); + + if (op->op_nents) + ib_dma_unmap_sg(ic->i_cm_id->device, + op->op_sg, op->op_nents, + DMA_TO_DEVICE); + + if (rm->rdma.op_active && rm->data.op_notify) + rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status); +} + /* * Unmap the resources associated with a struct send_work. * diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 4297f3f337d7..138aef644c56 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -627,6 +627,16 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, } op->op_notifier->n_user_token = args->user_token; op->op_notifier->n_status = RDS_RDMA_SUCCESS; + + /* Enable rmda notification on data operation for composite + * rds messages and make sure notification is enabled only + * for the data operation which follows it so that application + * gets notified only after full message gets delivered. + */ + if (rm->data.op_sg) { + rm->rdma.op_notify = 0; + rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); + } } /* The cookie contains the R_Key of the remote memory region, and diff --git a/net/rds/rds.h b/net/rds/rds.h index ebbf909b87ec..0bb8213c7d0b 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -419,6 +419,7 @@ struct rds_message { } rdma; struct rm_data_op { unsigned int op_active:1; + unsigned int op_notify:1; unsigned int op_nents; unsigned int op_count; unsigned int op_dmasg; diff --git a/net/rds/send.c b/net/rds/send.c index 0a6f38b1c8a5..45e025b65d29 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -476,12 +476,14 @@ void rds_rdma_send_complete(struct rds_message *rm, int status) struct rm_rdma_op *ro; struct rds_notifier *notifier; unsigned long flags; + unsigned int notify = 0; spin_lock_irqsave(&rm->m_rs_lock, flags); + notify = rm->rdma.op_notify | rm->data.op_notify; ro = &rm->rdma; if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && - ro->op_active && ro->op_notify && ro->op_notifier) { + ro->op_active && notify && ro->op_notifier) { notifier = ro->op_notifier; rs = rm->m_rs; sock_hold(rds_rs_to_sk(rs)); -- cgit v1.2.3 From cf657269d311d575eb196c7045579b3443631b8b Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 29 Sep 2016 11:07:11 -0700 Subject: RDS: IB: fix panic due to handlers running post teardown Shutdown code reaping loop takes care of emptying the CQ's before they being destroyed. And once tasklets are killed, the hanlders are not expected to run. 
But because of core tasklet code issues, tasklet handler could still run even after tasklet_kill, RDS IB shutdown code already reaps the CQs before freeing cq/qp resources so as such the handlers have nothing left to do post shutdown. On other hand any handler running after teardown and trying to access already freed qp/cq resources causes issues Patch fixes this race by makes sure that handlers returns without any action post teardown. Reviewed-by: Wengang Signed-off-by: Santosh Shilimkar --- net/rds/ib.h | 1 + net/rds/ib_cm.c | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/net/rds/ib.h b/net/rds/ib.h index 1fe9f79fead5..540458928f3c 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -185,6 +185,7 @@ struct rds_ib_connection { /* Endpoint role in connection */ bool i_active_side; + atomic_t i_cq_quiesce; /* Send/Recv vectors */ int i_scq_vector; diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 33c8584ada1f..ce3775abc6e7 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -128,6 +128,8 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even ic->i_flowctl ? ", flow control" : ""); } + atomic_set(&ic->i_cq_quiesce, 0); + /* Init rings and fill recv. this needs to wait until protocol * negotiation is complete, since ring layout is different * from 3.1 to 4.1. @@ -267,6 +269,10 @@ static void rds_ib_tasklet_fn_send(unsigned long data) rds_ib_stats_inc(s_ib_tasklet_call); + /* if cq has been already reaped, ignore incoming cq event */ + if (atomic_read(&ic->i_cq_quiesce)) + return; + poll_scq(ic, ic->i_send_cq, ic->i_send_wc); ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); poll_scq(ic, ic->i_send_cq, ic->i_send_wc); @@ -308,6 +314,10 @@ static void rds_ib_tasklet_fn_recv(unsigned long data) rds_ib_stats_inc(s_ib_tasklet_call); + /* if cq has been already reaped, ignore incoming cq event */ + if (atomic_read(&ic->i_cq_quiesce)) + return; + memset(&state, 0, sizeof(state)); poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); @@ -804,6 +814,8 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp) tasklet_kill(&ic->i_send_tasklet); tasklet_kill(&ic->i_recv_tasklet); + atomic_set(&ic->i_cq_quiesce, 1); + /* first destroy the ib state that generates callbacks */ if (ic->i_cm_id->qp) rdma_destroy_qp(ic->i_cm_id); -- cgit v1.2.3 From 192a798f52998a643cef84fce0204be56666b0bf Mon Sep 17 00:00:00 2001 From: Venkat Venkatsubra Date: Sat, 9 Jul 2016 17:36:20 -0700 Subject: RDS: add stat for socket recv memory usage Tracks the receive side memory added to scokets and removed from sockets. 
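The split into two monotonic counters fits the existing rds_stats_add() counter model, and the current amount of receive memory attached to sockets can still be derived as their difference. A small illustrative sketch with invented names:

#include <stdint.h>

struct recv_stats {
	uint64_t bytes_added;	/* everything ever queued to sockets */
	uint64_t bytes_removed;	/* everything ever consumed or dropped */
};

static void account_rcvbuf_delta(struct recv_stats *st, int64_t delta)
{
	if (delta > 0)
		st->bytes_added += (uint64_t)delta;
	else
		st->bytes_removed += (uint64_t)(-delta);
}

/* Derived gauge: cannot go negative because removals never outrun additions. */
static uint64_t bytes_in_sockets(const struct recv_stats *st)
{
	return st->bytes_added - st->bytes_removed;
}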
Signed-off-by: Venkat Venkatsubra Signed-off-by: Santosh Shilimkar --- net/rds/rds.h | 3 +++ net/rds/recv.c | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/net/rds/rds.h b/net/rds/rds.h index 0bb8213c7d0b..8ccd5a93e56c 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -631,6 +631,9 @@ struct rds_statistics { uint64_t s_cong_update_received; uint64_t s_cong_send_error; uint64_t s_cong_send_blocked; + uint64_t s_recv_bytes_added_to_socket; + uint64_t s_recv_bytes_removed_from_socket; + }; /* af_rds.c */ diff --git a/net/rds/recv.c b/net/rds/recv.c index 9d0666e5fe35..ba19eeeae85a 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -94,6 +94,10 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, return; rs->rs_rcv_bytes += delta; + if (delta > 0) + rds_stats_add(s_recv_bytes_added_to_socket, delta); + else + rds_stats_add(s_recv_bytes_removed_from_socket, -delta); now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " -- cgit v1.2.3 From f9fb69adb6c7acca60977a4db5a5f95b8e66c041 Mon Sep 17 00:00:00 2001 From: Avinash Repaka Date: Mon, 29 Feb 2016 15:30:57 -0800 Subject: RDS: make message size limit compliant with spec RDS support max message size as 1M but the code doesn't check this in all cases. Patch fixes it for RDMA & non-RDMA and RDS MR size and its enforced irrespective of underlying transport. Signed-off-by: Avinash Repaka Signed-off-by: Santosh Shilimkar --- net/rds/rdma.c | 9 ++++++++- net/rds/rds.h | 3 +++ net/rds/send.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 138aef644c56..f06fac4886b0 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -40,7 +40,6 @@ /* * XXX * - build with sparse - * - should we limit the size of a mr region? let transport return failure? * - should we detect duplicate keys on a socket? hmm. * - an rdma is an mlock, apply rlimit? */ @@ -200,6 +199,14 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, goto out; } + /* Restrict the size of mr irrespective of underlying transport + * To account for unaligned mr regions, subtract one from nr_pages + */ + if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) { + ret = -EMSGSIZE; + goto out; + } + rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", args->vec.addr, args->vec.bytes, nr_pages); diff --git a/net/rds/rds.h b/net/rds/rds.h index 8ccd5a93e56c..f713194e4620 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -50,6 +50,9 @@ void rdsdebug(char *fmt, ...) 
#define RDS_FRAG_SHIFT 12 #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) +/* Used to limit both RDMA and non-RDMA RDS message to 1MB */ +#define RDS_MAX_MSG_SIZE ((unsigned int)(1 << 20)) + #define RDS_CONG_MAP_BYTES (65536 / 8) #define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE) #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8) diff --git a/net/rds/send.c b/net/rds/send.c index 45e025b65d29..5cc64039caf7 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -994,6 +994,26 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn) return hash; } +static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes) +{ + struct rds_rdma_args *args; + struct cmsghdr *cmsg; + + for_each_cmsghdr(cmsg, msg) { + if (!CMSG_OK(msg, cmsg)) + return -EINVAL; + + if (cmsg->cmsg_level != SOL_RDS) + continue; + + if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { + args = CMSG_DATA(cmsg); + *rdma_bytes += args->remote_vec.bytes; + } + } + return 0; +} + int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) { struct sock *sk = sock->sk; @@ -1008,6 +1028,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) int nonblock = msg->msg_flags & MSG_DONTWAIT; long timeo = sock_sndtimeo(sk, nonblock); struct rds_conn_path *cpath; + size_t total_payload_len = payload_len, rdma_payload_len = 0; /* Mirror Linux UDP mirror of BSD error message compatibility */ /* XXX: Perhaps MSG_MORE someday */ @@ -1040,6 +1061,16 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) } release_sock(sk); + ret = rds_rdma_bytes(msg, &rdma_payload_len); + if (ret) + goto out; + + total_payload_len += rdma_payload_len; + if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) { + ret = -EMSGSIZE; + goto out; + } + if (payload_len > rds_sk_sndbuf(rs)) { ret = -EMSGSIZE; goto out; -- cgit v1.2.3 From 3289025aedc018f8fd9d0e37fb9efa0c6d531ffa Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 4 Jul 2016 22:35:15 -0700 Subject: RDS: add receive message trace used by application Socket option to tap receive path latency in various stages in nano seconds. It can be enabled on selective sockets using using SO_RDS_MSG_RXPATH_LATENCY socket option. RDS will return the data to application with RDS_CMSG_RXPATH_LATENCY in defined format. Scope is left to add more trace points for future without need of change in the interface. Reviewed-by: Sowmini Varadhan Signed-off-by: Santosh Shilimkar --- include/uapi/linux/rds.h | 33 +++++++++++++++++++++++++++++++++ net/rds/af_rds.c | 28 ++++++++++++++++++++++++++++ net/rds/ib_recv.c | 4 ++++ net/rds/rds.h | 10 ++++++++++ net/rds/recv.c | 32 +++++++++++++++++++++++++++++--- net/rds/tcp_recv.c | 5 +++++ 6 files changed, 109 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 0f9265cb2a96..3833113ab2c0 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -52,6 +52,13 @@ #define RDS_GET_MR_FOR_DEST 7 #define SO_RDS_TRANSPORT 8 +/* Socket option to tap receive path latency + * SO_RDS: SO_RDS_MSG_RXPATH_LATENCY + * Format used struct rds_rx_trace_so + */ +#define SO_RDS_MSG_RXPATH_LATENCY 10 + + /* supported values for SO_RDS_TRANSPORT */ #define RDS_TRANS_IB 0 #define RDS_TRANS_IWARP 1 @@ -77,6 +84,12 @@ * the same as for the GET_MR setsockopt. * RDS_CMSG_RDMA_STATUS (recvmsg) * Returns the status of a completed RDMA operation. 
+ * RDS_CMSG_RXPATH_LATENCY(recvmsg) + * Returns rds message latencies in various stages of receive + * path in nS. Its set per socket using SO_RDS_MSG_RXPATH_LATENCY + * socket option. Legitimate points are defined in + * enum rds_message_rxpath_latency. More points can be added in + * future. CSMG format is struct rds_cmsg_rx_trace. */ #define RDS_CMSG_RDMA_ARGS 1 #define RDS_CMSG_RDMA_DEST 2 @@ -87,6 +100,7 @@ #define RDS_CMSG_ATOMIC_CSWP 7 #define RDS_CMSG_MASKED_ATOMIC_FADD 8 #define RDS_CMSG_MASKED_ATOMIC_CSWP 9 +#define RDS_CMSG_RXPATH_LATENCY 11 #define RDS_INFO_FIRST 10000 #define RDS_INFO_COUNTERS 10000 @@ -171,6 +185,25 @@ struct rds_info_rdma_connection { uint32_t rdma_mr_size; }; +/* RDS message Receive Path Latency points */ +enum rds_message_rxpath_latency { + RDS_MSG_RX_HDR_TO_DGRAM_START = 0, + RDS_MSG_RX_DGRAM_REASSEMBLE, + RDS_MSG_RX_DGRAM_DELIVERED, + RDS_MSG_RX_DGRAM_TRACE_MAX +}; + +struct rds_rx_trace_so { + u8 rx_traces; + u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; +}; + +struct rds_cmsg_rx_trace { + u8 rx_traces; + u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; + u64 rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX]; +}; + /* * Congestion monitoring. * Congestion control in RDS happens at the host connection diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 2ac1e6194be3..fd8217404162 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -298,6 +298,30 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval, return 0; } +static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, + int optlen) +{ + struct rds_rx_trace_so trace; + int i; + + if (optlen != sizeof(struct rds_rx_trace_so)) + return -EFAULT; + + if (copy_from_user(&trace, optval, sizeof(trace))) + return -EFAULT; + + rs->rs_rx_traces = trace.rx_traces; + for (i = 0; i < rs->rs_rx_traces; i++) { + if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) { + rs->rs_rx_traces = 0; + return -EFAULT; + } + rs->rs_rx_trace[i] = trace.rx_trace_pos[i]; + } + + return 0; +} + static int rds_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { @@ -338,6 +362,9 @@ static int rds_setsockopt(struct socket *sock, int level, int optname, ret = rds_enable_recvtstamp(sock->sk, optval, optlen); release_sock(sock->sk); break; + case SO_RDS_MSG_RXPATH_LATENCY: + ret = rds_recv_track_latency(rs, optval, optlen); + break; default: ret = -ENOPROTOOPT; } @@ -484,6 +511,7 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol) INIT_LIST_HEAD(&rs->rs_cong_list); spin_lock_init(&rs->rs_rdma_lock); rs->rs_rdma_keys = RB_ROOT; + rs->rs_rx_traces = 0; spin_lock_bh(&rds_sock_lock); list_add_tail(&rs->rs_item, &rds_sock_list); diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 4b0f12679219..e10624aa6959 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -911,8 +911,12 @@ static void rds_ib_process_recv(struct rds_connection *conn, ic->i_ibinc = ibinc; hdr = &ibinc->ii_inc.i_hdr; + ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] = + local_clock(); memcpy(hdr, ihdr, sizeof(*hdr)); ic->i_recv_data_rem = be32_to_cpu(hdr->h_len); + ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] = + local_clock(); rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc, ic->i_recv_data_rem, hdr->h_flags); diff --git a/net/rds/rds.h b/net/rds/rds.h index f713194e4620..07fff73dd4f3 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -253,6 +253,11 @@ struct rds_ext_header_rdma_dest { #define RDS_EXTHDR_GEN_NUM 6 #define __RDS_EXTHDR_MAX 16 /* for now */ 
+#define RDS_RX_MAX_TRACES (RDS_MSG_RX_DGRAM_TRACE_MAX + 1) +#define RDS_MSG_RX_HDR 0 +#define RDS_MSG_RX_START 1 +#define RDS_MSG_RX_END 2 +#define RDS_MSG_RX_CMSG 3 struct rds_incoming { atomic_t i_refcount; @@ -265,6 +270,7 @@ struct rds_incoming { rds_rdma_cookie_t i_rdma_cookie; struct timeval i_rx_tstamp; + u64 i_rx_lat_trace[RDS_RX_MAX_TRACES]; }; struct rds_mr { @@ -575,6 +581,10 @@ struct rds_sock { unsigned char rs_recverr, rs_cong_monitor; u32 rs_hash_initval; + + /* Socket receive path trace points*/ + u8 rs_rx_traces; + u8 rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX]; }; static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk) diff --git a/net/rds/recv.c b/net/rds/recv.c index ba19eeeae85a..8b7e7b7f2c2d 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -43,6 +43,8 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, __be32 saddr) { + int i; + atomic_set(&inc->i_refcount, 1); INIT_LIST_HEAD(&inc->i_item); inc->i_conn = conn; @@ -50,6 +52,9 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, inc->i_rdma_cookie = 0; inc->i_rx_tstamp.tv_sec = 0; inc->i_rx_tstamp.tv_usec = 0; + + for (i = 0; i < RDS_RX_MAX_TRACES; i++) + inc->i_rx_lat_trace[i] = 0; } EXPORT_SYMBOL_GPL(rds_inc_init); @@ -373,6 +378,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, if (sock_flag(sk, SOCK_RCVTSTAMP)) do_gettimeofday(&inc->i_rx_tstamp); rds_inc_addref(inc); + inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock(); list_add_tail(&inc->i_item, &rs->rs_recv_queue); __rds_wake_sk_sleep(sk); } else { @@ -534,7 +540,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST, sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie); if (ret) - return ret; + goto out; } if ((inc->i_rx_tstamp.tv_sec != 0) && @@ -543,10 +549,30 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, sizeof(struct timeval), &inc->i_rx_tstamp); if (ret) - return ret; + goto out; } - return 0; + if (rs->rs_rx_traces) { + struct rds_cmsg_rx_trace t; + int i, j; + + inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); + t.rx_traces = rs->rs_rx_traces; + for (i = 0; i < rs->rs_rx_traces; i++) { + j = rs->rs_rx_trace[i]; + t.rx_trace_pos[i] = j; + t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] - + inc->i_rx_lat_trace[j]; + } + + ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY, + sizeof(t), &t); + if (ret) + goto out; + } + +out: + return ret; } int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index ad4892e97f91..e006ef8e6d40 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@ -180,6 +180,9 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, rdsdebug("alloced tinc %p\n", tinc); rds_inc_path_init(&tinc->ti_inc, cp, cp->cp_conn->c_faddr); + tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] = + local_clock(); + /* * XXX * we might be able to use the __ variants when * we've already serialized at a higher level. 
@@ -204,6 +207,8 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, /* could be 0 for a 0 len message */ tc->t_tinc_data_rem = be32_to_cpu(tinc->ti_inc.i_hdr.h_len); + tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] = + local_clock(); } } -- cgit v1.2.3 From e4781421e883340b796da5a724bda7226817990b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 20 Dec 2016 21:57:02 +0100 Subject: netfilter: merge udp and udplite conntrack helpers udplite was copied from udp, they are virtually 100% identical. This adds udplite tracker to udp instead, removes udplite module, and then makes the udplite tracker builtin. udplite will then simply re-use udp timeout settings. It makes little sense to add separate sysctls, nowadays we have fine-grained timeout policy support via the CT target. old: text data bss dec hex filename 1633 672 0 2305 901 nf_conntrack_proto_udp.o 1756 672 0 2428 97c nf_conntrack_proto_udplite.o 69526 17937 268 87731 156b3 nf_conntrack.ko new: text data bss dec hex filename 2442 1184 0 3626 e2a nf_conntrack_proto_udp.o 68565 17721 268 86554 1521a nf_conntrack.ko Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/ipv4/nf_conntrack_ipv4.h | 1 + include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 1 + include/net/netns/conntrack.h | 16 -- net/netfilter/Makefile | 1 - net/netfilter/nf_conntrack_proto_udp.c | 123 ++++++++++ net/netfilter/nf_conntrack_proto_udplite.c | 324 ------------------------- 6 files changed, 125 insertions(+), 341 deletions(-) delete mode 100644 net/netfilter/nf_conntrack_proto_udplite.c diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index 919e4e8af327..6ff32815641b 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -14,6 +14,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; +extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; #ifdef CONFIG_NF_CT_PROTO_DCCP extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index eaea968f8657..c59b82456f89 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -5,6 +5,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; +extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; #ifdef CONFIG_NF_CT_PROTO_DCCP extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index cf799fc3fdec..17724c62de97 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -69,19 +69,6 @@ struct nf_sctp_net { }; #endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE -enum udplite_conntrack { - UDPLITE_CT_UNREPLIED, - UDPLITE_CT_REPLIED, - UDPLITE_CT_MAX -}; - -struct nf_udplite_net { - struct nf_proto_net pn; - unsigned int timeouts[UDPLITE_CT_MAX]; -}; -#endif - struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; @@ -94,9 +81,6 @@ struct nf_ip_net { 
#ifdef CONFIG_NF_CT_PROTO_SCTP struct nf_sctp_net sctp; #endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE - struct nf_udplite_net udplite; -#endif }; struct ct_pcpu { diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index ca30d1960f1d..bf5c577113b6 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -7,7 +7,6 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o -nf_conntrack-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o obj-$(CONFIG_NETFILTER) = netfilter.o diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 20f35ed68030..ae63944c9dc4 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -108,6 +108,59 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } +#ifdef CONFIG_NF_CT_PROTO_UDPLITE +static int udplite_error(struct net *net, struct nf_conn *tmpl, + struct sk_buff *skb, + unsigned int dataoff, + enum ip_conntrack_info *ctinfo, + u8 pf, unsigned int hooknum) +{ + unsigned int udplen = skb->len - dataoff; + const struct udphdr *hdr; + struct udphdr _hdr; + unsigned int cscov; + + /* Header is too small? */ + hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); + if (!hdr) { + if (LOG_INVALID(net, IPPROTO_UDPLITE)) + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, + "nf_ct_udplite: short packet "); + return -NF_ACCEPT; + } + + cscov = ntohs(hdr->len); + if (cscov == 0) { + cscov = udplen; + } else if (cscov < sizeof(*hdr) || cscov > udplen) { + if (LOG_INVALID(net, IPPROTO_UDPLITE)) + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, + "nf_ct_udplite: invalid checksum coverage "); + return -NF_ACCEPT; + } + + /* UDPLITE mandates checksums */ + if (!hdr->check) { + if (LOG_INVALID(net, IPPROTO_UDPLITE)) + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, + "nf_ct_udplite: checksum missing "); + return -NF_ACCEPT; + } + + /* Checksum invalid? Ignore. 
*/ + if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && + nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP, + pf)) { + if (LOG_INVALID(net, IPPROTO_UDPLITE)) + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, + "nf_ct_udplite: bad UDPLite checksum "); + return -NF_ACCEPT; + } + + return NF_ACCEPT; +} +#endif + static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, @@ -290,6 +343,41 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4); +#ifdef CONFIG_NF_CT_PROTO_UDPLITE +struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = +{ + .l3proto = PF_INET, + .l4proto = IPPROTO_UDPLITE, + .name = "udplite", + .allow_clash = true, + .pkt_to_tuple = udp_pkt_to_tuple, + .invert_tuple = udp_invert_tuple, + .print_tuple = udp_print_tuple, + .packet = udp_packet, + .get_timeouts = udp_get_timeouts, + .new = udp_new, + .error = udplite_error, +#if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, + .nla_policy = nf_ct_port_nla_policy, +#endif +#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) + .ctnl_timeout = { + .nlattr_to_obj = udp_timeout_nlattr_to_obj, + .obj_to_nlattr = udp_timeout_obj_to_nlattr, + .nlattr_max = CTA_TIMEOUT_UDP_MAX, + .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, + .nla_policy = udp_timeout_nla_policy, + }, +#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ + .init_net = udp_init_net, + .get_net_proto = udp_get_net_proto, +}; +EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4); +#endif + struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = { .l3proto = PF_INET6, @@ -322,3 +410,38 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = .get_net_proto = udp_get_net_proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6); + +#ifdef CONFIG_NF_CT_PROTO_UDPLITE +struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = +{ + .l3proto = PF_INET6, + .l4proto = IPPROTO_UDPLITE, + .name = "udplite", + .allow_clash = true, + .pkt_to_tuple = udp_pkt_to_tuple, + .invert_tuple = udp_invert_tuple, + .print_tuple = udp_print_tuple, + .packet = udp_packet, + .get_timeouts = udp_get_timeouts, + .new = udp_new, + .error = udplite_error, +#if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, + .nla_policy = nf_ct_port_nla_policy, +#endif +#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) + .ctnl_timeout = { + .nlattr_to_obj = udp_timeout_nlattr_to_obj, + .obj_to_nlattr = udp_timeout_obj_to_nlattr, + .nlattr_max = CTA_TIMEOUT_UDP_MAX, + .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, + .nla_policy = udp_timeout_nla_policy, + }, +#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ + .init_net = udp_init_net, + .get_net_proto = udp_get_net_proto, +}; +EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); +#endif diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c deleted file mode 100644 index c35f7bf05d8c..000000000000 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ /dev/null @@ -1,324 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * (C) 2007 Patrick McHardy - * - * This program is free software; you can 
redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = { - [UDPLITE_CT_UNREPLIED] = 30*HZ, - [UDPLITE_CT_REPLIED] = 180*HZ, -}; - -static inline struct nf_udplite_net *udplite_pernet(struct net *net) -{ - return &net->ct.nf_ct_proto.udplite; -} - -static bool udplite_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct net *net, - struct nf_conntrack_tuple *tuple) -{ - const struct udphdr *hp; - struct udphdr _hdr; - - /* Actually only need first 4 bytes to get ports. */ - hp = skb_header_pointer(skb, dataoff, 4, &_hdr); - if (hp == NULL) - return false; - - tuple->src.u.udp.port = hp->source; - tuple->dst.u.udp.port = hp->dest; - return true; -} - -static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - tuple->src.u.udp.port = orig->dst.u.udp.port; - tuple->dst.u.udp.port = orig->src.u.udp.port; - return true; -} - -/* Print out the per-protocol part of the tuple. */ -static void udplite_print_tuple(struct seq_file *s, - const struct nf_conntrack_tuple *tuple) -{ - seq_printf(s, "sport=%hu dport=%hu ", - ntohs(tuple->src.u.udp.port), - ntohs(tuple->dst.u.udp.port)); -} - -static unsigned int *udplite_get_timeouts(struct net *net) -{ - return udplite_pernet(net)->timeouts; -} - -/* Returns verdict for packet, and may modify conntracktype */ -static int udplite_packet(struct nf_conn *ct, - const struct sk_buff *skb, - unsigned int dataoff, - enum ip_conntrack_info ctinfo, - u_int8_t pf, - unsigned int hooknum, - unsigned int *timeouts) -{ - /* If we've seen traffic both ways, this is some kind of UDP - stream. Extend timeout. */ - if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { - nf_ct_refresh_acct(ct, ctinfo, skb, - timeouts[UDPLITE_CT_REPLIED]); - /* Also, more likely to be important, and not a probe */ - if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) - nf_conntrack_event_cache(IPCT_ASSURED, ct); - } else { - nf_ct_refresh_acct(ct, ctinfo, skb, - timeouts[UDPLITE_CT_UNREPLIED]); - } - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. */ -static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) -{ - return true; -} - -static int udplite_error(struct net *net, struct nf_conn *tmpl, - struct sk_buff *skb, - unsigned int dataoff, - enum ip_conntrack_info *ctinfo, - u_int8_t pf, - unsigned int hooknum) -{ - unsigned int udplen = skb->len - dataoff; - const struct udphdr *hdr; - struct udphdr _hdr; - unsigned int cscov; - - /* Header is too small? 
*/ - hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); - if (hdr == NULL) { - if (LOG_INVALID(net, IPPROTO_UDPLITE)) - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, - "nf_ct_udplite: short packet "); - return -NF_ACCEPT; - } - - cscov = ntohs(hdr->len); - if (cscov == 0) - cscov = udplen; - else if (cscov < sizeof(*hdr) || cscov > udplen) { - if (LOG_INVALID(net, IPPROTO_UDPLITE)) - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, - "nf_ct_udplite: invalid checksum coverage "); - return -NF_ACCEPT; - } - - /* UDPLITE mandates checksums */ - if (!hdr->check) { - if (LOG_INVALID(net, IPPROTO_UDPLITE)) - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, - "nf_ct_udplite: checksum missing "); - return -NF_ACCEPT; - } - - /* Checksum invalid? Ignore. */ - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP, - pf)) { - if (LOG_INVALID(net, IPPROTO_UDPLITE)) - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, - "nf_ct_udplite: bad UDPLite checksum "); - return -NF_ACCEPT; - } - - return NF_ACCEPT; -} - -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) - -#include -#include - -static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], - struct net *net, void *data) -{ - unsigned int *timeouts = data; - struct nf_udplite_net *un = udplite_pernet(net); - - /* set default timeouts for UDPlite. */ - timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED]; - timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED]; - - if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) { - timeouts[UDPLITE_CT_UNREPLIED] = - ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ; - } - if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) { - timeouts[UDPLITE_CT_REPLIED] = - ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ; - } - return 0; -} - -static int -udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) -{ - const unsigned int *timeouts = data; - - if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, - htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) || - nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, - htonl(timeouts[UDPLITE_CT_REPLIED] / HZ))) - goto nla_put_failure; - return 0; - -nla_put_failure: - return -ENOSPC; -} - -static const struct nla_policy -udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = { - [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 }, - [CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 }, -}; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ - -#ifdef CONFIG_SYSCTL -static struct ctl_table udplite_sysctl_table[] = { - { - .procname = "nf_conntrack_udplite_timeout", - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, - { - .procname = "nf_conntrack_udplite_timeout_stream", - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, - { } -}; -#endif /* CONFIG_SYSCTL */ - -static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn, - struct nf_udplite_net *un) -{ -#ifdef CONFIG_SYSCTL - if (pn->ctl_table) - return 0; - - pn->ctl_table = kmemdup(udplite_sysctl_table, - sizeof(udplite_sysctl_table), - GFP_KERNEL); - if (!pn->ctl_table) - return -ENOMEM; - - pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED]; - pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED]; -#endif - return 0; -} - -static int udplite_init_net(struct net *net, u_int16_t proto) -{ - struct nf_udplite_net *un = udplite_pernet(net); - struct nf_proto_net *pn = &un->pn; - - if (!pn->users) { - int i; - - for (i = 0 ; i < 
UDPLITE_CT_MAX; i++) - un->timeouts[i] = udplite_timeouts[i]; - } - - return udplite_kmemdup_sysctl_table(pn, un); -} - -struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = -{ - .l3proto = PF_INET, - .l4proto = IPPROTO_UDPLITE, - .name = "udplite", - .allow_clash = true, - .pkt_to_tuple = udplite_pkt_to_tuple, - .invert_tuple = udplite_invert_tuple, - .print_tuple = udplite_print_tuple, - .packet = udplite_packet, - .get_timeouts = udplite_get_timeouts, - .new = udplite_new, - .error = udplite_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, - .nla_policy = nf_ct_port_nla_policy, -#endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) - .ctnl_timeout = { - .nlattr_to_obj = udplite_timeout_nlattr_to_obj, - .obj_to_nlattr = udplite_timeout_obj_to_nlattr, - .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX, - .obj_size = sizeof(unsigned int) * - CTA_TIMEOUT_UDPLITE_MAX, - .nla_policy = udplite_timeout_nla_policy, - }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ - .init_net = udplite_init_net, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4); - -struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = -{ - .l3proto = PF_INET6, - .l4proto = IPPROTO_UDPLITE, - .name = "udplite", - .allow_clash = true, - .pkt_to_tuple = udplite_pkt_to_tuple, - .invert_tuple = udplite_invert_tuple, - .print_tuple = udplite_print_tuple, - .packet = udplite_packet, - .get_timeouts = udplite_get_timeouts, - .new = udplite_new, - .error = udplite_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, - .nla_policy = nf_ct_port_nla_policy, -#endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) - .ctnl_timeout = { - .nlattr_to_obj = udplite_timeout_nlattr_to_obj, - .obj_to_nlattr = udplite_timeout_obj_to_nlattr, - .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX, - .obj_size = sizeof(unsigned int) * - CTA_TIMEOUT_UDPLITE_MAX, - .nla_policy = udplite_timeout_nla_policy, - }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ - .init_net = udplite_init_net, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); -- cgit v1.2.3 From 9700ba805b4f58d26c4621ad6ba6b0e632e7a04b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 20 Dec 2016 21:57:03 +0100 Subject: netfilter: nat: merge udp and udplite helpers udplite nat was copied from udp nat, they are virtually 100% identical. Not really surprising given udplite is just udp with partial csum coverage. 
old: text data bss dec hex filename 11606 1457 210 13273 33d9 nf_nat.ko 330 0 2 332 14c nf_nat_proto_udp.o 276 0 2 278 116 nf_nat_proto_udplite.o new: text data bss dec hex filename 11598 1457 210 13265 33d1 nf_nat.ko 640 0 4 644 284 nf_nat_proto_udp.o Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Makefile | 1 - net/netfilter/nf_nat_proto_udp.c | 78 ++++++++++++++++++++++++++++++------ net/netfilter/nf_nat_proto_udplite.c | 73 --------------------------------- 3 files changed, 66 insertions(+), 86 deletions(-) delete mode 100644 net/netfilter/nf_nat_proto_udplite.c diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index bf5c577113b6..6b3034f12661 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -46,7 +46,6 @@ nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \ # NAT protocols (nf_nat) nf_nat-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o nf_nat-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o -nf_nat-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o # generic transport layer logging obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c index b1e627227b6e..edd4a77dc09a 100644 --- a/net/netfilter/nf_nat_proto_udp.c +++ b/net/netfilter/nf_nat_proto_udp.c @@ -30,20 +30,15 @@ udp_unique_tuple(const struct nf_nat_l3proto *l3proto, &udp_port_rover); } -static bool -udp_manip_pkt(struct sk_buff *skb, - const struct nf_nat_l3proto *l3proto, - unsigned int iphdroff, unsigned int hdroff, - const struct nf_conntrack_tuple *tuple, - enum nf_nat_manip_type maniptype) +static void +__udp_manip_pkt(struct sk_buff *skb, + const struct nf_nat_l3proto *l3proto, + unsigned int iphdroff, struct udphdr *hdr, + const struct nf_conntrack_tuple *tuple, + enum nf_nat_manip_type maniptype, bool do_csum) { - struct udphdr *hdr; __be16 *portptr, newport; - if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) - return false; - hdr = (struct udphdr *)(skb->data + hdroff); - if (maniptype == NF_NAT_MANIP_SRC) { /* Get rid of src port */ newport = tuple->src.u.udp.port; @@ -53,7 +48,7 @@ udp_manip_pkt(struct sk_buff *skb, newport = tuple->dst.u.udp.port; portptr = &hdr->dest; } - if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) { + if (do_csum) { l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, @@ -62,9 +57,68 @@ udp_manip_pkt(struct sk_buff *skb, hdr->check = CSUM_MANGLED_0; } *portptr = newport; +} + +static bool udp_manip_pkt(struct sk_buff *skb, + const struct nf_nat_l3proto *l3proto, + unsigned int iphdroff, unsigned int hdroff, + const struct nf_conntrack_tuple *tuple, + enum nf_nat_manip_type maniptype) +{ + struct udphdr *hdr; + bool do_csum; + + if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) + return false; + + hdr = (struct udphdr *)(skb->data + hdroff); + do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; + + __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum); + return true; +} + +#ifdef CONFIG_NF_NAT_PROTO_UDPLITE +static u16 udplite_port_rover; + +static bool udplite_manip_pkt(struct sk_buff *skb, + const struct nf_nat_l3proto *l3proto, + unsigned int iphdroff, unsigned int hdroff, + const struct nf_conntrack_tuple *tuple, + enum nf_nat_manip_type maniptype) +{ + struct udphdr *hdr; + + if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) + return false; + + hdr = (struct udphdr *)(skb->data + hdroff); + 
__udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, true); return true; } +static void +udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, + struct nf_conntrack_tuple *tuple, + const struct nf_nat_range *range, + enum nf_nat_manip_type maniptype, + const struct nf_conn *ct) +{ + nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, + &udplite_port_rover); +} + +const struct nf_nat_l4proto nf_nat_l4proto_udplite = { + .l4proto = IPPROTO_UDPLITE, + .manip_pkt = udplite_manip_pkt, + .in_range = nf_nat_l4proto_in_range, + .unique_tuple = udplite_unique_tuple, +#if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, +#endif +}; +#endif /* CONFIG_NF_NAT_PROTO_UDPLITE */ + const struct nf_nat_l4proto nf_nat_l4proto_udp = { .l4proto = IPPROTO_UDP, .manip_pkt = udp_manip_pkt, diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c deleted file mode 100644 index 366bfbfd82a1..000000000000 --- a/net/netfilter/nf_nat_proto_udplite.c +++ /dev/null @@ -1,73 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2006 Netfilter Core Team - * (C) 2008 Patrick McHardy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include - -#include -#include -#include -#include - -static u16 udplite_port_rover; - -static void -udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, - struct nf_conntrack_tuple *tuple, - const struct nf_nat_range *range, - enum nf_nat_manip_type maniptype, - const struct nf_conn *ct) -{ - nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, - &udplite_port_rover); -} - -static bool -udplite_manip_pkt(struct sk_buff *skb, - const struct nf_nat_l3proto *l3proto, - unsigned int iphdroff, unsigned int hdroff, - const struct nf_conntrack_tuple *tuple, - enum nf_nat_manip_type maniptype) -{ - struct udphdr *hdr; - __be16 *portptr, newport; - - if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) - return false; - - hdr = (struct udphdr *)(skb->data + hdroff); - - if (maniptype == NF_NAT_MANIP_SRC) { - /* Get rid of source port */ - newport = tuple->src.u.udp.port; - portptr = &hdr->source; - } else { - /* Get rid of dst port */ - newport = tuple->dst.u.udp.port; - portptr = &hdr->dest; - } - - l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); - inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false); - if (!hdr->check) - hdr->check = CSUM_MANGLED_0; - - *portptr = newport; - return true; -} - -const struct nf_nat_l4proto nf_nat_l4proto_udplite = { - .l4proto = IPPROTO_UDPLITE, - .manip_pkt = udplite_manip_pkt, - .in_range = nf_nat_l4proto_in_range, - .unique_tuple = udplite_unique_tuple, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, -#endif -}; -- cgit v1.2.3 From 237bab6611c607a9e63d50164609923feb8b83b3 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sun, 25 Dec 2016 19:58:58 +0800 Subject: netfilter: nf_tables: add missing descriptions in nft_ct_keys We missed to add descriptions about NFT_CT_LABELS, NFT_CT_PKTS and NFT_CT_BYTES, now add it. 
Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 881d49e94569..5726f90bfc2f 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -860,6 +860,9 @@ enum nft_rt_attributes { * @NFT_CT_PROTOCOL: conntrack layer 4 protocol * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination + * @NFT_CT_LABELS: conntrack labels + * @NFT_CT_PKTS: conntrack packets + * @NFT_CT_BYTES: conntrack bytes */ enum nft_ct_keys { NFT_CT_STATE, -- cgit v1.2.3 From 949a358418aae397d7cf1622aa6515eca766b9e7 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sun, 25 Dec 2016 19:58:59 +0800 Subject: netfilter: nft_ct: add average bytes per packet support Similar to xt_connbytes, user can match how many average bytes per packet a connection has transferred so far. Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nft_ct.c | 22 +++++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 5726f90bfc2f..b00a05d1ee56 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -863,6 +863,7 @@ enum nft_rt_attributes { * @NFT_CT_LABELS: conntrack labels * @NFT_CT_PKTS: conntrack packets * @NFT_CT_BYTES: conntrack bytes + * @NFT_CT_AVGPKT: conntrack average bytes per packet */ enum nft_ct_keys { NFT_CT_STATE, @@ -881,6 +882,7 @@ enum nft_ct_keys { NFT_CT_LABELS, NFT_CT_PKTS, NFT_CT_BYTES, + NFT_CT_AVGPKT, }; /** diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index e6baeaebe653..d774d7823688 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -129,6 +129,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr, memcpy(dest, &count, sizeof(count)); return; } + case NFT_CT_AVGPKT: { + const struct nf_conn_acct *acct = nf_conn_acct_find(ct); + u64 avgcnt = 0, bcnt = 0, pcnt = 0; + + if (acct) { + pcnt = nft_ct_get_eval_counter(acct->counter, + NFT_CT_PKTS, priv->dir); + bcnt = nft_ct_get_eval_counter(acct->counter, + NFT_CT_BYTES, priv->dir); + if (pcnt != 0) + avgcnt = div64_u64(bcnt, pcnt); + } + + memcpy(dest, &avgcnt, sizeof(avgcnt)); + return; + } case NFT_CT_L3PROTOCOL: *dest = nf_ct_l3num(ct); return; @@ -316,6 +332,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, break; case NFT_CT_BYTES: case NFT_CT_PKTS: + case NFT_CT_AVGPKT: /* no direction? 
return sum of original + reply */ if (tb[NFTA_CT_DIRECTION] == NULL) priv->dir = IP_CT_DIR_MAX; @@ -346,7 +363,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, if (err < 0) return err; - if (priv->key == NFT_CT_BYTES || priv->key == NFT_CT_PKTS) + if (priv->key == NFT_CT_BYTES || + priv->key == NFT_CT_PKTS || + priv->key == NFT_CT_AVGPKT) nf_ct_set_acct(ctx->net, true); return 0; @@ -445,6 +464,7 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr) break; case NFT_CT_BYTES: case NFT_CT_PKTS: + case NFT_CT_AVGPKT: if (priv->dir < IP_CT_DIR_MAX && nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) goto nla_put_failure; -- cgit v1.2.3 From 28fa4f308e44aeea41d2a69604f5e15dcf2aa93f Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 2 Jan 2017 23:49:15 +0100 Subject: net: freescale: dpaa: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 27e7044667d1..15571e251fb9 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -72,8 +72,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = { #define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu) #define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global) -static int dpaa_get_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int dpaa_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { int err; @@ -82,13 +82,13 @@ static int dpaa_get_settings(struct net_device *net_dev, return 0; } - err = phy_ethtool_gset(net_dev->phydev, et_cmd); + err = phy_ethtool_ksettings_get(net_dev->phydev, cmd); return err; } -static int dpaa_set_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int dpaa_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { int err; @@ -97,9 +97,9 @@ static int dpaa_set_settings(struct net_device *net_dev, return -ENODEV; } - err = phy_ethtool_sset(net_dev->phydev, et_cmd); + err = phy_ethtool_ksettings_set(net_dev->phydev, cmd); if (err < 0) - netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err); + netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err); return err; } @@ -402,8 +402,6 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset, } const struct ethtool_ops dpaa_ethtool_ops = { - .get_settings = dpaa_get_settings, - .set_settings = dpaa_set_settings, .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, .set_msglevel = dpaa_set_msglevel, @@ -414,4 +412,6 @@ const struct ethtool_ops dpaa_ethtool_ops = { .get_sset_count = dpaa_get_sset_count, .get_ethtool_stats = dpaa_get_ethtool_stats, .get_strings = dpaa_get_strings, + .get_link_ksettings = dpaa_get_link_ksettings, + .set_link_ksettings = dpaa_set_link_ksettings, }; -- cgit v1.2.3 From 07b76ba9f827ffe8faebe78a98c96174a2538078 Mon Sep 17 00:00:00 2001 From: Volodymyr Bendiuga Date: Tue, 3 Jan 2017 10:49:09 +0100 Subject: dsa:mv88e6xxx: allow address 0x1 in smi_init Some devices, such as the mv88e6097 do have ADDR[0] external and so it is possible to configure the device to use SMI address 0x1. 
Remove the restriction, as there are boards using this address. Signed-off-by: Volodymyr Bendiuga Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f7222dc6581d..b5f0e1ec864d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4240,10 +4240,6 @@ static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip) static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int sw_addr) { - /* ADDR[0] pin is unavailable externally and considered zero */ - if (sw_addr & 0x1) - return -EINVAL; - if (sw_addr == 0) chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP)) -- cgit v1.2.3 From 1708ebc9636a249e104b83c6d105f15244825281 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 3 Jan 2017 12:13:39 +0100 Subject: ipmr, ip6mr: add RTNH_F_UNRESOLVED flag to unresolved cache entries While working with ipmr, we noticed that it is impossible to determine if an entry is actually unresolved or its IIF interface has disappeared (e.g. virtual interface got deleted). These entries look almost identical to user-space when dumping or receiving notifications. So in order to recognize them add a new RTNH_F_UNRESOLVED flag which is set when sending an unresolved cache entry to user-space. Suggested-by: Roopa Prabhu Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/rtnetlink.h | 1 + net/ipv4/ipmr.c | 4 +++- net/ipv6/ip6mr.c | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index e14377f2ec27..8c93ad1ef9ab 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -350,6 +350,7 @@ struct rtnexthop { #define RTNH_F_ONLINK 4 /* Gateway is forced on link */ #define RTNH_F_OFFLOAD 8 /* offloaded route */ #define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */ +#define RTNH_F_UNRESOLVED 32 /* The entry is unresolved (ipmr) */ #define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index efc1e76d4977..b35dda57586b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2091,8 +2091,10 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, int ct; /* If cache is unresolved, don't try to parse IIF and OIF */ - if (c->mfc_parent >= MAXVIFS) + if (c->mfc_parent >= MAXVIFS) { + rtm->rtm_flags |= RTNH_F_UNRESOLVED; return -ENOENT; + } if (VIF_EXISTS(mrt, c->mfc_parent) && nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 604d8953c775..e275077e8af2 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2243,8 +2243,10 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, int ct; /* If cache is unresolved, don't try to parse IIF and OIF */ - if (c->mf6c_parent >= MAXMIFS) + if (c->mf6c_parent >= MAXMIFS) { + rtm->rtm_flags |= RTNH_F_UNRESOLVED; return -ENOENT; + } if (MIF_EXISTS(mrt, c->mf6c_parent) && nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0) -- cgit v1.2.3 From 14077e9ed3cf0dd1b11cf021ac7b153b68e5ac3f Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 3 Jan 2017 15:46:00 +0000 Subject: sfc: declare module version (same as ethtool drvinfo version) Signed-off-by: Edward Cree 
Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 5a5dcad8c49a..bbbed2e84de8 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3585,3 +3585,4 @@ MODULE_AUTHOR("Solarflare Communications and " MODULE_DESCRIPTION("Solarflare network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table); +MODULE_VERSION(EFX_DRIVER_VERSION); -- cgit v1.2.3 From e7072f6669860c4918ced4acf6a7e482023bd40f Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 3 Jan 2017 15:46:15 +0000 Subject: sfc-falcon: declare module version (same as ethtool drvinfo version) Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/falcon/efx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 438ef9e7a10d..ec3ac0e45cc9 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -3348,3 +3348,4 @@ MODULE_AUTHOR("Solarflare Communications and " MODULE_DESCRIPTION("Solarflare Falcon network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ef4_pci_table); +MODULE_VERSION(EF4_DRIVER_VERSION); -- cgit v1.2.3 From 7f953ab2ba46e8649537942c0a64668ca2ce5cc5 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Tue, 3 Jan 2017 06:31:47 -0800 Subject: af_packet: TX_RING support for TPACKET_V3 Although TPACKET_V3 Rx has some benefits over TPACKET_V2 Rx, *_v3 does not currently have TX_RING support. As a result an application that wants the best perf for Tx and Rx (e.g. to handle request/response transacations) ends up needing 2 sockets, one with *_v2 for Tx and another with *_v3 for Rx. This patch enables TPACKET_V2 compatible Tx features in TPACKET_V3 so that an application can use a single descriptor to get the benefits of _v3 RX_RING and _v2 TX_RING. An application may do a block-send by first filling up multiple frames in the Tx ring and then triggering a transmit. This patch only support fixed size Tx frames for TPACKET_V3, and requires that tp_next_offset must be zero. Signed-off-by: Sowmini Varadhan Signed-off-by: David S. Miller --- Documentation/networking/packet_mmap.txt | 9 ++++++-- net/packet/af_packet.c | 39 ++++++++++++++++++++++++-------- 2 files changed, 37 insertions(+), 11 deletions(-) diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index daa015af16a0..f3b9e507ab05 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt @@ -565,7 +565,7 @@ TPACKET_V1 --> TPACKET_V2: (void *)hdr + TPACKET_ALIGN(sizeof(struct tpacket_hdr)) TPACKET_V2 --> TPACKET_V3: - - Flexible buffer implementation: + - Flexible buffer implementation for RX_RING: 1. Blocks can be configured with non-static frame-size 2. Read/poll is at a block-level (as opposed to packet-level) 3. Added poll timeout to avoid indefinite user-space wait @@ -574,7 +574,12 @@ TPACKET_V2 --> TPACKET_V3: 4.1 block::timeout 4.2 tpkt_hdr::sk_rxhash - RX Hash data available in user space - - Currently only RX_RING available + - TX_RING semantics are conceptually similar to TPACKET_V2; + use tpacket3_hdr instead of tpacket2_hdr, and TPACKET3_HDRLEN + instead of TPACKET2_HDRLEN. In the current implementation, + the tp_next_offset field in the tpacket3_hdr MUST be set to + zero, indicating that the ring does not hold variable sized frames. 
+ Packets with non-zero values of tp_next_offset will be dropped. ------------------------------------------------------------------------------- + AF_PACKET fanout mode diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index b9e1a13b4ba3..7e39087d519b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -409,6 +409,9 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status) flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: + h.h3->tp_status = status; + flush_dcache_page(pgv_to_page(&h.h3->tp_status)); + break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); @@ -432,6 +435,8 @@ static int __packet_get_status(struct packet_sock *po, void *frame) flush_dcache_page(pgv_to_page(&h.h2->tp_status)); return h.h2->tp_status; case TPACKET_V3: + flush_dcache_page(pgv_to_page(&h.h3->tp_status)); + return h.h3->tp_status; default: WARN(1, "TPACKET version not supported.\n"); BUG(); @@ -2497,6 +2502,13 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame, ph.raw = frame; switch (po->tp_version) { + case TPACKET_V3: + if (ph.h3->tp_next_offset != 0) { + pr_warn_once("variable sized slot not supported"); + return -EINVAL; + } + tp_len = ph.h3->tp_len; + break; case TPACKET_V2: tp_len = ph.h2->tp_len; break; @@ -2516,6 +2528,9 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame, off_max = po->tx_ring.frame_size - tp_len; if (po->sk.sk_type == SOCK_DGRAM) { switch (po->tp_version) { + case TPACKET_V3: + off = ph.h3->tp_net; + break; case TPACKET_V2: off = ph.h2->tp_net; break; @@ -2525,6 +2540,9 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame, } } else { switch (po->tp_version) { + case TPACKET_V3: + off = ph.h3->tp_mac; + break; case TPACKET_V2: off = ph.h2->tp_mac; break; @@ -4113,11 +4131,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, struct tpacket_req *req = &req_u->req; lock_sock(sk); - /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ - if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { - net_warn_ratelimited("Tx-ring is not supported.\n"); - goto out; - } rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; @@ -4177,11 +4190,19 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; switch (po->tp_version) { case TPACKET_V3: - /* Transmit path is not supported. We checked - * it above but just being paranoid - */ - if (!tx_ring) + /* Block transmit is not supported yet */ + if (!tx_ring) { init_prb_bdqc(po, rb, pg_vec, req_u); + } else { + struct tpacket_req3 *req3 = &req_u->req3; + + if (req3->tp_retire_blk_tov || + req3->tp_sizeof_priv || + req3->tp_feature_req_word) { + err = -EINVAL; + goto out; + } + } break; default: break; -- cgit v1.2.3 From fe878cad38d389cf5dbcce501951b2269d6d02bf Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Tue, 3 Jan 2017 06:31:48 -0800 Subject: tools: test case for TPACKET_V3/TX_RING support Add a test case and sample code for (TPACKET_V3, PACKET_TX_RING) Signed-off-by: Sowmini Varadhan Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/psock_tpacket.c | 91 +++++++++++++++++++++++------ 1 file changed, 74 insertions(+), 17 deletions(-) diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c index 24adf709bd9d..4a1bc648b35e 100644 --- a/tools/testing/selftests/net/psock_tpacket.c +++ b/tools/testing/selftests/net/psock_tpacket.c @@ -311,20 +311,33 @@ static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr) __sync_synchronize(); } -static inline int __v1_v2_tx_kernel_ready(void *base, int version) +static inline int __v3_tx_kernel_ready(struct tpacket3_hdr *hdr) +{ + return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); +} + +static inline void __v3_tx_user_ready(struct tpacket3_hdr *hdr) +{ + hdr->tp_status = TP_STATUS_SEND_REQUEST; + __sync_synchronize(); +} + +static inline int __tx_kernel_ready(void *base, int version) { switch (version) { case TPACKET_V1: return __v1_tx_kernel_ready(base); case TPACKET_V2: return __v2_tx_kernel_ready(base); + case TPACKET_V3: + return __v3_tx_kernel_ready(base); default: bug_on(1); return 0; } } -static inline void __v1_v2_tx_user_ready(void *base, int version) +static inline void __tx_user_ready(void *base, int version) { switch (version) { case TPACKET_V1: @@ -333,6 +346,9 @@ static inline void __v1_v2_tx_user_ready(void *base, int version) case TPACKET_V2: __v2_tx_user_ready(base); break; + case TPACKET_V3: + __v3_tx_user_ready(base); + break; } } @@ -348,7 +364,22 @@ static void __v1_v2_set_packet_loss_discard(int sock) } } -static void walk_v1_v2_tx(int sock, struct ring *ring) +static inline void *get_next_frame(struct ring *ring, int n) +{ + uint8_t *f0 = ring->rd[0].iov_base; + + switch (ring->version) { + case TPACKET_V1: + case TPACKET_V2: + return ring->rd[n].iov_base; + case TPACKET_V3: + return f0 + (n * ring->req3.tp_frame_size); + default: + bug_on(1); + } +} + +static void walk_tx(int sock, struct ring *ring) { struct pollfd pfd; int rcv_sock, ret; @@ -360,9 +391,19 @@ static void walk_v1_v2_tx(int sock, struct ring *ring) .sll_family = PF_PACKET, .sll_halen = ETH_ALEN, }; + int nframes; + + /* TPACKET_V{1,2} sets up the ring->rd* related variables based + * on frames (e.g., rd_num is tp_frame_nr) whereas V3 sets these + * up based on blocks (e.g, rd_num is tp_block_nr) + */ + if (ring->version <= TPACKET_V2) + nframes = ring->rd_num; + else + nframes = ring->req3.tp_frame_nr; bug_on(ring->type != PACKET_TX_RING); - bug_on(ring->rd_num < NUM_PACKETS); + bug_on(nframes < NUM_PACKETS); rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (rcv_sock == -1) { @@ -388,10 +429,11 @@ static void walk_v1_v2_tx(int sock, struct ring *ring) create_payload(packet, &packet_len); while (total_packets > 0) { - while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base, - ring->version) && + void *next = get_next_frame(ring, frame_num); + + while (__tx_kernel_ready(next, ring->version) && total_packets > 0) { - ppd.raw = ring->rd[frame_num].iov_base; + ppd.raw = next; switch (ring->version) { case TPACKET_V1: @@ -413,14 +455,27 @@ static void walk_v1_v2_tx(int sock, struct ring *ring) packet_len); total_bytes += ppd.v2->tp_h.tp_snaplen; break; + case TPACKET_V3: { + struct tpacket3_hdr *tx = next; + + tx->tp_snaplen = packet_len; + tx->tp_len = packet_len; + tx->tp_next_offset = 0; + + memcpy((uint8_t *)tx + TPACKET3_HDRLEN - + sizeof(struct sockaddr_ll), packet, + packet_len); + total_bytes += tx->tp_snaplen; + break; + } } status_bar_update(); total_packets--; - 
__v1_v2_tx_user_ready(ppd.raw, ring->version); + __tx_user_ready(next, ring->version); - frame_num = (frame_num + 1) % ring->rd_num; + frame_num = (frame_num + 1) % nframes; } poll(&pfd, 1, 1); @@ -460,7 +515,7 @@ static void walk_v1_v2(int sock, struct ring *ring) if (ring->type == PACKET_RX_RING) walk_v1_v2_rx(sock, ring); else - walk_v1_v2_tx(sock, ring); + walk_tx(sock, ring); } static uint64_t __v3_prev_block_seq_num = 0; @@ -583,7 +638,7 @@ static void walk_v3(int sock, struct ring *ring) if (ring->type == PACKET_RX_RING) walk_v3_rx(sock, ring); else - bug_on(1); + walk_tx(sock, ring); } static void __v1_v2_fill(struct ring *ring, unsigned int blocks) @@ -602,12 +657,13 @@ static void __v1_v2_fill(struct ring *ring, unsigned int blocks) ring->flen = ring->req.tp_frame_size; } -static void __v3_fill(struct ring *ring, unsigned int blocks) +static void __v3_fill(struct ring *ring, unsigned int blocks, int type) { - ring->req3.tp_retire_blk_tov = 64; - ring->req3.tp_sizeof_priv = 0; - ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; - + if (type == PACKET_RX_RING) { + ring->req3.tp_retire_blk_tov = 64; + ring->req3.tp_sizeof_priv = 0; + ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; + } ring->req3.tp_block_size = getpagesize() << 2; ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7; ring->req3.tp_block_nr = blocks; @@ -641,7 +697,7 @@ static void setup_ring(int sock, struct ring *ring, int version, int type) break; case TPACKET_V3: - __v3_fill(ring, blocks); + __v3_fill(ring, blocks, type); ret = setsockopt(sock, SOL_PACKET, type, &ring->req3, sizeof(ring->req3)); break; @@ -796,6 +852,7 @@ int main(void) ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING); ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING); + ret |= test_tpacket(TPACKET_V3, PACKET_TX_RING); if (ret) return 1; -- cgit v1.2.3 From 8c44e1af16b2983b3df93117cd0ca40638998ce3 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Tue, 3 Jan 2017 10:55:09 -0500 Subject: tipc: unify tipc_wait_for_sndpkt() and tipc_wait_for_sndmsg() functions The functions tipc_wait_for_sndpkt() and tipc_wait_for_sndmsg() are very similar. The latter function is also called from two locations, and there will be more in the coming commits, which will all need to test on different conditions. Instead of making yet another duplicates of the function, we now introduce a new macro tipc_wait_for_cond() where the wakeup condition can be stated as an argument to the call. This macro replaces all current and future uses of the two functions, which can now be eliminated. Acked-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/socket.c | 108 +++++++++++++++++++++++++----------------------------- 1 file changed, 49 insertions(+), 59 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 800caaa699a1..f27462eeccbe 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -110,7 +110,6 @@ static void tipc_write_space(struct sock *sk); static void tipc_sock_destruct(struct sock *sk); static int tipc_release(struct socket *sock); static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); -static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); static void tipc_sk_timeout(unsigned long data); static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, struct tipc_name_seq const *seq); @@ -334,6 +333,49 @@ static int tipc_set_sk_state(struct sock *sk, int state) return res; } +static int tipc_sk_sock_err(struct socket *sock, long *timeout) +{ + struct sock *sk = sock->sk; + int err = sock_error(sk); + int typ = sock->type; + + if (err) + return err; + if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) { + if (sk->sk_state == TIPC_DISCONNECTING) + return -EPIPE; + else if (!tipc_sk_connected(sk)) + return -ENOTCONN; + } + if (!*timeout) + return -EAGAIN; + if (signal_pending(current)) + return sock_intr_errno(*timeout); + + return 0; +} + +#define tipc_wait_for_cond(sock_, timeout_, condition_) \ +({ \ + int rc_ = 0; \ + int done_ = 0; \ + \ + while (!(condition_) && !done_) { \ + struct sock *sk_ = sock->sk; \ + DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ + \ + rc_ = tipc_sk_sock_err(sock_, timeout_); \ + if (rc_) \ + break; \ + prepare_to_wait(sk_sleep(sk_), &wait_, \ + TASK_INTERRUPTIBLE); \ + done_ = sk_wait_event(sk_, timeout_, \ + (condition_), &wait_); \ + remove_wait_queue(sk_sleep(sk_), &wait_); \ + } \ + rc_; \ +}) + /** * tipc_sk_create - create a TIPC socket * @net: network namespace (must be default network) @@ -721,7 +763,7 @@ new_mtu: if (rc == -ELINKCONG) { tsk->link_cong = 1; - rc = tipc_wait_for_sndmsg(sock, &timeo); + rc = tipc_wait_for_cond(sock, &timeo, !tsk->link_cong); if (!rc) continue; } @@ -830,31 +872,6 @@ exit: kfree_skb(skb); } -static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct sock *sk = sock->sk; - struct tipc_sock *tsk = tipc_sk(sk); - int done; - - do { - int err = sock_error(sk); - if (err) - return err; - if (sk->sk_shutdown & SEND_SHUTDOWN) - return -EPIPE; - if (!*timeo_p) - return -EAGAIN; - if (signal_pending(current)) - return sock_intr_errno(*timeo_p); - - add_wait_queue(sk_sleep(sk), &wait); - done = sk_wait_event(sk, timeo_p, !tsk->link_cong, &wait); - remove_wait_queue(sk_sleep(sk), &wait); - } while (!done); - return 0; -} - /** * tipc_sendmsg - send message in connectionless manner * @sock: socket structure @@ -970,7 +987,7 @@ new_mtu: } if (rc == -ELINKCONG) { tsk->link_cong = 1; - rc = tipc_wait_for_sndmsg(sock, &timeo); + rc = tipc_wait_for_cond(sock, &timeo, !tsk->link_cong); if (!rc) continue; } @@ -985,36 +1002,6 @@ new_mtu: return rc; } -static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct sock *sk = sock->sk; - struct tipc_sock *tsk = tipc_sk(sk); - int done; - - do { - int err = sock_error(sk); - if (err) - return err; - if (sk->sk_state == TIPC_DISCONNECTING) - return -EPIPE; - else if (!tipc_sk_connected(sk)) - return -ENOTCONN; - if (!*timeo_p) - return -EAGAIN; - if (signal_pending(current)) - return sock_intr_errno(*timeo_p); - - 
add_wait_queue(sk_sleep(sk), &wait); - done = sk_wait_event(sk, timeo_p, - (!tsk->link_cong && - !tsk_conn_cong(tsk)) || - !tipc_sk_connected(sk), &wait); - remove_wait_queue(sk_sleep(sk), &wait); - } while (!done); - return 0; -} - /** * tipc_send_stream - send stream-oriented data * @sock: socket structure @@ -1109,7 +1096,10 @@ next: tsk->link_cong = 1; } - rc = tipc_wait_for_sndpkt(sock, &timeo); + rc = tipc_wait_for_cond(sock, &timeo, + (!tsk->link_cong && + !tsk_conn_cong(tsk) && + tipc_sk_connected(sk))); } while (!rc); __skb_queue_purge(&pktchain); -- cgit v1.2.3 From 4d8642d896c53966d32d5e343c3620813dd0e7c8 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Tue, 3 Jan 2017 10:55:10 -0500 Subject: tipc: modify struct tipc_plist to be more versatile During multicast reception we currently use a simple linked list with push/pop semantics to store port numbers. We now see a need for a more generic list for storing values of type u32. We therefore make some modifications to this list, while replacing the prefix 'tipc_plist_' with 'u32_'. We also add a couple of new functions which will come to use in the next commits. Acked-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/name_table.c | 100 ++++++++++++++++++++++++++++++++++++-------------- net/tipc/name_table.h | 21 ++++------- net/tipc/socket.c | 8 ++-- 3 files changed, 83 insertions(+), 46 deletions(-) diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index e190460fe0d3..5a86df1e5fc2 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -608,7 +608,7 @@ not_found: * Returns non-zero if any off-node ports overlap */ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, - u32 limit, struct tipc_plist *dports) + u32 limit, struct list_head *dports) { struct name_seq *seq; struct sub_seq *sseq; @@ -633,7 +633,7 @@ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, info = sseq->info; list_for_each_entry(publ, &info->node_list, node_list) { if (publ->scope <= limit) - tipc_plist_push(dports, publ->ref); + u32_push(dports, publ->ref); } if (info->cluster_list_size != info->node_list_size) @@ -1022,40 +1022,84 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -void tipc_plist_push(struct tipc_plist *pl, u32 port) +struct u32_item { + struct list_head list; + u32 value; +}; + +bool u32_find(struct list_head *l, u32 value) { - struct tipc_plist *nl; + struct u32_item *item; - if (likely(!pl->port)) { - pl->port = port; - return; + list_for_each_entry(item, l, list) { + if (item->value == value) + return true; } - if (pl->port == port) - return; - list_for_each_entry(nl, &pl->list, list) { - if (nl->port == port) - return; + return false; +} + +bool u32_push(struct list_head *l, u32 value) +{ + struct u32_item *item; + + list_for_each_entry(item, l, list) { + if (item->value == value) + return false; + } + item = kmalloc(sizeof(*item), GFP_ATOMIC); + if (unlikely(!item)) + return false; + + item->value = value; + list_add(&item->list, l); + return true; +} + +u32 u32_pop(struct list_head *l) +{ + struct u32_item *item; + u32 value = 0; + + if (list_empty(l)) + return 0; + item = list_first_entry(l, typeof(*item), list); + value = item->value; + list_del(&item->list); + kfree(item); + return value; +} + +bool u32_del(struct list_head *l, u32 value) +{ + struct u32_item *item, *tmp; + + list_for_each_entry_safe(item, tmp, l, list) { + if (item->value != 
value) + continue; + list_del(&item->list); + kfree(item); + return true; } - nl = kmalloc(sizeof(*nl), GFP_ATOMIC); - if (nl) { - nl->port = port; - list_add(&nl->list, &pl->list); + return false; +} + +void u32_list_purge(struct list_head *l) +{ + struct u32_item *item, *tmp; + + list_for_each_entry_safe(item, tmp, l, list) { + list_del(&item->list); + kfree(item); } } -u32 tipc_plist_pop(struct tipc_plist *pl) +int u32_list_len(struct list_head *l) { - struct tipc_plist *nl; - u32 port = 0; + struct u32_item *item; + int i = 0; - if (likely(list_empty(&pl->list))) { - port = pl->port; - pl->port = 0; - return port; + list_for_each_entry(item, l, list) { + i++; } - nl = list_first_entry(&pl->list, typeof(*nl), list); - port = nl->port; - list_del(&nl->list); - kfree(nl); - return port; + return i; } diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 1524a73830f7..c89bb3f5c364 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -99,7 +99,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node); int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, - u32 limit, struct tipc_plist *dports); + u32 limit, struct list_head *dports); struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, u32 upper, u32 scope, u32 port_ref, u32 key); @@ -116,18 +116,11 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s); int tipc_nametbl_init(struct net *net); void tipc_nametbl_stop(struct net *net); -struct tipc_plist { - struct list_head list; - u32 port; -}; - -static inline void tipc_plist_init(struct tipc_plist *pl) -{ - INIT_LIST_HEAD(&pl->list); - pl->port = 0; -} - -void tipc_plist_push(struct tipc_plist *pl, u32 port); -u32 tipc_plist_pop(struct tipc_plist *pl); +bool u32_push(struct list_head *l, u32 value); +u32 u32_pop(struct list_head *l); +bool u32_find(struct list_head *l, u32 value); +bool u32_del(struct list_head *l, u32 value); +void u32_list_purge(struct list_head *l); +int u32_list_len(struct list_head *l); #endif diff --git a/net/tipc/socket.c b/net/tipc/socket.c index f27462eeccbe..fae6a55ef1b0 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -788,7 +788,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, struct sk_buff_head *inputq) { struct tipc_msg *msg; - struct tipc_plist dports; + struct list_head dports; u32 portid; u32 scope = TIPC_CLUSTER_SCOPE; struct sk_buff_head tmpq; @@ -796,7 +796,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, struct sk_buff *skb, *_skb; __skb_queue_head_init(&tmpq); - tipc_plist_init(&dports); + INIT_LIST_HEAD(&dports); skb = tipc_skb_peek(arrvq, &inputq->lock); for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { @@ -810,8 +810,8 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, tipc_nametbl_mc_translate(net, msg_nametype(msg), msg_namelower(msg), msg_nameupper(msg), scope, &dports); - portid = tipc_plist_pop(&dports); - for (; portid; portid = tipc_plist_pop(&dports)) { + portid = u32_pop(&dports); + for (; portid; portid = u32_pop(&dports)) { _skb = __pskb_copy(skb, hsz, GFP_ATOMIC); if (_skb) { msg_set_destport(buf_msg(_skb), portid); -- cgit v1.2.3 From 365ad353c2564bba8835290061308ba825166b3a Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Tue, 3 Jan 2017 10:55:11 -0500 Subject: tipc: reduce risk of user starvation during link congestion The socket code currently handles link 
congestion by either blocking and trying to send again when the congestion has abated, or just returning to the user with -EAGAIN and let him re-try later. This mechanism is prone to starvation, because the wakeup algorithm is non-atomic. During the time the link issues a wakeup signal, until the socket wakes up and re-attempts sending, other senders may have come in between and occupied the free buffer space in the link. This in turn may lead to a socket having to make many send attempts before it is successful. In extremely loaded systems we have observed latency times of several seconds before a low-priority socket is able to send out a message. In this commit, we simplify this mechanism and reduce the risk of the described scenario happening. When a message is attempted sent via a congested link, we now let it be added to the link's backlog queue anyway, thus permitting an oversubscription of one message per source socket. We still create a wakeup item and return an error code, hence instructing the sender to block or stop sending. Only when enough space has been freed up in the link's backlog queue do we issue a wakeup event that allows the sender to continue with the next message, if any. The fact that a socket now can consider a message sent even when the link returns a congestion code means that the sending socket code can be simplified. Also, since this is a good opportunity to get rid of the obsolete 'mtu change' condition in the three socket send functions, we now choose to refactor those functions completely. Signed-off-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/bcast.c | 6 +- net/tipc/link.c | 75 +++++------- net/tipc/msg.h | 2 - net/tipc/node.c | 15 +-- net/tipc/socket.c | 347 ++++++++++++++++++++++++------------------------------ 5 files changed, 194 insertions(+), 251 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index aa1babbea385..c35fad3e08e8 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -174,7 +174,7 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq) * and to identified node local sockets * @net: the applicable net namespace * @list: chain of buffers containing message - * Consumes the buffer chain, except when returning -ELINKCONG + * Consumes the buffer chain. 
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE */ int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list) @@ -197,7 +197,7 @@ int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list) tipc_bcast_unlock(net); /* Don't send to local node if adding to link failed */ - if (unlikely(rc)) { + if (unlikely(rc && (rc != -ELINKCONG))) { __skb_queue_purge(&rcvq); return rc; } @@ -206,7 +206,7 @@ int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list) tipc_bcbase_xmit(net, &xmitq); tipc_sk_mcast_rcv(net, &rcvq, &inputq); __skb_queue_purge(list); - return 0; + return rc; } /* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link diff --git a/net/tipc/link.c b/net/tipc/link.c index bda89bf9f4ff..b758ca8b2f79 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -776,60 +776,47 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) /** * link_schedule_user - schedule a message sender for wakeup after congestion - * @link: congested link - * @list: message that was attempted sent + * @l: congested link + * @hdr: header of message that is being sent * Create pseudo msg to send back to user when congestion abates - * Does not consume buffer list */ -static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list) +static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr) { - struct tipc_msg *msg = buf_msg(skb_peek(list)); - int imp = msg_importance(msg); - u32 oport = msg_origport(msg); - u32 addr = tipc_own_addr(link->net); + u32 dnode = tipc_own_addr(l->net); + u32 dport = msg_origport(hdr); struct sk_buff *skb; - /* This really cannot happen... */ - if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { - pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); - return -ENOBUFS; - } - /* Non-blocking sender: */ - if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending) - return -ELINKCONG; - /* Create and schedule wakeup pseudo message */ skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, - addr, addr, oport, 0, 0); + dnode, l->addr, dport, 0, 0); if (!skb) return -ENOBUFS; - TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list); - TIPC_SKB_CB(skb)->chain_imp = imp; - skb_queue_tail(&link->wakeupq, skb); - link->stats.link_congs++; + msg_set_dest_droppable(buf_msg(skb), true); + TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr); + skb_queue_tail(&l->wakeupq, skb); + l->stats.link_congs++; return -ELINKCONG; } /** * link_prepare_wakeup - prepare users for wakeup after congestion - * @link: congested link - * Move a number of waiting users, as permitted by available space in - * the send queue, from link wait queue to node wait queue for wakeup + * @l: congested link + * Wake up a number of waiting users, as permitted by available space + * in the send queue */ void link_prepare_wakeup(struct tipc_link *l) { - int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,}; - int imp, lim; struct sk_buff *skb, *tmp; + int imp, i = 0; skb_queue_walk_safe(&l->wakeupq, skb, tmp) { imp = TIPC_SKB_CB(skb)->chain_imp; - lim = l->backlog[imp].limit; - pnd[imp] += TIPC_SKB_CB(skb)->chain_sz; - if ((pnd[imp] + l->backlog[imp].len) >= lim) + if (l->backlog[imp].len < l->backlog[imp].limit) { + skb_unlink(skb, &l->wakeupq); + skb_queue_tail(l->inputq, skb); + } else if (i++ > 10) { break; - skb_unlink(skb, &l->wakeupq); - skb_queue_tail(l->inputq, skb); + } } } @@ -869,8 +856,7 @@ void tipc_link_reset(struct tipc_link *l) * @list: chain of buffers containing message * @xmitq: returned list of packets to be sent by caller * - * 
Consumes the buffer chain, except when returning -ELINKCONG, - * since the caller then may want to make more send attempts. + * Consumes the buffer chain. * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted */ @@ -879,7 +865,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, { struct tipc_msg *hdr = buf_msg(skb_peek(list)); unsigned int maxwin = l->window; - unsigned int i, imp = msg_importance(hdr); + int imp = msg_importance(hdr); unsigned int mtu = l->mtu; u16 ack = l->rcv_nxt - 1; u16 seqno = l->snd_nxt; @@ -888,19 +874,22 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, struct sk_buff_head *backlogq = &l->backlogq; struct sk_buff *skb, *_skb, *bskb; int pkt_cnt = skb_queue_len(list); + int rc = 0; - /* Match msg importance against this and all higher backlog limits: */ - if (!skb_queue_empty(backlogq)) { - for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { - if (unlikely(l->backlog[i].len >= l->backlog[i].limit)) - return link_schedule_user(l, list); - } - } if (unlikely(msg_size(hdr) > mtu)) { skb_queue_purge(list); return -EMSGSIZE; } + /* Allow oversubscription of one data msg per source at congestion */ + if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) { + if (imp == TIPC_SYSTEM_IMPORTANCE) { + pr_warn("%s<%s>, link overflow", link_rst_msg, l->name); + return -ENOBUFS; + } + rc = link_schedule_user(l, hdr); + } + if (pkt_cnt > 1) { l->stats.sent_fragmented++; l->stats.sent_fragments += pkt_cnt; @@ -946,7 +935,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, skb_queue_splice_tail_init(list, backlogq); } l->snd_nxt = seqno; - return 0; + return rc; } void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq) diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 8d408612ffa4..850ae0e469f5 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -98,8 +98,6 @@ struct tipc_skb_cb { u32 bytes_read; struct sk_buff *tail; bool validated; - bool wakeup_pending; - u16 chain_sz; u16 chain_imp; u16 ackers; }; diff --git a/net/tipc/node.c b/net/tipc/node.c index 9d2f4c2b08ab..2883f6a0ed98 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1167,7 +1167,7 @@ msg_full: * @list: chain of buffers containing message * @dnode: address of destination node * @selector: a number used for deterministic link selection - * Consumes the buffer chain, except when returning -ELINKCONG + * Consumes the buffer chain. * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF */ int tipc_node_xmit(struct net *net, struct sk_buff_head *list, @@ -1206,10 +1206,10 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list, spin_unlock_bh(&le->lock); tipc_node_read_unlock(n); - if (likely(rc == 0)) - tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr); - else if (rc == -ENOBUFS) + if (unlikely(rc == -ENOBUFS)) tipc_node_link_down(n, bearer_id, false); + else + tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr); tipc_node_put(n); @@ -1221,20 +1221,15 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list, * messages, which will not be rejected * The only exception is datagram messages rerouted after secondary * lookup, which are rare and safe to dispose of anyway. 
- * TODO: Return real return value, and let callers use - * tipc_wait_for_sendpkt() where applicable */ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, u32 selector) { struct sk_buff_head head; - int rc; skb_queue_head_init(&head); __skb_queue_tail(&head, skb); - rc = tipc_node_xmit(net, &head, dnode, selector); - if (rc == -ELINKCONG) - kfree_skb(skb); + tipc_node_xmit(net, &head, dnode, selector); return 0; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index fae6a55ef1b0..d2f353934f82 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -67,12 +67,14 @@ enum { * @max_pkt: maximum packet size "hint" used when building messages sent by port * @portid: unique port identity in TIPC socket hash table * @phdr: preformatted message header used when sending messages + * #cong_links: list of congested links * @publications: list of publications for port + * @blocking_link: address of the congested link we are currently sleeping on * @pub_count: total # of publications port has made during its lifetime * @probing_state: * @conn_timeout: the time we can wait for an unresponded setup request * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue - * @link_cong: non-zero if owner must sleep because of link congestion + * @cong_link_cnt: number of congested links * @sent_unacked: # messages sent by socket, and not yet acked by peer * @rcv_unacked: # messages read by user, but not yet acked back to peer * @peer: 'connected' peer for dgram/rdm @@ -87,13 +89,13 @@ struct tipc_sock { u32 max_pkt; u32 portid; struct tipc_msg phdr; - struct list_head sock_list; + struct list_head cong_links; struct list_head publications; u32 pub_count; uint conn_timeout; atomic_t dupl_rcvcnt; bool probe_unacked; - bool link_cong; + u16 cong_link_cnt; u16 snt_unacked; u16 snd_win; u16 peer_caps; @@ -118,8 +120,7 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); static int tipc_sk_insert(struct tipc_sock *tsk); static void tipc_sk_remove(struct tipc_sock *tsk); -static int __tipc_send_stream(struct socket *sock, struct msghdr *m, - size_t dsz); +static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz); static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); static const struct proto_ops packet_ops; @@ -424,6 +425,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, tsk = tipc_sk(sk); tsk->max_pkt = MAX_PKT_DEFAULT; INIT_LIST_HEAD(&tsk->publications); + INIT_LIST_HEAD(&tsk->cong_links); msg = &tsk->phdr; tn = net_generic(sock_net(sk), tipc_net_id); tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, @@ -474,9 +476,14 @@ static void __tipc_shutdown(struct socket *sock, int error) struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); struct net *net = sock_net(sk); + long timeout = CONN_TIMEOUT_DEFAULT; u32 dnode = tsk_peer_node(tsk); struct sk_buff *skb; + /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */ + tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && + !tsk_conn_cong(tsk))); + /* Reject all unreceived messages, except on an active connection * (which disconnects locally & sends a 'FIN+' to peer). 
*/ @@ -547,7 +554,8 @@ static int tipc_release(struct socket *sock) /* Reject any messages that accumulated in backlog queue */ release_sock(sk); - + u32_list_purge(&tsk->cong_links); + tsk->cong_link_cnt = 0; call_rcu(&tsk->rcu, tipc_sk_callback); sock->sk = NULL; @@ -690,7 +698,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, switch (sk->sk_state) { case TIPC_ESTABLISHED: - if (!tsk->link_cong && !tsk_conn_cong(tsk)) + if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) mask |= POLLOUT; /* fall thru' */ case TIPC_LISTEN: @@ -699,7 +707,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, mask |= (POLLIN | POLLRDNORM); break; case TIPC_OPEN: - if (!tsk->link_cong) + if (!tsk->cong_link_cnt) mask |= POLLOUT; if (tipc_sk_type_connectionless(sk) && (!skb_queue_empty(&sk->sk_receive_queue))) @@ -718,63 +726,48 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, * @sock: socket structure * @seq: destination address * @msg: message to send - * @dsz: total length of message data - * @timeo: timeout to wait for wakeup + * @dlen: length of data to send + * @timeout: timeout to wait for wakeup * * Called from function tipc_sendmsg(), which has done all sanity checks * Returns the number of bytes sent on success, or errno */ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, - struct msghdr *msg, size_t dsz, long timeo) + struct msghdr *msg, size_t dlen, long timeout) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); + struct tipc_msg *hdr = &tsk->phdr; struct net *net = sock_net(sk); - struct tipc_msg *mhdr = &tsk->phdr; - struct sk_buff_head pktchain; - struct iov_iter save = msg->msg_iter; - uint mtu; + int mtu = tipc_bcast_get_mtu(net); + struct sk_buff_head pkts; int rc; - if (!timeo && tsk->link_cong) - return -ELINKCONG; - - msg_set_type(mhdr, TIPC_MCAST_MSG); - msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE); - msg_set_destport(mhdr, 0); - msg_set_destnode(mhdr, 0); - msg_set_nametype(mhdr, seq->type); - msg_set_namelower(mhdr, seq->lower); - msg_set_nameupper(mhdr, seq->upper); - msg_set_hdr_sz(mhdr, MCAST_H_SIZE); - - skb_queue_head_init(&pktchain); + rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt); + if (unlikely(rc)) + return rc; -new_mtu: - mtu = tipc_bcast_get_mtu(net); - rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain); - if (unlikely(rc < 0)) + msg_set_type(hdr, TIPC_MCAST_MSG); + msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); + msg_set_destport(hdr, 0); + msg_set_destnode(hdr, 0); + msg_set_nametype(hdr, seq->type); + msg_set_namelower(hdr, seq->lower); + msg_set_nameupper(hdr, seq->upper); + msg_set_hdr_sz(hdr, MCAST_H_SIZE); + + skb_queue_head_init(&pkts); + rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); + if (unlikely(rc != dlen)) return rc; - do { - rc = tipc_bcast_xmit(net, &pktchain); - if (likely(!rc)) - return dsz; - - if (rc == -ELINKCONG) { - tsk->link_cong = 1; - rc = tipc_wait_for_cond(sock, &timeo, !tsk->link_cong); - if (!rc) - continue; - } - __skb_queue_purge(&pktchain); - if (rc == -EMSGSIZE) { - msg->msg_iter = save; - goto new_mtu; - } - break; - } while (1); - return rc; + rc = tipc_bcast_xmit(net, &pkts); + if (unlikely(rc == -ELINKCONG)) { + tsk->cong_link_cnt = 1; + rc = 0; + } + + return rc ? 
rc : dlen; } /** @@ -898,35 +891,38 @@ static int tipc_sendmsg(struct socket *sock, return ret; } -static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) +static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) { - DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); struct sock *sk = sock->sk; - struct tipc_sock *tsk = tipc_sk(sk); struct net *net = sock_net(sk); - struct tipc_msg *mhdr = &tsk->phdr; - u32 dnode, dport; - struct sk_buff_head pktchain; - bool is_connectionless = tipc_sk_type_connectionless(sk); - struct sk_buff *skb; + struct tipc_sock *tsk = tipc_sk(sk); + DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); + long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); + struct list_head *clinks = &tsk->cong_links; + bool syn = !tipc_sk_type_connectionless(sk); + struct tipc_msg *hdr = &tsk->phdr; struct tipc_name_seq *seq; - struct iov_iter save; - u32 mtu; - long timeo; - int rc; + struct sk_buff_head pkts; + u32 type, inst, domain; + u32 dnode, dport; + int mtu, rc; - if (dsz > TIPC_MAX_USER_MSG_SIZE) + if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) return -EMSGSIZE; + if (unlikely(!dest)) { - if (is_connectionless && tsk->peer.family == AF_TIPC) - dest = &tsk->peer; - else + dest = &tsk->peer; + if (!syn || dest->family != AF_TIPC) return -EDESTADDRREQ; - } else if (unlikely(m->msg_namelen < sizeof(*dest)) || - dest->family != AF_TIPC) { - return -EINVAL; } - if (!is_connectionless) { + + if (unlikely(m->msg_namelen < sizeof(*dest))) + return -EINVAL; + + if (unlikely(dest->family != AF_TIPC)) + return -EINVAL; + + if (unlikely(syn)) { if (sk->sk_state == TIPC_LISTEN) return -EPIPE; if (sk->sk_state != TIPC_OPEN) @@ -938,72 +934,62 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) tsk->conn_instance = dest->addr.name.name.instance; } } - seq = &dest->addr.nameseq; - timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); - if (dest->addrtype == TIPC_ADDR_MCAST) { - return tipc_sendmcast(sock, seq, m, dsz, timeo); - } else if (dest->addrtype == TIPC_ADDR_NAME) { - u32 type = dest->addr.name.name.type; - u32 inst = dest->addr.name.name.instance; - u32 domain = dest->addr.name.domain; + seq = &dest->addr.nameseq; + if (dest->addrtype == TIPC_ADDR_MCAST) + return tipc_sendmcast(sock, seq, m, dlen, timeout); + if (dest->addrtype == TIPC_ADDR_NAME) { + type = dest->addr.name.name.type; + inst = dest->addr.name.name.instance; + domain = dest->addr.name.domain; dnode = domain; - msg_set_type(mhdr, TIPC_NAMED_MSG); - msg_set_hdr_sz(mhdr, NAMED_H_SIZE); - msg_set_nametype(mhdr, type); - msg_set_nameinst(mhdr, inst); - msg_set_lookup_scope(mhdr, tipc_addr_scope(domain)); + msg_set_type(hdr, TIPC_NAMED_MSG); + msg_set_hdr_sz(hdr, NAMED_H_SIZE); + msg_set_nametype(hdr, type); + msg_set_nameinst(hdr, inst); + msg_set_lookup_scope(hdr, tipc_addr_scope(domain)); dport = tipc_nametbl_translate(net, type, inst, &dnode); - msg_set_destnode(mhdr, dnode); - msg_set_destport(mhdr, dport); + msg_set_destnode(hdr, dnode); + msg_set_destport(hdr, dport); if (unlikely(!dport && !dnode)) return -EHOSTUNREACH; + } else if (dest->addrtype == TIPC_ADDR_ID) { dnode = dest->addr.id.node; - msg_set_type(mhdr, TIPC_DIRECT_MSG); - msg_set_lookup_scope(mhdr, 0); - msg_set_destnode(mhdr, dnode); - msg_set_destport(mhdr, dest->addr.id.ref); - msg_set_hdr_sz(mhdr, BASIC_H_SIZE); + msg_set_type(hdr, TIPC_DIRECT_MSG); + msg_set_lookup_scope(hdr, 0); + msg_set_destnode(hdr, dnode); + msg_set_destport(hdr, 
dest->addr.id.ref); + msg_set_hdr_sz(hdr, BASIC_H_SIZE); } - skb_queue_head_init(&pktchain); - save = m->msg_iter; -new_mtu: + /* Block or return if destination link is congested */ + rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode)); + if (unlikely(rc)) + return rc; + + skb_queue_head_init(&pkts); mtu = tipc_node_get_mtu(net, dnode, tsk->portid); - rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain); - if (rc < 0) + rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); + if (unlikely(rc != dlen)) return rc; - do { - skb = skb_peek(&pktchain); - TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; - rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid); - if (likely(!rc)) { - if (!is_connectionless) - tipc_set_sk_state(sk, TIPC_CONNECTING); - return dsz; - } - if (rc == -ELINKCONG) { - tsk->link_cong = 1; - rc = tipc_wait_for_cond(sock, &timeo, !tsk->link_cong); - if (!rc) - continue; - } - __skb_queue_purge(&pktchain); - if (rc == -EMSGSIZE) { - m->msg_iter = save; - goto new_mtu; - } - break; - } while (1); + rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); + if (unlikely(rc == -ELINKCONG)) { + u32_push(clinks, dnode); + tsk->cong_link_cnt++; + rc = 0; + } - return rc; + if (unlikely(syn && !rc)) + tipc_set_sk_state(sk, TIPC_CONNECTING); + + return rc ? rc : dlen; } /** - * tipc_send_stream - send stream-oriented data + * tipc_sendstream - send stream-oriented data * @sock: socket structure * @m: data to send * @dsz: total length of data to be transmitted @@ -1013,97 +999,69 @@ new_mtu: * Returns the number of bytes sent on success (or partial success), * or errno if no data sent */ -static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) +static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz) { struct sock *sk = sock->sk; int ret; lock_sock(sk); - ret = __tipc_send_stream(sock, m, dsz); + ret = __tipc_sendstream(sock, m, dsz); release_sock(sk); return ret; } -static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) +static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) { struct sock *sk = sock->sk; - struct net *net = sock_net(sk); - struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_msg *mhdr = &tsk->phdr; - struct sk_buff_head pktchain; DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); - u32 portid = tsk->portid; - int rc = -EINVAL; - long timeo; - u32 dnode; - uint mtu, send, sent = 0; - struct iov_iter save; - int hlen = MIN_H_SIZE; - - /* Handle implied connection establishment */ - if (unlikely(dest)) { - rc = __tipc_sendmsg(sock, m, dsz); - hlen = msg_hdr_sz(mhdr); - if (dsz && (dsz == rc)) - tsk->snt_unacked = tsk_inc(tsk, dsz + hlen); - return rc; - } - if (dsz > (uint)INT_MAX) - return -EMSGSIZE; - - if (unlikely(!tipc_sk_connected(sk))) { - if (sk->sk_state == TIPC_DISCONNECTING) - return -EPIPE; - else - return -ENOTCONN; - } + long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); + struct tipc_sock *tsk = tipc_sk(sk); + struct tipc_msg *hdr = &tsk->phdr; + struct net *net = sock_net(sk); + struct sk_buff_head pkts; + u32 dnode = tsk_peer_node(tsk); + int send, sent = 0; + int rc = 0; - timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); - if (!timeo && tsk->link_cong) - return -ELINKCONG; + skb_queue_head_init(&pkts); - dnode = tsk_peer_node(tsk); - skb_queue_head_init(&pktchain); + if (unlikely(dlen > INT_MAX)) + return -EMSGSIZE; -next: - save = m->msg_iter; - mtu = tsk->max_pkt; - send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); 
- rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain); - if (unlikely(rc < 0)) + /* Handle implicit connection setup */ + if (unlikely(dest)) { + rc = __tipc_sendmsg(sock, m, dlen); + if (dlen && (dlen == rc)) + tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr)); return rc; + } do { - if (likely(!tsk_conn_cong(tsk))) { - rc = tipc_node_xmit(net, &pktchain, dnode, portid); - if (likely(!rc)) { - tsk->snt_unacked += tsk_inc(tsk, send + hlen); - sent += send; - if (sent == dsz) - return dsz; - goto next; - } - if (rc == -EMSGSIZE) { - __skb_queue_purge(&pktchain); - tsk->max_pkt = tipc_node_get_mtu(net, dnode, - portid); - m->msg_iter = save; - goto next; - } - if (rc != -ELINKCONG) - break; - - tsk->link_cong = 1; - } - rc = tipc_wait_for_cond(sock, &timeo, - (!tsk->link_cong && + rc = tipc_wait_for_cond(sock, &timeout, + (!tsk->cong_link_cnt && !tsk_conn_cong(tsk) && tipc_sk_connected(sk))); - } while (!rc); + if (unlikely(rc)) + break; + + send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); + rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts); + if (unlikely(rc != send)) + break; + + rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); + if (unlikely(rc == -ELINKCONG)) { + tsk->cong_link_cnt = 1; + rc = 0; + } + if (likely(!rc)) { + tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE); + sent += send; + } + } while (sent < dlen && !rc); - __skb_queue_purge(&pktchain); - return sent ? sent : rc; + return rc ? rc : sent; } /** @@ -1121,7 +1079,7 @@ static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) if (dsz > TIPC_MAX_USER_MSG_SIZE) return -EMSGSIZE; - return tipc_send_stream(sock, m, dsz); + return tipc_sendstream(sock, m, dsz); } /* tipc_sk_finish_conn - complete the setup of a connection @@ -1688,6 +1646,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb, unsigned int limit = rcvbuf_limit(sk, skb); int err = TIPC_OK; int usr = msg_user(hdr); + u32 onode; if (unlikely(msg_user(hdr) == CONN_MANAGER)) { tipc_sk_proto_rcv(tsk, skb, xmitq); @@ -1695,8 +1654,10 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb, } if (unlikely(usr == SOCK_WAKEUP)) { + onode = msg_orignode(hdr); kfree_skb(skb); - tsk->link_cong = 0; + u32_del(&tsk->cong_links, onode); + tsk->cong_link_cnt--; sk->sk_write_space(sk); return false; } @@ -2104,7 +2065,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) struct msghdr m = {NULL,}; tsk_advance_rx_queue(sk); - __tipc_send_stream(new_sock, &m, 0); + __tipc_sendstream(new_sock, &m, 0); } else { __skb_dequeue(&sk->sk_receive_queue); __skb_queue_head(&new_sk->sk_receive_queue, buf); @@ -2565,7 +2526,7 @@ static const struct proto_ops stream_ops = { .shutdown = tipc_shutdown, .setsockopt = tipc_setsockopt, .getsockopt = tipc_getsockopt, - .sendmsg = tipc_send_stream, + .sendmsg = tipc_sendstream, .recvmsg = tipc_recv_stream, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage -- cgit v1.2.3 From 1fe954b2097bb907b4578e6a74e4c1d23785a601 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 28 Sep 2016 16:01:48 -0700 Subject: ixgbe: do not disable FEC from the driver FEC is configured by the NVM and the driver should not be overriding it. 
Signed-off-by: Emil Tantilov Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 11fb433eb924..f06b0e1bfdbf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2108,8 +2108,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, return status; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC); reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); -- cgit v1.2.3 From cb8e051446ae554aae38163d3421edc793221784 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 26 Oct 2016 16:25:18 -0700 Subject: ixgbe: Report driver version to firmware for x550 devices Some x550 devices require the driver version reported to its firmware; this patch sends the driver version string to the firmware through the host interface command for x550 devices. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 7 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 3 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 14 +++++- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 58 ++++++++++++++++++++++++- 5 files changed, 80 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 8832df3eba25..5d8f1ae125a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3578,7 +3578,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ -static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) { u32 i; u8 sum = 0; @@ -3722,6 +3722,8 @@ rel_out: * @min: driver version minor number * @build: driver version build number * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string * * Sends driver version number to firmware through the manageability * block. On success return 0 @@ -3729,7 +3731,8 @@ rel_out: * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
**/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub) + u8 build, u8 sub, __always_unused u16 len, + __always_unused const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; int i; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 5b3e3c65927e..393aeabe2d8f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -111,7 +111,8 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 ver); + u8 build, u8 ver, u16 len, const char *str); +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1e2f39ebd824..6732c905926d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9833,8 +9833,9 @@ skip_sriov: * since os does not support feature */ if (hw->mac.ops.set_fw_drv_ver) - hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, - 0xFF); + hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, + sizeof(ixgbe_driver_version) - 1, + ixgbe_driver_version); /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index cf21273db201..0d855115987d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2619,6 +2619,7 @@ enum ixgbe_fdir_pballoc_type { #define FW_CEM_UNUSED_VER 0x0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ #define FW_READ_SHADOW_RAM_CMD 0x31 #define FW_READ_SHADOW_RAM_LEN 0x6 #define FW_WRITE_SHADOW_RAM_CMD 0x33 @@ -2686,6 +2687,16 @@ struct ixgbe_hic_drv_info { u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ }; +struct ixgbe_hic_drv_info2 { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; +}; + /* These need to be dword aligned */ struct ixgbe_hic_read_shadow_ram { union ixgbe_hic_hdr2 hdr; @@ -3362,7 +3373,8 @@ struct ixgbe_mac_operations { void (*fc_autoneg)(struct ixgbe_hw *); /* Manageability interface */ - s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, + const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index f06b0e1bfdbf..92f2b5ea38c1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2354,6 +2354,62 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) return 0; } +/** + * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver) +{ + struct ixgbe_hic_drv_info2 fw_cmd; + s32 ret_val; + int i; + + if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) + return IXGBE_ERR_INVALID_ARGUMENT; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + memcpy(fw_cmd.driver_string, driver_ver, len); + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status != + FW_CEM_RESP_STATUS_SUCCESS) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return 0; + } + + return ret_val; +} + /** ixgbe_get_lcd_x550em - Determine lowest common denominator * @hw: pointer to hardware structure * @lcd_speed: pointer to lowest common link speed @@ -3273,7 +3329,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .clear_vfta = &ixgbe_clear_vfta_generic, \ .set_vfta = &ixgbe_set_vfta_generic, \ .fc_enable = &ixgbe_fc_enable_generic, \ - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ .init_uta_tables = &ixgbe_init_uta_tables_generic, \ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ -- cgit v1.2.3 From 5c092749e304d8b49567be633d5be31393538e3b Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Mon, 31 Oct 2016 
12:11:58 -0700 Subject: ixgbe: Fix check for ixgbe_phy_x550em_ext_t reset The generic PHY reset check we had previously is not sufficient for the ixgbe_phy_x550em_ext_t PHY type. Check 1.CC02.0 instead - same as ixgbe_init_ext_t_x550(). Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 3b8362085f57..b27293863204 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -452,10 +452,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) */ for (i = 0; i < 30; i++) { msleep(100); - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl); - if (!(ctrl & MDIO_CTRL1_RESET)) { - udelay(2); - break; + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + MDIO_MMD_PMAPMD, &ctrl); + if (status) + return status; + + if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + udelay(2); + break; + } + } else { + status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, &ctrl); + if (status) + return status; + + if (!(ctrl & MDIO_CTRL1_RESET)) { + udelay(2); + break; + } } } -- cgit v1.2.3 From 2bf1a87b903bd81b1448a1cef73de59fb6c4d340 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 4 Nov 2016 14:03:03 -0700 Subject: ixgbe: add mask for 64 RSS queues The indirection table was reported incorrectly for X550 and newer where we can support up to 64 RSS queues. Reported-by Krishneil Singh Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 15ab337fd7ad..10d29678d65e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -308,6 +308,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ixgbe_cache_ring_rss(adapter); } +#define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 #define IXGBE_RSS_4Q_MASK 0x3 @@ -604,6 +605,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) **/ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) { + struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *f; u16 rss_i; @@ -612,7 +614,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - f->mask = IXGBE_RSS_16Q_MASK; + + if (hw->mac.type < ixgbe_mac_X550) + f->mask = IXGBE_RSS_16Q_MASK; + else + f->mask = IXGBE_RSS_64Q_MASK; /* disable ATR by default, it will be configured below */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; -- cgit v1.2.3 From 910c9c0f59567ec204924d88ca04337bb04f17d9 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 9 Nov 2016 10:48:48 -0800 Subject: ixgbe: Add bounds check for x540 LED functions This is an extension of commit 003287e0f087 ("ixgbevf: Correct parameter sent to LED function"); add bounds checking to x540 functions to ensure the index is valid. 
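For illustration, the added guard is just a range check on the LED index before any LEDCTL access; the new "index > 3" test implies that only LEDs 0-3 are valid, so a minimal sketch of the pattern (using the driver's existing IXGBE_ERR_PARAM code, not the literal patch) looks like:

/* Hedged sketch: validate the LED index before building its shift/mask. */
static s32 example_check_led_index(u32 index)
{
	if (index > 3)	/* LEDCTL carries fields for LEDs 0-3 only */
		return IXGBE_ERR_PARAM;
	return 0;
}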
Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index e2ff823ee202..afc4d16b0b8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -780,8 +780,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) ixgbe_link_speed speed; bool link_up; - /* - * Link should be up in order for the blink bit in the LED control + if (index > 3) + return IXGBE_ERR_PARAM; + + /* Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. * This will be reversed when we stop the blinking. */ @@ -814,6 +816,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) u32 macc_reg; u32 ledctl_reg; + if (index > 3) + return IXGBE_ERR_PARAM; + /* Restore the LED to its default value. */ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); -- cgit v1.2.3 From 3f0d646b720d541309b11e190db58086f446f41e Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Thu, 10 Nov 2016 09:57:29 -0800 Subject: ixgbe: Reduce I2C retry count on X550 devices A retry count of 10 is likely to run into problems on X550 devices that have to detect and reset unresponsive CS4227 devices. So, reduce the I2C retry count to 3 for X550 and above. This should avoid any possible regressions in existing devices. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index b27293863204..019b3c20b5c2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 10; + int max_retry = 3; int retry = 0; u8 csum_byte; u8 high_bits; @@ -1755,6 +1755,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; -- cgit v1.2.3 From f215266470dfe86196a31fe0725a86cea77f9a18 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Thu, 10 Nov 2016 16:00:33 -0800 Subject: ixgbe: Fix reporting of 100Mb capability BaseT adapters that are capable of supporting 100Mb are not reporting this capability. This patch corrects the reporting so that 100Mb is shown as supported on those adapters. 
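For context, a minimal sketch of how the supported-link bits are meant to map to ethtool flags once this is corrected: the 100Mb branch reports SUPPORTED_100baseT_Full, while only the 1Gb backplane case keeps the KX flag (ixgbe_isbackplane() and the SUPPORTED_* constants are the existing symbols; the helper itself is hypothetical):

/* Illustrative mapping only, not the driver's function. */
static u32 example_supported_flags(struct ixgbe_hw *hw, u32 supported_link)
{
	u32 supported = 0;

	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= ixgbe_isbackplane(hw->phy.media_type) ?
			     SUPPORTED_1000baseKX_Full :
			     SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;

	return supported;
}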
Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index fd192bf29b26..8789f214a4f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -199,7 +199,7 @@ static int ixgbe_get_settings(struct net_device *netdev, if (supported_link & IXGBE_LINK_SPEED_100_FULL) ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; + SUPPORTED_100baseT_Full; /* default advertised speed if phy.autoneg_advertised isn't set */ ecmd->advertising = ecmd->supported; -- cgit v1.2.3 From f7f37e7ff2b9b7eff7fbd035569cab35896869a3 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 11 Nov 2016 10:07:47 -0800 Subject: ixgbe: handle close/suspend race with netif_device_detach/present When an interface is part of a namespace it is possible that ixgbe_close() may be called while __ixgbe_shutdown() is running which ends up in a double free WARN and/or a BUG in free_msi_irqs(). To handle this situation we extend the rtnl_lock() to protect the call to netif_device_detach() and ixgbe_clear_interrupt_scheme() in __ixgbe_shutdown() and check for netif_device_present() to avoid clearing the interrupts second time in ixgbe_close(); Also extend the rtnl lock in ixgbe_resume() to netif_device_attach(). Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6732c905926d..476f01282588 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6200,7 +6200,8 @@ int ixgbe_close(struct net_device *netdev) ixgbe_ptp_stop(adapter); - ixgbe_close_suspend(adapter); + if (netif_device_present(netdev)) + ixgbe_close_suspend(adapter); ixgbe_fdir_filter_exit(adapter); @@ -6245,14 +6246,12 @@ static int ixgbe_resume(struct pci_dev *pdev) if (!err && netif_running(netdev)) err = ixgbe_open(netdev); - rtnl_unlock(); - if (err) - return err; - - netif_device_attach(netdev); + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); - return 0; + return err; } #endif /* CONFIG_PM */ @@ -6267,14 +6266,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); - rtnl_lock(); if (netif_running(netdev)) ixgbe_close_suspend(adapter); - rtnl_unlock(); ixgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); -- cgit v1.2.3 From 2dad7b2775ea030c898fe4946971edd25af237d1 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 11 Nov 2016 10:12:51 -0800 Subject: ixgbevf: handle race between close and suspend on shutdown When an interface is part of a namespace it is possible that ixgbevf_close() may be called while ixgbevf_suspend() is running which ends up in a double free WARN and/or a BUG in free_msi_irqs() To handle this situation we extend the rtnl_lock() to protect the call to netif_device_detach() and check for !netif_device_present() to avoid entering close while in suspend. Also added rtnl locks to ixgbevf_queue_reset_subtask(). 
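The locking pattern that both the ixgbe and ixgbevf patches converge on can be summarized in a hedged sketch (the example_close_suspend() helper is hypothetical; the netif_* and rtnl calls are the standard kernel APIs): detach under rtnl so close() sees the device as absent, tear down only if the interface was running, and have close() bail out once the device has already been detached:

/* Sketch of the suspend/close interaction, not the literal driver code. */
static void example_suspend(struct net_device *netdev)
{
	rtnl_lock();
	netif_device_detach(netdev);		/* close() now sees !present */
	if (netif_running(netdev))
		example_close_suspend(netdev);	/* hypothetical: down + free IRQs */
	rtnl_unlock();
}

static int example_close(struct net_device *netdev)
{
	if (!netif_device_present(netdev))	/* already torn down by suspend */
		return 0;
	example_close_suspend(netdev);
	return 0;
}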
CC: Alexander Duyck Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 6d4bef5803f2..8574f212a67e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3242,6 +3242,9 @@ int ixgbevf_close(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + if (!netif_device_present(netdev)) + return 0; + ixgbevf_down(adapter); ixgbevf_free_irq(adapter); @@ -3268,6 +3271,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. */ + rtnl_lock(); + if (netif_running(dev)) ixgbevf_close(dev); @@ -3276,6 +3281,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) if (netif_running(dev)) ixgbevf_open(dev); + + rtnl_unlock(); } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, @@ -3796,17 +3803,17 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) { - rtnl_lock(); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); ixgbevf_clear_interrupt_scheme(adapter); - rtnl_unlock(); } + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); -- cgit v1.2.3 From a9d2d53a788a9c5bc8a7d1b4ea7857b68e221357 Mon Sep 17 00:00:00 2001 From: Ken Cox Date: Tue, 15 Nov 2016 13:00:37 -0600 Subject: ixgbe: test for trust in macvlan adjustments for VF There are two methods for setting mac addresses in a Macvlan, that differentiate themselves in the function macvlan_set_mac_Address. If the macvlan mode is passthru, then we use the dev_set_mac_address method, otherwise we use the dev_uc api via macvlan_sync_addresses. The latter method (which would stem from using any non-passthru mode, like bridge, or vepa), calls down into the driver in a path that terminates in ixgbevf_set_uc_addr_vf, which sends a IXGBE_VF_SET_MACVLAN message, which causes the pf to spawn the noted error message. This occurs because it appears that the guest is trying to delete the mac address of the macvlan before adding another. The other path in macvlan_set_mac_address uses dev_set_mac_address, which calls into ixgbevf_set_mac which uses the IXGBE_VF_SET_MAC_ADDR to the pf to set the macvlan mac address. The discrepancy here is in the handlers. The handler function for IXGBE_VF_SET_MAC_ADDR (ixgbe_set_vf_mac_addr) has a check for the vfinfo[].trusted bit to allow the operation if the vf is trusted. In comparison, the IXGBE_VF_SET_MACVLAN message handler (ixgbe_set_vf_macvlan_msg) has no such check of the trusted bit. 
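Put differently, the MACVLAN message handler should honour the same trust test as the MAC-address handler; a hedged sketch of the intended condition (the surrounding ixgbe_set_vf_macvlan_msg() body is elided):

/* Illustrative only: deny the filter when the PF set the MAC and the VF
 * has not been marked trusted (ndo_set_vf_trust). */
if (adapter->vfinfo[vf].pf_set_mac &&
    !adapter->vfinfo[vf].trusted && index > 0) {
	e_warn(drv,
	       "VF %d requested MACVLAN filter but is administratively denied\n",
	       vf);
	return -1;
}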
Signed-off-by: Ken Cox Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7e5d9850e4b2..dd1187c3b117 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -934,7 +934,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, IXGBE_VT_MSGINFO_SHIFT; int err; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + index > 0) { e_warn(drv, "VF %d requested MACVLAN filter but is administratively denied\n", vf); -- cgit v1.2.3 From 126db13fa0e6d05c9f94e0125f61e773bd5ab079 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 16 Nov 2016 09:48:02 -0800 Subject: ixgbe: fix AER error handling Make sure that we free the IRQs in ixgbe_io_error_detected() when responding to an PCIe AER error and also restore them when the interface recovers from it. Previously it was possible to trigger BUG_ON() check in free_msix_irqs() in the case where we call ixgbe_remove() after a failed recovery from AER error because the interrupts were not freed. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 476f01282588..9ca167c5d1fa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10082,7 +10082,7 @@ skip_bad_vf_detection: } if (netif_running(netdev)) - ixgbe_down(adapter); + ixgbe_close_suspend(adapter); if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -10152,10 +10152,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev) } #endif + rtnl_lock(); if (netif_running(netdev)) - ixgbe_up(adapter); + ixgbe_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } static const struct pci_error_handlers ixgbe_err_handler = { -- cgit v1.2.3 From b19cf6eea9e2a497e6475fd02c0703f0b3a6d083 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 16 Nov 2016 11:25:34 -0800 Subject: ixgbevf: fix AER error handling Make sure that we free the IRQs in ixgbevf_io_error_detected() when responding to an PCIe AER error and also restore them when the interface recovers from it. Previously it was possible to trigger BUG_ON() check in free_msix_irqs() in the case where we call ixgbevf_remove() after a failed recovery from AER error because the interrupts were not freed. Also moved the down and free functions into ixgbevf_close_suspend() same as with ixgbe. 
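A condensed, hedged sketch of the AER flow after this change (handler bodies are simplified, and example_close_suspend()/example_open() stand in for the driver's own helpers): the error-detected hook frees IRQs and rings via the suspend-style teardown, and the resume hook re-opens the interface so they are re-allocated:

/* Illustrative AER handlers, not the driver's code. */
static pci_ers_result_t example_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev))
		example_close_suspend(netdev);	/* down + free IRQs and rings */

	return PCI_ERS_RESULT_NEED_RESET;
}

static void example_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();
	if (netif_running(netdev))
		example_open(netdev);		/* re-allocates IRQs and rings */
	netif_device_attach(netdev);
	rtnl_unlock();
}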
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 43 +++++++++++++---------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 8574f212a67e..a78e4901118b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3227,6 +3227,21 @@ err_setup_reset: return err; } +/** + * ixgbevf_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter) +{ + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); +} + /** * ixgbevf_close - Disables a network interface * @netdev: network interface device structure @@ -3242,14 +3257,8 @@ int ixgbevf_close(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - if (!netif_device_present(netdev)) - return 0; - - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); + if (netif_device_present(netdev)) + ixgbevf_close_suspend(adapter); return 0; } @@ -3806,13 +3815,10 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) rtnl_lock(); netif_device_detach(netdev); - if (netif_running(netdev)) { - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); - ixgbevf_clear_interrupt_scheme(adapter); - } + if (netif_running(netdev)) + ixgbevf_close_suspend(adapter); + + ixgbevf_clear_interrupt_scheme(adapter); rtnl_unlock(); #ifdef CONFIG_PM @@ -4251,7 +4257,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, } if (netif_running(netdev)) - ixgbevf_down(adapter); + ixgbevf_close_suspend(adapter); if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -4299,12 +4305,13 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); if (netif_running(netdev)) - ixgbevf_up(adapter); + ixgbevf_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } /* PCI Error Recovery (ERS) */ -- cgit v1.2.3 From aeb4c73100be8aade8a1189b50bd226b709ca8bb Mon Sep 17 00:00:00 2001 From: Yusuke Suzuki Date: Mon, 21 Nov 2016 06:48:45 +0000 Subject: ixgbe: Fix incorrect bitwise operations of PTP Rx timestamp flags Rx timestamp does not work on 82599 and X540 because bitwise operation of RX_HWTSTAMP flags is incorrect and ixgbe_ptp_rx_hwtstamp() is never called. This patch fixes it to enable Rx timestamp on 82599 and X540. 
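The root cause is simply the direction of the bitwise update: "&= ~mask" clears flag bits, while "|= mask" sets them, so the old code was disabling Rx timestamping in exactly the cases that were meant to enable it. A generic illustration (not driver code):

/* Illustrative only: enabling vs. disabling two flag bits. */
#define FLAG_A	BIT(0)
#define FLAG_B	BIT(1)

u32 flags = 0;

flags |= (FLAG_A | FLAG_B);	/* enable both: flags == 0x3 */
flags &= ~(FLAG_A | FLAG_B);	/* disable both: flags == 0x0 again */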
Without this fix: ptp4l[278.730]: selected /dev/ptp8 as PTP clock ptp4l[278.733]: port 1: INITIALIZING to LISTENING on INITIALIZE ptp4l[278.733]: port 0: INITIALIZING to LISTENING on INITIALIZE ptp4l[278.834]: port 1: received SYNC without timestamp ptp4l[278.835]: port 1: new foreign master 1c3947.fffe.60f9cc-1 ptp4l[279.834]: port 1: received SYNC without timestamp ptp4l[280.834]: port 1: received SYNC without timestamp ptp4l[281.834]: port 1: received SYNC without timestamp ptp4l[282.834]: port 1: received SYNC without timestamp ptp4l[282.835]: selected best master clock 1c3947.fffe.60f9cc ptp4l[282.835]: port 1: LISTENING to UNCALIBRATED on RS_SLAVE ptp4l[283.834]: port 1: received SYNC without timestamp With this fix: ptp4l[239.154]: selected /dev/ptp8 as PTP clock ptp4l[239.157]: port 1: INITIALIZING to LISTENING on INITIALIZE ptp4l[239.157]: port 0: INITIALIZING to LISTENING on INITIALIZE ptp4l[240.989]: port 1: new foreign master 1c3947.fffe.60f9cc-1 ptp4l[244.989]: selected best master clock 1c3947.fffe.60f9cc ptp4l[244.989]: port 1: LISTENING to UNCALIBRATED on RS_SLAVE ptp4l[246.977]: master offset -899583339542096 s0 freq +0 path delay 16222 ptp4l[247.977]: master offset -899583339617265 s1 freq -75169 path delay 16177 ptp4l[248.977]: master offset -130 s2 freq -75299 path delay 16177 ptp4l[248.977]: port 1: UNCALIBRATED to SLAVE on MASTER_CLOCK_SELECTED ptp4l[249.977]: master offset -9 s2 freq -75217 path delay 16177 ptp4l[250.977]: master offset 88 s2 freq -75123 path delay 16132 Fixes: a9763f3cb54c ("ixgbe: Update PTP to support X550EM_x devices") Signed-off-by: Yusuke Suzuki Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 1efb404431e9..ef0635e0918c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -858,14 +858,14 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -879,8 +879,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: -- cgit v1.2.3 From 26403b7fde65ecfc74f5571e3cfa602dc9b3d6cb Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 23 Nov 2016 11:24:08 -0800 Subject: ixgbevf: restore hw_addr on resume or error Restore adapter->hw.hw_addr after handling an 
error, or a resume operation to make sure we can access the registers. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a78e4901118b..dc23f56d5c10 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3851,6 +3851,8 @@ static int ixgbevf_resume(struct pci_dev *pdev) dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } + + adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); @@ -4285,6 +4287,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } + adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); -- cgit v1.2.3 From 54f6d4c42451dbd2cc7e0f0bd8fc3eddcab511fe Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 13 Dec 2016 20:34:51 -0500 Subject: ixgbe: Configure advertised speeds correctly for KR/KX backplane This patch ensures that the advertised link speeds are configured for X553 KR/KX backplane. Without this patch the link remains at 1G when resuming from low power after being downshifted by LPLU. Signed-off-by: Don Skidmore Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 92f2b5ea38c1..e3d14f4a53f2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2187,12 +2187,11 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) /** * ixgbe_setup_kr_x550em - Configure the KR PHY * @hw: pointer to hardware structure - * - * Configures the integrated KR PHY for X550EM_x. **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { - if (hw->mac.type != ixgbe_mac_X550EM_x) + /* leave link alone for 2.5G */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) return 0; return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); -- cgit v1.2.3 From 3efa9ed260ce838976eb9177bae7249caf7a2aa1 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Wed, 14 Dec 2016 11:02:00 -0800 Subject: ixgbe: Fix issues with EEPROM access There are two problems with EEPROM access. One is that it needs to hold the semaphore until the entire response is read or else the response can be corrupted by other firmware accesses. The second problem is that acquiring and releasing the semaphore is slow, so it should be taken and released once when multiple EEPROM accesses will be done. Both of these issues can be solved by adding a new function, ixgbe_hic_unlocked, to issue firmware commands that will assume that the caller has acquired the needed semaphore. 
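The intended usage pattern, sketched loosely (the request-buffer setup and response copy are elided; the swfw_sync ops and ixgbe_hic_unlocked() are names the patch introduces or already uses, while "buffer" and "words_left" are placeholders): take the semaphore once, issue as many unlocked commands as needed, then release it:

/* Sketch: hold the SW_MNG/EEPROM semaphores across several FW commands. */
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
s32 status;

status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status)
	return status;

while (words_left) {
	/* ... fill 'buffer' with the next FW_READ_SHADOW_RAM_CMD request ... */
	status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
				    IXGBE_HI_COMMAND_TIMEOUT);
	if (status)
		break;
	/* ... copy the response words out of IXGBE_FLEX_MNG ... */
}

hw->mac.ops.release_swfw_sync(hw, mask);
return status;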
Signed-off-by: Mark Rustad Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 97 ++++++++++++++++--------- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 75 +++++++------------ 3 files changed, 91 insertions(+), 82 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 5d8f1ae125a5..851f48555506 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3593,43 +3593,29 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) } /** - * ixgbe_host_interface_command - Issue command to manageability block + * ixgbe_hic_unlocked - Issue command to manageability block unlocked * @hw: pointer to the HW structure - * @buffer: contains the command to write and where the return status will - * be placed + * @buffer: command to write and where the return status will be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. * - * Communicates with the manageability block. On success return 0 - * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + * Communicates with the manageability block. On success return 0 + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * + * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held + * by the caller. **/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - u32 length, u32 timeout, - bool return_data) +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, + u32 timeout) { - u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u32 hicr, i, bi, fwsts; - u16 buf_len, dword_len; - union { - struct ixgbe_hic_hdr hdr; - u32 u32arr[1]; - } *bp = buffer; - s32 status; + u32 hicr, i, fwsts; + u16 dword_len; if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } - /* Take management host interface semaphore */ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); - if (status) - return status; /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); @@ -3639,15 +3625,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs. 
We must be DWORD aligned */ if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); - status = IXGBE_ERR_INVALID_ARGUMENT; - goto rel_out; + return IXGBE_ERR_INVALID_ARGUMENT; } dword_len = length >> 2; @@ -3657,7 +3641,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(bp->u32arr[i])); + i, cpu_to_le32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3671,11 +3655,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* Check command successful completion. */ if ((timeout && i == timeout) || - !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { - hw_dbg(hw, "Command has failed with no status valid.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + + return 0; +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
+ **/ +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + u32 length, u32 timeout, + bool return_data) +{ + u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + union { + struct ixgbe_hic_hdr hdr; + u32 u32arr[1]; + } *bp = buffer; + u16 buf_len, dword_len; + s32 status; + u32 bi; + + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, buffer, length, timeout); + if (status) + goto rel_out; if (!return_data) goto rel_out; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 393aeabe2d8f..671a0cdf6935 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -115,6 +115,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index e3d14f4a53f2..398973eed903 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -624,41 +624,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, return status; } -/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface - * command assuming that the semaphore is already obtained. - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the hostif. - **/ -static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - u16 *data) -{ - s32 status; - struct ixgbe_hic_read_shadow_ram buffer; - - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = cpu_to_be32(offset * 2); - /* one word */ - buffer.length = cpu_to_be16(sizeof(u16)); - - status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, false); - if (status) - return status; - - *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, - FW_NVM_DATA_OFFSET); - - return 0; -} - /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read @@ -670,6 +635,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; u32 current_word = 0; u16 words_to_read; @@ -677,7 +643,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u32 i; /* Take semaphore for the entire operation. 
*/ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) { hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); return status; @@ -698,10 +664,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); - status = ixgbe_host_interface_command(hw, &buffer, - sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, - false); + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); if (status) { hw_dbg(hw, "Host interface command failed\n"); goto out; @@ -725,7 +689,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, } out: - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + hw->mac.ops.release_swfw_sync(hw, mask); return status; } @@ -896,15 +860,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) **/ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) { - s32 status = 0; + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + s32 status; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { - status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (!status) { + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); } + hw->mac.ops.release_swfw_sync(hw, mask); return status; } -- cgit v1.2.3 From d2a10ae72ed61759638ec06927bc9850c278d310 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Wed, 14 Dec 2016 11:02:06 -0800 Subject: ixgbe: Remove unused firmware version functions and method The firmware version method and functions are not used anywhere, so remove them all. 
Signed-off-by: Mark Rustad Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 2 -- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 2 -- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 34 -------------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 4 --- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 3 +-- 7 files changed, 1 insertion(+), 46 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 805ab319e578..523f9d05a810 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) case ixgbe_phy_tn: phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: phy->ops.reset = &ixgbe_reset_phy_nl; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e00aaeb91827..30535e6b68f0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; break; default: break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 019b3c20b5c2..67a399bab6f0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -976,40 +976,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) return 0; } -/** - * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - status = hw->phy.ops.read_reg(hw, AQ_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - /** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index ecf05f838fc5..5aa2c3cf7aec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -168,10 +168,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version); -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_set_copper_phy_power(struct 
ixgbe_hw *hw, bool on); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 0d855115987d..5c59b3c440ec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3404,7 +3404,6 @@ struct ixgbe_phy_operations { s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); - s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index afc4d16b0b8b..84a467a8ed3d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -918,7 +918,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = { .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, .set_phy_power = &ixgbe_set_copper_phy_power, - .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, }; static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 398973eed903..5a6c4b7f7e33 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3419,8 +3419,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ .set_phy_power = NULL, \ - .check_overtemp = &ixgbe_tn_check_overtemp, \ - .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY -- cgit v1.2.3 From 12c78ef0982201463f87494bedf289c094b24853 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Wed, 14 Dec 2016 11:02:11 -0800 Subject: ixgbe: Implement firmware interface to access some PHYs Implement new interface for firmware commands to access some PHYs. 
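A hedged usage sketch of the new helper (the activity ID and response bit are the FW_PHY_ACT_* defines added below; error handling is condensed and the surrounding function is assumed to have "hw" in scope):

/* Illustrative caller: query link info from a firmware-managed PHY. */
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;

rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
if (rc)
	return rc;

if (info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)
	hw_dbg(hw, "firmware PHY reports autoneg complete\n");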
Signed-off-by: Mark Rustad Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 2 + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 66 +++++++++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 45 +++++++++++++++++ 3 files changed, 113 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 671a0cdf6935..e083732adf64 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -116,6 +116,8 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, u32 timeout, bool return_data); s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 5c59b3c440ec..a8f2d7035241 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2645,6 +2645,59 @@ enum ixgbe_fdir_pballoc_type { #define FW_INT_PHY_REQ_LEN 10 #define FW_INT_PHY_REQ_READ 0 #define FW_INT_PHY_REQ_WRITE 1 +#define FW_PHY_ACT_REQ_CMD 5 +#define FW_PHY_ACT_DATA_COUNT 4 +#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) +#define FW_PHY_ACT_INIT_PHY 1 +#define FW_PHY_ACT_SETUP_LINK 2 +#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) +#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) +#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) +#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) +#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) +#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) +#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) +#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) +#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) +#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) +#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 +#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ + HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u +#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) +#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) +#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) +#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) +#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) +#define FW_PHY_ACT_GET_LINK_INFO 3 +#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) +#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) +#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) +#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) +#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) +#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) +#define FW_PHY_ACT_FORCE_LINK_DOWN 4 +#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) +#define FW_PHY_ACT_PHY_SW_RESET 5 +#define FW_PHY_ACT_PHY_HW_RESET 6 +#define FW_PHY_ACT_GET_PHY_INFO 7 +#define FW_PHY_ACT_UD_2 0x1002 +#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) +#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) +#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) +#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) +#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) +#define FW_PHY_ACT_UD_2_100M_TX_EEE 
BIT(1) +#define FW_PHY_ACT_RETRIES 50 +#define FW_PHY_INFO_SPEED_MASK 0xFFFu +#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u +#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2745,6 +2798,19 @@ struct ixgbe_hic_internal_phy_resp { __be32 read_data; }; +struct ixgbe_hic_phy_activity_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad; + __le16 activity_id; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +struct ixgbe_hic_phy_activity_resp { + struct ixgbe_hic_hdr hdr; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 5a6c4b7f7e33..9dc6079f2ac4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -402,6 +402,51 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); } +/** + * ixgbe_fw_phy_activity - Perform an activity on a PHY + * @hw: pointer to hardware structure + * @activity: activity to perform + * @data: Pointer to 4 32-bit words of data + */ +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]) +{ + union { + struct ixgbe_hic_phy_activity_req cmd; + struct ixgbe_hic_phy_activity_resp rsp; + } hic; + u16 retries = FW_PHY_ACT_RETRIES; + s32 rc; + u32 i; + + do { + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; + hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.activity_id = cpu_to_le16(activity); + for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) + hic.cmd.data[i] = cpu_to_be32((*data)[i]); + + rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (rc) + return rc; + if (hic.rsp.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + (*data)[i] = be32_to_cpu(hic.rsp.data[i]); + return 0; + } + usleep_range(20, 30); + --retries; + } while (retries > 0); + + return IXGBE_ERR_HOST_INTERFACE_COMMAND; +} + /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params * @hw: pointer to hardware structure * -- cgit v1.2.3 From b3eb4e1860f3595431f74064870c36da295a9fbe Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Wed, 14 Dec 2016 11:02:16 -0800 Subject: ixgbe: Implement support for firmware-controlled PHYs Implement support for devices that have firmware-controlled PHYs. 
Signed-off-by: Mark Rustad Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4 + drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 9 + drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 153 ++++++++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 64 ++++ drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 3 + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 7 + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 408 ++++++++++++++++++++++- 7 files changed, 642 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ef81c3d8c295..53fb427b1c29 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -661,6 +661,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) +#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) +#define IXGBE_FLAG2_EEE_ENABLED BIT(15) /* Tx fast path data */ int num_tx_queues; @@ -862,6 +864,7 @@ enum ixgbe_boards { board_X550, board_X550EM_x, board_x550em_a, + board_x550em_a_fw, }; extern const struct ixgbe_info ixgbe_82598_info; @@ -870,6 +873,7 @@ extern const struct ixgbe_info ixgbe_X540_info; extern const struct ixgbe_info ixgbe_X550_info; extern const struct ixgbe_info ixgbe_X550EM_x_info; extern const struct ixgbe_info ixgbe_x550em_a_info; +extern const struct ixgbe_info ixgbe_x550em_a_fw_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 851f48555506..094e1d63309a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -100,6 +100,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: supported = true; break; default: @@ -3382,6 +3384,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, else *speed = IXGBE_LINK_SPEED_100_FULL; break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { + *speed = IXGBE_LINK_SPEED_10_FULL; + } + break; default: *speed = IXGBE_LINK_SPEED_UNKNOWN; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 8789f214a4f9..17589068da78 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -197,15 +197,17 @@ static int ixgbe_get_settings(struct net_device *netdev, SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? 
- SUPPORTED_1000baseKX_Full : - SUPPORTED_100baseT_Full; + ecmd->supported |= SUPPORTED_100baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10_FULL) + ecmd->supported |= SUPPORTED_10baseT_Full; /* default advertised speed if phy.autoneg_advertised isn't set */ ecmd->advertising = ecmd->supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { ecmd->advertising = 0; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) @@ -237,6 +239,7 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_tn: case ixgbe_phy_aq: case ixgbe_phy_x550em_ext_t: + case ixgbe_phy_fw: case ixgbe_phy_cu_unknown: ecmd->supported |= SUPPORTED_TP; ecmd->advertising |= ADVERTISED_TP; @@ -346,6 +349,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_100_FULL: ethtool_cmd_speed_set(ecmd, SPEED_100); break; + case IXGBE_LINK_SPEED_10_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10); + break; default: break; } @@ -394,6 +400,9 @@ static int ixgbe_set_settings(struct net_device *netdev, if (ecmd->advertising & ADVERTISED_100baseT_Full) advertised |= IXGBE_LINK_SPEED_100_FULL; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + advertised |= IXGBE_LINK_SPEED_10_FULL; + if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ @@ -3173,6 +3182,9 @@ static int ixgbe_get_module_info(struct net_device *dev, u8 sff8472_rev, addr_mode; bool page_swap = false; + if (hw->phy.type == ixgbe_phy_fw) + return -ENXIO; + /* Check whether we support SFF-8472 or not */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, @@ -3218,6 +3230,9 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, if (ee->len == 0) return -EINVAL; + if (hw->phy.type == ixgbe_phy_fw) + return -ENXIO; + for (i = ee->offset; i < ee->offset + ee->len; i++) { /* I2C reads can take long time */ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -3237,6 +3252,136 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, return 0; } +static const struct { + ixgbe_link_speed mac_speed; + u32 supported; +} ixgbe_ls_map[] = { + { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, + { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, + { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, + { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, + { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, +}; + +static const struct { + u32 lp_advertised; + u32 mac_speed; +} ixgbe_lp_map[] = { + { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, + { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, + { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, + { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, + { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, + { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, +}; + +static int +ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + struct ixgbe_hw *hw = &adapter->hw; + s32 rc; + u16 i; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); + if (rc) + return rc; + + edata->lp_advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { + if (info[0] & ixgbe_lp_map[i].lp_advertised) + edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; + } + + 
edata->supported = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) + edata->supported |= ixgbe_ls_map[i].supported; + } + + edata->advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) + edata->advertised |= ixgbe_ls_map[i].supported; + } + + edata->eee_enabled = !!edata->advertised; + edata->tx_lpi_enabled = edata->eee_enabled; + if (edata->advertised & edata->lp_advertised) + edata->eee_active = true; + + return 0; +} + +static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) + return ixgbe_get_eee_fw(adapter, edata); + + return -EOPNOTSUPP; +} + +static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ixgbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_err(drv, "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_err(drv, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_err(drv, + "Setting EEE advertised speeds is not supported\n"); + return -EINVAL; + } + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + if (edata->eee_enabled) { + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = + hw->phy.eee_speeds_supported; + } else { + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = 0; + } + + /* reset link */ + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + + return 0; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -3269,6 +3414,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, + .get_eee = ixgbe_get_eee, + .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9ca167c5d1fa..0c6eca570791 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -86,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, [board_x550em_a] = &ixgbe_x550em_a_info, + [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -140,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, /* required last entry */ {0, } }; @@ -180,6 +183,7 @@ MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) @@ -2447,6 +2451,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 eicr = adapter->interrupt_event; + s32 rc; if (test_bit(__IXGBE_DOWN, &adapter->state)) return; @@ -2485,6 +2490,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) return; break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + rc = hw->phy.ops.check_overtemp(hw); + if (rc != IXGBE_ERR_OVERTEMP) + return; + break; default: if (adapter->hw.mac.type >= ixgbe_mac_X540) return; @@ -2531,6 +2542,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) return; } return; + case ixgbe_mac_x550em_a: + if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + IXGBE_EICR_GPI_SDP0_X550EM_a); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, + IXGBE_EICR_GPI_SDP0_X550EM_a); + } + return; + case ixgbe_mac_X550: case ixgbe_mac_X540: if (!(eicr & IXGBE_EICR_TS)) return; @@ -5294,6 +5317,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); + if (adapter->hw.phy.type == ixgbe_phy_fw) + ixgbe_watchdog_link_is_down(adapter); ixgbe_down(adapter); /* * If SR-IOV enabled then wait a bit before bringing the adapter @@ -5553,6 +5578,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_clean_all_rx_rings(adapter); } +/** + * ixgbe_eee_capable - helper function to determine EEE support on X550 + * @adapter: board private structure + */ +static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + if (!hw->phy.eee_speeds_supported) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; + if (!hw->phy.eee_speeds_advertised) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + break; + default: + adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + break; + } +} + /** * ixgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure @@ -5717,6 +5767,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, break; case ixgbe_mac_x550em_a: adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + default: + break; + } /* fall through */ case ixgbe_mac_X550EM_x: #ifdef CONFIG_IXGBE_DCB @@ -5730,6 +5788,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, #endif /* IXGBE_FCOE */ /* Fall Through */ case ixgbe_mac_X550: + if (hw->mac.type == ixgbe_mac_X550) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; #ifdef CONFIG_IXGBE_DCA 
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif @@ -6807,6 +6867,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case IXGBE_LINK_SPEED_100_FULL: speed_str = "100 Mbps"; break; + case IXGBE_LINK_SPEED_10_FULL: + speed_str = "10 Mbps"; + break; default: speed_str = "unknown speed"; break; @@ -9595,6 +9658,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; + ixgbe_set_eee_capable(adapter); if (err == IXGBE_ERR_SFP_NOT_PRESENT) { err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 67a399bab6f0..2fcde8777a29 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -784,6 +784,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + if (speed & IXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; + /* Setup link based on the new speed settings */ hw->phy.ops.setup_link(hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index a8f2d7035241..b2cd8d443fc5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -92,6 +92,8 @@ #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED @@ -1914,6 +1916,7 @@ enum { #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 #define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0 #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ @@ -2926,6 +2929,7 @@ typedef u32 ixgbe_autoneg_advertised; /* Link speed */ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_10_FULL 0x0002 #define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 @@ -3141,6 +3145,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, ixgbe_phy_sgmii, + ixgbe_phy_fw, ixgbe_phy_generic }; @@ -3555,6 +3560,8 @@ struct ixgbe_phy_info { bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; ixgbe_link_speed speeds_supported; + ixgbe_link_speed eee_speeds_supported; + ixgbe_link_speed eee_speeds_advertised; enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9dc6079f2ac4..200f847fd8f3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -63,6 +63,18 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) return 0; } +static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + phy->ops.set_phy_power = NULL; + + return 0; +} + /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control * @hw: pointer to hardware 
structure **/ @@ -447,6 +459,159 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, return IXGBE_ERR_HOST_INTERFACE_COMMAND; } +static const struct { + u16 fw_speed; + ixgbe_link_speed phy_speed; +} ixgbe_fw_map[] = { + { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, + { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, + { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, +}; + +/** + * ixgbe_get_phy_id_fw - Get the phy ID via firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + u16 phy_speeds; + u16 phy_id_lo; + s32 rc; + u16 i; + + if (hw->phy.id) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); + if (rc) + return rc; + + hw->phy.speeds_supported = 0; + phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; + for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (phy_speeds & ixgbe_fw_map[i].fw_speed) + hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; + } + + hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; + phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; + hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; + hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; + if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) + return IXGBE_ERR_PHY_ADDR_INVALID; + + hw->phy.autoneg_advertised = hw->phy.speeds_supported; + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + return 0; +} + +/** + * ixgbe_identify_phy_fw - Get PHY type based on firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) +{ + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + + hw->phy.type = ixgbe_phy_fw; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + return ixgbe_get_phy_id_fw(hw); +} + +/** + * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + + setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; + return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); +} + +/** + * ixgbe_setup_fw_link - Setup firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + u16 i; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_rx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_tx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + default: + break; + } + + for (i = 0; i < 
ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) + setup[0] |= ixgbe_fw_map[i].fw_speed; + } + setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; + + if (hw->phy.eee_speeds_advertised) + setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); + if (rc) + return rc; + if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) + return IXGBE_ERR_OVERTEMP; + return 0; +} + +/** + * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + */ +static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) +{ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + return ixgbe_setup_fw_link(hw); +} + /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params * @hw: pointer to hardware structure * @@ -1794,6 +1959,125 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, return rc; } +/** + * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + ixgbe_restart_an_internal_phy_x550em(hw); + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** + * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + */ +static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. 
+ * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + */ + if (hw->fc.disable_fc_autoneg) + goto out; + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) + goto out; + + /* Check if auto-negotiation has completed */ + status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); + if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Negotiate the flow control */ + status = ixgbe_negotiate_fc(hw, info[0], info[0], + FW_PHY_ACT_GET_LINK_INFO_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_FC_TX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + +out: + if (!status) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + /** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers * @hw: pointer to hardware structure **/ @@ -1806,6 +2090,17 @@ static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) mac->ops.setup_fc = NULL; mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; break; + case ixgbe_media_type_copper: + if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && + hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) { + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + break; + } + mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; + mac->ops.setup_fc = ixgbe_fc_autoneg_fw; + mac->ops.setup_link = ixgbe_setup_sgmii_fw; + mac->ops.check_link = ixgbe_check_mac_link_generic; + break; case ixgbe_media_type_backplane: mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; @@ -1853,7 +2148,7 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; - return; + break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) @@ -1896,6 +2191,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { + if (hw->phy.type == ixgbe_phy_fw) { + *autoneg = true; + *speed = hw->phy.speeds_supported; + return 0; + } + /* SFP */ if (hw->phy.media_type == ixgbe_media_type_fiber) { /* CS4227 SFP must not enable auto-negotiation */ @@ -2733,6 +3034,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); + if (rc) + return rc; + memset(store, 0, sizeof(store)); + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); + if (rc) + return rc; + + return ixgbe_setup_fw_link(hw); +} + +/** + * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp + * @hw: pointer to hardware structure + */ +static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); + if (rc) + return rc; + + if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { + ixgbe_shutdown_fw_phy(hw); + return IXGBE_ERR_OVERTEMP; 
+ } + return 0; +} + /** * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register * @hw: pointer to hardware structure @@ -2819,6 +3164,10 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; + case ixgbe_phy_fw: + phy->ops.setup_link = ixgbe_setup_fw_link; + phy->ops.reset = ixgbe_reset_phy_fw; + break; default: break; } @@ -2856,6 +3205,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: media_type = ixgbe_media_type_copper; break; default: @@ -2923,6 +3274,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) hlreg0 &= ~IXGBE_HLREG0_MDCSPD; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + /* Select fast MDIO clock speed for these devices */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 |= IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; default: break; } @@ -3434,6 +3792,27 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; +static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = ixgbe_get_link_capabilities_X550em, + .get_bus_info = ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, + .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, +}; + #define X550_COMMON_EEP \ .read = &ixgbe_read_ee_hostif_X550, \ .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ @@ -3463,11 +3842,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ - .set_phy_power = NULL, \ - .check_overtemp = &ixgbe_tn_check_overtemp, + .set_phy_power = NULL, static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = NULL, .identify = &ixgbe_identify_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, @@ -3476,6 +3855,7 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { static const struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_generic, @@ -3484,6 +3864,7 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = { static const struct ixgbe_phy_operations phy_ops_x550em_a = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_x550a, @@ -3492,6 +3873,17 @@ static const struct 
ixgbe_phy_operations phy_ops_x550em_a = { .write_reg_mdi = &ixgbe_write_phy_reg_mdi, }; +static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { + X550_COMMON_PHY + .check_overtemp = ixgbe_check_overtemp_fw, + .init = ixgbe_init_phy_ops_X550em, + .identify = ixgbe_identify_phy_fw, + .read_reg = NULL, + .write_reg = NULL, + .read_reg_mdi = NULL, + .write_reg_mdi = NULL, +}; + static const struct ixgbe_link_operations link_ops_x550em_x = { .read_link = &ixgbe_read_i2c_combined_generic, .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, @@ -3541,3 +3933,13 @@ const struct ixgbe_info ixgbe_x550em_a_info = { .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_x550em_a, }; + +const struct ixgbe_info ixgbe_x550em_a_fw_info = { + .mac = ixgbe_mac_x550em_a, + .get_invariants = ixgbe_get_invariants_X550_a_fw, + .mac_ops = &mac_ops_x550em_a_fw, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a_fw, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; -- cgit v1.2.3 From 41e544cdad0bd669600825d8de73c8f420640bf9 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Thu, 15 Dec 2016 21:18:31 -0500 Subject: ixgbevf: Add support for VF promiscuous mode This patch extends the mailbox message to allow for VF promiscuous mode support. Signed-off-by: Don Skidmore Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1 + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 15 +++++++++++++- drivers/net/ethernet/intel/ixgbevf/mbx.h | 1 + drivers/net/ethernet/intel/ixgbevf/vf.c | 24 +++++++++++++++++++---- 4 files changed, 36 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 5639fbe294d0..3fe6504eeab1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -464,6 +464,7 @@ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, IXGBEVF_XCAST_MODE_MULTI, IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, }; extern const struct ixgbevf_info ixgbevf_82599_vf_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index dc23f56d5c10..1a28349114f8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1930,6 +1930,16 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 
IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; + /* request the most inclusive mode we need */ + if (flags & IFF_PROMISC) + xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; + else if (flags & IFF_ALLMULTI) + xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI; + else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + else + xcast_mode = IXGBEVF_XCAST_MODE_NONE; + spin_lock_bh(&adapter->mbx_lock); hw->mac.ops.update_xcast_mode(hw, xcast_mode); @@ -2071,7 +2081,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_12, + int api[] = { ixgbe_mbox_api_13, + ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; @@ -2373,6 +2384,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; default: @@ -4117,6 +4129,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 340cdd469455..bc0442acae78 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -84,6 +84,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d46ba1dabcb7..8a5db9d7219d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -330,9 +330,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * Thus return an error if API doesn't support RETA querying or querying * is not supported for this device type. */ - if (hw->api_version != ixgbe_mbox_api_12 || - hw->mac.type >= ixgbe_mac_X550_vf) + switch (hw->api_version) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type >= ixgbe_mac_X550_vf) + break; + default: return -EOPNOTSUPP; + } msgbuf[0] = IXGBE_VF_GET_RETA; @@ -391,9 +396,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * Thus return an error if API doesn't support RSS Random Key retrieval * or if the operation is not supported for this device type. 
*/ - if (hw->api_version != ixgbe_mbox_api_12 || - hw->mac.type >= ixgbe_mac_X550_vf) + switch (hw->api_version) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type >= ixgbe_mac_X550_vf) + break; + default: return -EOPNOTSUPP; + } msgbuf[0] = IXGBE_VF_GET_RSS_KEY; err = hw->mbx.ops.write_posted(hw, msgbuf, 1); @@ -545,6 +555,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) switch (hw->api_version) { case ixgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall threw */ + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -884,6 +899,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return 0; -- cgit v1.2.3 From 07eea570acccbc0f9402357d652868571fdbb2b9 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Thu, 15 Dec 2016 21:18:32 -0500 Subject: ixgbe: Add PF support for VF promiscuous mode This patch extends the xcast mailbox message to include support for unicast promiscuous mode. To allow a VF to enter this mode the PF must be in promiscuous mode. A later patch will add the support needed in the VF driver (ixgbevf) Signed-off-by: Don Skidmore Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 47 ++++++++++++++++++++++---- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2 ++ 4 files changed, 45 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 53fb427b1c29..aeedc812ee27 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -159,6 +159,7 @@ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, IXGBEVF_XCAST_MODE_MULTI, IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, }; struct vf_macvlans { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 01c2667c0f92..811cb4f64a5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -74,6 +74,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index dd1187c3b117..044cb44747cf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -512,6 +512,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: /* * Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are @@ -979,6 +980,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: adapter->vfinfo[vf].vf_api = 
api; return 0; default: @@ -1003,6 +1005,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return -1; @@ -1042,8 +1045,13 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + break; + default: return -EOPNOTSUPP; + } /* This mailbox command is supported (required) only for 82599 and x540 * VFs which support up to 4 RSS queues. Therefore we will compress the @@ -1069,8 +1077,13 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + break; + default: return -EOPNOTSUPP; + } memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); @@ -1082,11 +1095,16 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, { struct ixgbe_hw *hw = &adapter->hw; int xcast_mode = msgbuf[1]; - u32 vmolr, disable, enable; + u32 vmolr, fctrl, disable, enable; /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall threw */ + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -1102,17 +1120,34 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = 0; break; case IXGBEVF_XCAST_MODE_MULTI: - disable = IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; break; case IXGBEVF_XCAST_MODE_ALLMULTI: - disable = 0; + disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; break; + case IXGBEVF_XCAST_MODE_PROMISC: + if (hw->mac.type <= ixgbe_mac_82599EB) + return -EOPNOTSUPP; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + if (!(fctrl & IXGBE_FCTRL_UPE)) { + /* VF promisc requires PF in promisc */ + e_warn(drv, + "Enabling VF promisc requires PF in promisc\n"); + return -EPERM; + } + + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index b2cd8d443fc5..1d07f2ead914 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1501,6 +1501,8 @@ enum { #define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) /* VMOLR bitmasks */ +#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ +#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ #define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ #define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ -- cgit v1.2.3 From 
ec2507d2a3060a970fa314556891828cfd60093e Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 3 Jan 2017 19:20:24 +0200 Subject: net/sched: cls_matchall: Fix error path Fix several error paths in matchall: - Release reference to actions in case the hardware fails offloading (relevant to skip_sw only) - Fix error path in case tcf_exts initialization/validation fail Fixes: bf3994d2ed31 ("net/sched: introduce Match-all classifier") Signed-off-by: Yotam Gigi Signed-off-by: David S. Miller --- net/sched/cls_matchall.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index f935429bd5ef..fcecf5aac666 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -141,10 +141,12 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp, struct tcf_exts e; int err; - tcf_exts_init(&e, TCA_MATCHALL_ACT, 0); + err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0); + if (err) + return err; err = tcf_exts_validate(net, tp, tb, est, &e, ovr); if (err < 0) - return err; + goto errout; if (tb[TCA_MATCHALL_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); @@ -154,6 +156,9 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp, tcf_exts_change(tp, &f->exts, &e); return 0; +errout: + tcf_exts_destroy(&e); + return err; } static int mall_change(struct net *net, struct sk_buff *in_skb, @@ -193,7 +198,9 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (!f) return -ENOBUFS; - tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); + err = tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); + if (err) + goto err_exts_init; if (!handle) handle = 1; @@ -202,13 +209,13 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); if (err) - goto errout; + goto err_set_parms; if (tc_should_offload(dev, tp, flags)) { err = mall_replace_hw_filter(tp, f, (unsigned long) f); if (err) { if (tc_skip_sw(flags)) - goto errout; + goto err_replace_hw_filter; else err = 0; } @@ -219,7 +226,10 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, return 0; -errout: +err_replace_hw_filter: +err_set_parms: + tcf_exts_destroy(&f->exts); +err_exts_init: kfree(f); return err; } -- cgit v1.2.3 From 1365e547c6bb6b6b137bc40a189675503baa037b Mon Sep 17 00:00:00 2001 From: Alexander Alemayhu Date: Tue, 3 Jan 2017 17:13:20 +0100 Subject: xfrm: trivial typos o s/descentant/descendant o s/workarbound/workaround Signed-off-by: Alexander Alemayhu Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 2 +- net/xfrm/xfrm_state.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 177e208e8ff5..99ad1af2927f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -330,7 +330,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) } EXPORT_SYMBOL(xfrm_policy_destroy); -/* Rule must be locked. Release descentant resources, announce +/* Rule must be locked. Release descendant resources, announce * entry dead. The rule must be unlinked from lists to the moment. */ diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 64e3c82eedf6..c5cf4d611aab 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -409,7 +409,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) if (x->xflags & XFRM_SOFT_EXPIRE) { /* enter hard expire without soft expire first?! * setting a new date could trigger this. 
- * workarbound: fix x->curflt.add_time by below: + * workaround: fix x->curflt.add_time by below: */ x->curlft.add_time = now - x->saved_tmo - 1; tmo = x->lft.hard_add_expires_seconds - x->saved_tmo; -- cgit v1.2.3 From 5ec71dd7f1b63da20fefebabf1e66cb530b9eb4d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 4 Jan 2017 08:24:49 +0100 Subject: cfg80211: sysfs: use wiphy_name() Instead of open-coding dev_name(), use the wiphy_name() inline to make the code easier to understand. While at it, clean up some coding style. Signed-off-by: Johannes Berg --- net/wireless/sysfs.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 14b3f007826d..16b6b5988be9 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -39,9 +39,11 @@ SHOW_FMT(address_mask, "%pM", wiphy.addr_mask); static ssize_t name_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; - return sprintf(buf, "%s\n", dev_name(&wiphy->dev)); + + return sprintf(buf, "%s\n", wiphy_name(wiphy)); } static DEVICE_ATTR_RO(name); -- cgit v1.2.3 From 1ff8cebf49ed9e9ca2ae44b5c4176aef9c21af9c Mon Sep 17 00:00:00 2001 From: yuan linyu Date: Tue, 3 Jan 2017 20:42:17 +0800 Subject: scm: remove use CMSG{_COMPAT}_ALIGN(sizeof(struct {compat_}cmsghdr)) sizeof(struct cmsghdr) and sizeof(struct compat_cmsghdr) already aligned. remove use CMSG_ALIGN(sizeof(struct cmsghdr)) and CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)) keep code consistent. Signed-off-by: yuan linyu Signed-off-by: David S. Miller --- include/linux/socket.h | 6 +++--- net/compat.c | 14 ++++++-------- net/core/scm.c | 2 +- net/ipv4/ip_sockglue.c | 2 +- net/rxrpc/sendmsg.c | 2 +- 5 files changed, 12 insertions(+), 14 deletions(-) diff --git a/include/linux/socket.h b/include/linux/socket.h index b5cc5a6d7011..c06438023d79 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -92,9 +92,9 @@ struct cmsghdr { #define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) -#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr)))) -#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len)) -#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len)) +#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr))) +#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len)) +#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len)) #define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \ (struct cmsghdr *)(ctl) : \ diff --git a/net/compat.c b/net/compat.c index 96c544b05b15..4e27dd1cd3a6 100644 --- a/net/compat.c +++ b/net/compat.c @@ -90,11 +90,11 @@ int get_compat_msghdr(struct msghdr *kmsg, #define CMSG_COMPAT_ALIGN(len) ALIGN((len), sizeof(s32)) #define CMSG_COMPAT_DATA(cmsg) \ - ((void __user *)((char __user *)(cmsg) + CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)))) + ((void __user *)((char __user *)(cmsg) + sizeof(struct compat_cmsghdr))) #define CMSG_COMPAT_SPACE(len) \ - (CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)) + CMSG_COMPAT_ALIGN(len)) + (sizeof(struct compat_cmsghdr) + CMSG_COMPAT_ALIGN(len)) #define CMSG_COMPAT_LEN(len) \ - (CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)) + (len)) + (sizeof(struct compat_cmsghdr) + (len)) #define CMSG_COMPAT_FIRSTHDR(msg) \ (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? 
\ @@ -141,8 +141,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg)) return -EINVAL; - tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) + - CMSG_ALIGN(sizeof(struct cmsghdr))); + tmp = ((ucmlen - sizeof(*ucmsg)) + sizeof(struct cmsghdr)); tmp = CMSG_ALIGN(tmp); kcmlen += tmp; ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen); @@ -168,8 +167,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, goto Efault; if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg)) goto Einval; - tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) + - CMSG_ALIGN(sizeof(struct cmsghdr))); + tmp = ((ucmlen - sizeof(*ucmsg)) + sizeof(struct cmsghdr)); if ((char *)kcmsg_base + kcmlen - (char *)kcmsg < CMSG_ALIGN(tmp)) goto Einval; kcmsg->cmsg_len = tmp; @@ -178,7 +176,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, __get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type) || copy_from_user(CMSG_DATA(kcmsg), CMSG_COMPAT_DATA(ucmsg), - (ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))))) + (ucmlen - sizeof(*ucmsg)))) goto Efault; /* Advance. */ diff --git a/net/core/scm.c b/net/core/scm.c index d8820438ba37..b6d83686e149 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -71,7 +71,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) struct file **fpp; int i, num; - num = (cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)))/sizeof(int); + num = (cmsg->cmsg_len - sizeof(struct cmsghdr))/sizeof(int); if (num <= 0) return 0; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index cf9b43de4690..1caa38d8a9c9 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -272,7 +272,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, continue; switch (cmsg->cmsg_type) { case IP_RETOPTS: - err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); + err = cmsg->cmsg_len - sizeof(struct cmsghdr); /* Our caller is responsible for freeing ipc->opt */ err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index b214a4d4a641..0a6ef217aa8a 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -376,7 +376,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, if (!CMSG_OK(msg, cmsg)) return -EINVAL; - len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); + len = cmsg->cmsg_len - sizeof(struct cmsghdr); _debug("CMSG %d, %d, %d", cmsg->cmsg_level, cmsg->cmsg_type, len); -- cgit v1.2.3 From ac4340fc3ce0e0b1cb627b05d6dcbd473544d7b3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 4 Jan 2017 13:24:19 -0500 Subject: net: Assert at build time the assumptions we make about the CMSG header. It must always be the case that CMSG_ALIGN(sizeof(hdr)) == sizeof(hdr). Otherwise there are missing adjustments in the various calculations that parse and build these things. Signed-off-by: David S. 
Miller --- net/compat.c | 3 +++ net/socket.c | 2 ++ 2 files changed, 5 insertions(+) diff --git a/net/compat.c b/net/compat.c index 4e27dd1cd3a6..ba3ac722714d 100644 --- a/net/compat.c +++ b/net/compat.c @@ -130,6 +130,9 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, __kernel_size_t kcmlen, tmp; int err = -EFAULT; + BUILD_BUG_ON(sizeof(struct compat_cmsghdr) != + CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr))); + kcmlen = 0; kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf; ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg); diff --git a/net/socket.c b/net/socket.c index 8487bf136e5c..5f3b5a2c4f37 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1948,6 +1948,8 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, ctl_buf = msg_sys->msg_control; ctl_len = msg_sys->msg_controllen; } else if (ctl_len) { + BUILD_BUG_ON(sizeof(struct cmsghdr) != + CMSG_ALIGN(sizeof(struct cmsghdr))); if (ctl_len > sizeof(ctl)) { ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); if (ctl_buf == NULL) -- cgit v1.2.3 From 9feb16ae0b2d5246e20a8dd9049441780e59c2a1 Mon Sep 17 00:00:00 2001 From: Prasad Kanneganti Date: Tue, 3 Jan 2017 11:27:33 -0800 Subject: liquidio: remove PTP support in 23XX adapters liquidio driver incorrectly indicates that PTP is supported in 23XX adapters; this patch fixes that. PTP is supported in 66XX and 68XX adapters, and the driver correctly indicates that. Signed-off-by: Prasad Kanneganti Signed-off-by: Felix Manlunas Signed-off-by: Derek Chickles Signed-off-by: Satanand Burla Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 39a9665c9d00..6443bc13af62 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2629,7 +2629,9 @@ static int liquidio_open(struct net_device *netdev) oct->droq[0]->ops.poll_mode = 1; } - oct_ptp_open(netdev); + if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) && + ptp_enable) + oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); @@ -2973,9 +2975,13 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) */ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { + struct lio *lio = GET_LIO(netdev); + switch (cmd) { case SIOCSHWTSTAMP: - return hwtstamp_ioctl(netdev, ifr); + if ((lio->oct_dev->chip_id == OCTEON_CN66XX || + lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable) + return hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; } -- cgit v1.2.3 From a896eee3349e4e7f35a83f3b1a93c2e048d976b9 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 3 Jan 2017 14:31:49 -0500 Subject: net: dsa: remove out label in dsa_switch_setup_one The "out" label in dsa_switch_setup_one() is useless, thus remove it. Reviewed-by: Andrew Lunn Signed-off-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa.c | 40 +++++++++++++--------------------------- 1 file changed, 13 insertions(+), 27 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 7899919cd9f0..89e66b623d73 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -329,8 +329,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) if (dst->cpu_switch != -1) { netdev_err(dst->master_netdev, "multiple cpu ports?!\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } dst->cpu_switch = index; dst->cpu_port = i; @@ -343,10 +342,8 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) valid_name_found = true; } - if (!valid_name_found && i == DSA_MAX_PORTS) { - ret = -EINVAL; - goto out; - } + if (!valid_name_found && i == DSA_MAX_PORTS) + return -EINVAL; /* Make the built-in MII bus mask match the number of ports, * switch drivers can override this later @@ -363,10 +360,8 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) tag_protocol = ops->get_tag_protocol(ds); dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); - if (IS_ERR(dst->tag_ops)) { - ret = PTR_ERR(dst->tag_ops); - goto out; - } + if (IS_ERR(dst->tag_ops)) + return PTR_ERR(dst->tag_ops); dst->rcv = dst->tag_ops->rcv; } @@ -378,25 +373,23 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) */ ret = ops->setup(ds); if (ret < 0) - goto out; + return ret; if (ops->set_addr) { ret = ops->set_addr(ds, dst->master_netdev->dev_addr); if (ret < 0) - goto out; + return ret; } if (!ds->slave_mii_bus && ops->phy_read) { ds->slave_mii_bus = devm_mdiobus_alloc(parent); - if (!ds->slave_mii_bus) { - ret = -ENOMEM; - goto out; - } + if (!ds->slave_mii_bus) + return -ENOMEM; dsa_slave_mii_bus_init(ds); ret = mdiobus_register(ds->slave_mii_bus); if (ret < 0) - goto out; + return ret; } /* @@ -409,20 +402,16 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) continue; ret = dsa_slave_create(ds, parent, i, cd->port_names[i]); - if (ret < 0) { + if (ret < 0) netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n", index, i, cd->port_names[i], ret); - ret = 0; - } } /* Perform configuration of the CPU and DSA ports */ ret = dsa_cpu_dsa_setups(ds, parent); - if (ret < 0) { + if (ret < 0) netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n", index); - ret = 0; - } ret = dsa_cpu_port_ethtool_setup(ds); if (ret) @@ -453,10 +442,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) } #endif /* CONFIG_NET_DSA_HWMON */ - return ret; - -out: - return ret; + return 0; } static struct dsa_switch * -- cgit v1.2.3 From 009146d117b9b816193fce0f1ed75f015a398721 Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Tue, 3 Jan 2017 12:47:16 -0800 Subject: ipvlan: assign unique dev-id for each slave device. IPvlan setup uses one mac-address (of master). The IPv6 link-local addresses are derived using the mac-address on the link. Lack of dev-ids makes these link-local addresses same for all slaves including that of master device. dev-ids are necessary to add differentiation when L2 address is shared. Signed-off-by: Mahesh Bandewar Signed-off-by: David S. 
Miller --- drivers/net/ipvlan/ipvlan.h | 1 + drivers/net/ipvlan/ipvlan_main.c | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index dbfbb33ac66c..0a9068fdee0f 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -97,6 +97,7 @@ struct ipvl_port { struct work_struct wq; struct sk_buff_head backlog; int count; + struct ida ida; }; struct ipvl_skb_cb { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 975f9ddb9908..ce7ca6a5aa8a 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -119,6 +119,7 @@ static int ipvlan_port_create(struct net_device *dev) skb_queue_head_init(&port->backlog); INIT_WORK(&port->wq, ipvlan_process_multicast); + ida_init(&port->ida); err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port); if (err) @@ -150,6 +151,7 @@ static void ipvlan_port_destroy(struct net_device *dev) dev_put(skb->dev); kfree_skb(skb); } + ida_destroy(&port->ida); kfree(port); } @@ -533,6 +535,16 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); + /* Since L2 address is shared among all IPvlan slaves including + * master, use unique 16 bit dev-ids to diffentiate among them. + * Assign IDs between 0x1 and 0xFFFE (used by the master) to each + * slave link [see addrconf_ifid_eui48()]. + */ + err = ida_simple_get(&port->ida, 1, 0xFFFE, GFP_KERNEL); + if (err < 0) + goto destroy_ipvlan_port; + dev->dev_id = err; + /* TODO Probably put random address here to be presented to the * world but keep using the physical-dev address for the outgoing * packets. @@ -543,7 +555,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, err = register_netdevice(dev); if (err < 0) - goto destroy_ipvlan_port; + goto remove_ida; err = netdev_upper_dev_link(phy_dev, dev); if (err) { @@ -562,6 +574,8 @@ unlink_netdev: netdev_upper_dev_unlink(phy_dev, dev); unregister_netdev: unregister_netdevice(dev); +remove_ida: + ida_simple_remove(&port->ida, dev->dev_id); destroy_ipvlan_port: if (create) ipvlan_port_destroy(phy_dev); @@ -579,6 +593,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) kfree_rcu(addr, rcu); } + ida_simple_remove(&ipvlan->port->ida, dev->dev_id); list_del_rcu(&ipvlan->pnode); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(ipvlan->phy_dev, dev); -- cgit v1.2.3 From 4063469971af9611648382559c2d399cce03ae67 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 4 Jan 2017 15:10:21 +0300 Subject: sh_eth: handle only enabled E-MAC interrupts The driver should only handle the enabled E-MAC interrupts, like it does for the E-DMAC interrupts since commit 3893b27345ac ("sh_eth: workaround for spurious ECI interrupt"), so mask ECSR with ECSIPR when reading it in sh_eth_error(). Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f341c1bc7001..c89569509a47 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1535,7 +1535,8 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status) u32 mask; if (intr_status & EESR_ECI) { - felic_stat = sh_eth_read(ndev, ECSR); + felic_stat = sh_eth_read(ndev, ECSR) & + sh_eth_read(ndev, ECSIPR); sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ if (felic_stat & ECSR_ICD) ndev->stats.tx_carrier_errors++; -- cgit v1.2.3 From 1940f240769ada7efe9d459991fe5dd80db3771a Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 4 Jan 2017 15:10:50 +0300 Subject: sh_eth: no need for *else* after *goto* Well, checkpatch.pl complains about *else* after *return* and *break* but not after *goto*... and it probably should have complained about the code in sh_eth_error(). Win couple LoCs by removing that *else*. :-) Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c89569509a47..72a1fad4e930 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1542,13 +1542,11 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status) ndev->stats.tx_carrier_errors++; if (felic_stat & ECSR_LCHNG) { /* Link Changed */ - if (mdp->cd->no_psr || mdp->no_ether_link) { + if (mdp->cd->no_psr || mdp->no_ether_link) goto ignore_link; - } else { - link_stat = (sh_eth_read(ndev, PSR)); - if (mdp->ether_link_active_low) - link_stat = ~link_stat; - } + link_stat = sh_eth_read(ndev, PSR); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; if (!(link_stat & PHY_ST_LINK)) { sh_eth_rcv_snd_disable(ndev); } else { -- cgit v1.2.3 From 9b39f05ce8e0cf2857c37b72c0b3b92e6a026ed5 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 4 Jan 2017 15:11:21 +0300 Subject: sh_eth: factor out sh_eth_emac_interrupt() The E-MAC interrupt (EESR.ECI) is not always caused by an error condition, so it really shouldn't be handled by sh_eth_error(). Factor out the E-MAC interrupt handler, sh_eth_emac_interrupt(), removing the ECI bit from the EESR's values throughout the driver... Update Cogent Embedded's copyright and clean up the whitespace in Renesas' copyright, while at it... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 102 +++++++++++++++++----------------- drivers/net/ethernet/renesas/sh_eth.h | 2 +- 2 files changed, 53 insertions(+), 51 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 72a1fad4e930..f2af5ee84b77 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1,9 +1,9 @@ /* SuperH Ethernet device driver * - * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. - * Copyright (C) 2013-2016 Cogent Embedded, Inc. + * Copyright (C) 2013-2017 Cogent Embedded, Inc. 
* Copyright (C) 2014 Codethink Limited * * This program is free software; you can redistribute it and/or modify it @@ -523,7 +523,7 @@ static struct sh_eth_cpu_data r7s72100_data = { .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000070f, .no_psr = 1, @@ -562,7 +562,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000070f, .apr = 1, @@ -607,8 +607,7 @@ static struct sh_eth_cpu_data r8a777x_data = { .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, .apr = 1, @@ -630,8 +629,7 @@ static struct sh_eth_cpu_data r8a779x_data = { .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, .trscer_err_mask = DESC_I_RINT8, @@ -671,8 +669,7 @@ static struct sh_eth_cpu_data sh7724_data = { .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .apr = 1, .mpr = 1, @@ -707,8 +704,7 @@ static struct sh_eth_cpu_data sh7757_data = { .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .irq_flags = IRQF_SHARED, .apr = 1, @@ -776,7 +772,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000072f, .irq_flags = IRQF_SHARED, @@ -807,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = { .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .apr = 1, .mpr = 1, @@ -835,8 +831,7 @@ static struct sh_eth_cpu_data sh7763_data = { .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .apr = 1, .mpr = 1, @@ -1526,43 +1521,44 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev) sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } -/* error control function */ -static void sh_eth_error(struct net_device *ndev, u32 intr_status) +/* E-MAC interrupt handler */ +static void sh_eth_emac_interrupt(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); u32 felic_stat; u32 link_stat; - u32 mask; - if (intr_status & EESR_ECI) { - felic_stat = sh_eth_read(ndev, ECSR) & - sh_eth_read(ndev, ECSIPR); - sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ - if (felic_stat & ECSR_ICD) - 
ndev->stats.tx_carrier_errors++; - if (felic_stat & ECSR_LCHNG) { - /* Link Changed */ - if (mdp->cd->no_psr || mdp->no_ether_link) - goto ignore_link; - link_stat = sh_eth_read(ndev, PSR); - if (mdp->ether_link_active_low) - link_stat = ~link_stat; - if (!(link_stat & PHY_ST_LINK)) { - sh_eth_rcv_snd_disable(ndev); - } else { - /* Link Up */ - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); - /* clear int */ - sh_eth_modify(ndev, ECSR, 0, 0); - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, - DMAC_M_ECI); - /* enable tx and rx */ - sh_eth_rcv_snd_enable(ndev); - } + felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ + if (felic_stat & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (felic_stat & ECSR_LCHNG) { + /* Link Changed */ + if (mdp->cd->no_psr || mdp->no_ether_link) + return; + link_stat = sh_eth_read(ndev, PSR); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; + if (!(link_stat & PHY_ST_LINK)) { + sh_eth_rcv_snd_disable(ndev); + } else { + /* Link Up */ + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); + /* clear int */ + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, DMAC_M_ECI); + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); } } +} + +/* error control function */ +static void sh_eth_error(struct net_device *ndev, u32 intr_status) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 mask; -ignore_link: if (intr_status & EESR_TWB) { /* Unused write back interrupt */ if (intr_status & EESR_TABT) { /* Transmit Abort int */ @@ -1643,14 +1639,16 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) /* Get interrupt status */ intr_status = sh_eth_read(ndev, EESR); - /* Mask it with the interrupt mask, forcing ECI interrupt to be always - * enabled since it's the one that comes thru regardless of the mask, - * and we need to fully handle it in sh_eth_error() in order to quench - * it as it doesn't get cleared by just writing 1 to the ECI bit... + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_emac_interrupt() in order + * to quench it as it doesn't get cleared by just writing 1 to the ECI + * bit... 
*/ intr_enable = sh_eth_read(ndev, EESIPR); intr_status &= intr_enable | DMAC_M_ECI; - if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) + if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | + cd->eesr_err_check)) ret = IRQ_HANDLED; else goto out; @@ -1682,6 +1680,10 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) netif_wake_queue(ndev); } + /* E-MAC interrupt */ + if (intr_status & EESR_ECI) + sh_eth_emac_interrupt(ndev); + if (intr_status & cd->eesr_err_check) { /* Clear error interrupts */ sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index d050f37f3e0f..9eec1e185adf 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -265,7 +265,7 @@ enum EESR_BIT { EESR_RTO) #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ - EESR_TFE | EESR_TDE | EESR_ECI) + EESR_TFE | EESR_TDE) /* EESIPR */ enum DMAC_IM_BIT { -- cgit v1.2.3 From 8eb9f2f9e4468e0fb86c2c06606a0ad03dd1a043 Mon Sep 17 00:00:00 2001 From: Arjun V Date: Wed, 4 Jan 2017 19:04:20 +0530 Subject: cxgb4: Support compressed error vector for T6 t6fw-1.15.15.0 enabled compressed error vector in cpl_rx_pkt for T6. Updating driver to take care of these changes. Signed-off-by: Santosh Rastapur Signed-off-by: Arjun V Signed-off-by: Hariprasad Shenai Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 5 +++++ drivers/net/ethernet/chelsio/cxgb4/sge.c | 16 ++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 7 +++++++ drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 15 +++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 4 ++++ 5 files changed, 45 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0bce1bf9ca0f..9a49c421f86c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -263,6 +263,11 @@ struct tp_params { u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + /* cached TP_OUT_CONFIG compressed error vector + * and passing outer header info for encapsulated packets. + */ + int rx_pkt_encap; + /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a * subset of the set of fields which may be present in the Compressed * Filter Tuple portion of filters and TCP TCB connections. The diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 9f606478c29c..0fe04b482c38 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2038,13 +2038,20 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct sge *s = &q->adap->sge; int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 
CPL_TRACE_PKT : CPL_TRACE_PKT_T5; + u16 err_vec; struct port_info *pi; if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) return handle_trace_pkt(q->adap, si); pkt = (const struct cpl_rx_pkt *)rsp; - csum_ok = pkt->csum_calc && !pkt->err_vec && + /* Compressed error vector is enabled for T6 only */ + if (q->adap->params.tp.rx_pkt_encap) + err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); + else + err_vec = be16_to_cpu(pkt->err_vec); + + csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); if ((pkt->l2info & htonl(RXF_TCP_F)) && !(cxgb_poll_busy_polling(q)) && @@ -2092,7 +2099,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { - if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F))) + if (q->adap->params.tp.rx_pkt_encap) + csum_ok = err_vec & + T6_COMPR_RXERR_SUM_F; + else + csum_ok = err_vec & RXERR_CSUM_F; + if (!csum_ok) skb->ip_summed = CHECKSUM_UNNECESSARY; } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e8139514d32c..cd5f437448d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7686,6 +7686,13 @@ int t4_init_tp_params(struct adapter *adap) &adap->params.tp.ingress_config, 1, TP_INGRESS_CONFIG_A); } + /* For T6, cache the adapter's compressed error vector + * and passing outer header info for encapsulated packets. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + v = t4_read_reg(adap, TP_OUT_CONFIG_A); + adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0; + } /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field * shift positions of several elements of the Compressed Filter Tuple diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index a267173f5997..5043b64805f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1175,6 +1175,21 @@ struct cpl_rx_pkt { #define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S) #define RXERR_CSUM_F RXERR_CSUM_V(1U) +#define T6_COMPR_RXERR_LEN_S 1 +#define T6_COMPR_RXERR_LEN_V(x) ((x) << T6_COMPR_RXERR_LEN_S) +#define T6_COMPR_RXERR_LEN_F T6_COMPR_RXERR_LEN_V(1U) + +#define T6_COMPR_RXERR_VEC_S 0 +#define T6_COMPR_RXERR_VEC_M 0x3F +#define T6_COMPR_RXERR_VEC_V(x) ((x) << T6_COMPR_RXERR_LEN_S) +#define T6_COMPR_RXERR_VEC_G(x) \ + (((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M) + +/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */ +#define T6_COMPR_RXERR_SUM_S 4 +#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S) +#define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U) + struct cpl_trace_pkt { u8 opcode; u8 intf; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 9fea255c7e87..e685163b1357 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1276,6 +1276,10 @@ #define DBGLARPTR_M 0x7fU #define DBGLARPTR_V(x) ((x) << DBGLARPTR_S) +#define CRXPKTENC_S 3 +#define CRXPKTENC_V(x) ((x) << CRXPKTENC_S) +#define CRXPKTENC_F CRXPKTENC_V(1U) + #define TP_DBG_LA_DATAL_A 0x7ed8 #define TP_DBG_LA_CONFIG_A 0x7ed4 #define TP_OUT_CONFIG_A 0x7d04 -- cgit v1.2.3 From 5952758101fb55844957a8d4fe88402d9827cfb4 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 4 Jan 2017 19:56:24 +0100 Subject: dsa: mv88e6xxx: 
Optimise atu_get Lookup in the ATU can be performed starting from a given MAC address. This is faster than starting with the first possible MAC address and iterating all entries. Entries are returned in numeric order. So if the MAC address returned is bigger than what we are searching for, we know it is not in the ATU. Using the benchmark provided by Volodymyr Bendiuga , https://www.spinics.net/lists/netdev/msg411550.html on an Marvell Armada 370 RD, the test to add a number of static fdb entries went from 1.616531 seconds to 0.312052 seconds. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 5 ++-- include/linux/etherdevice.h | 60 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index b5f0e1ec864d..676b0e2ad221 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2023,7 +2023,8 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid, struct mv88e6xxx_atu_entry next; int err; - eth_broadcast_addr(next.mac); + memcpy(next.mac, addr, ETH_ALEN); + eth_addr_dec(next.mac); err = _mv88e6xxx_atu_mac_write(chip, next.mac); if (err) @@ -2041,7 +2042,7 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid, *entry = next; return 0; } - } while (!is_broadcast_ether_addr(next.mac)); + } while (ether_addr_greater(addr, next.mac)); memset(entry, 0, sizeof(*entry)); entry->fid = fid; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 6fec9e81bd70..42add77ae47d 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -396,6 +396,66 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, return true; } +/** + * ether_addr_to_u64 - Convert an Ethernet address into a u64 value. + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return a u64 value of the address + */ +static inline u64 ether_addr_to_u64(const u8 *addr) +{ + u64 u = 0; + int i; + + for (i = 0; i < ETH_ALEN; i++) + u = u << 8 | addr[i]; + + return u; +} + +/** + * u64_to_ether_addr - Convert a u64 to an Ethernet address. + * @u: u64 to convert to an Ethernet MAC address + * @addr: Pointer to a six-byte array to contain the Ethernet address + */ +static inline void u64_to_ether_addr(u64 u, u8 *addr) +{ + int i; + + for (i = ETH_ALEN - 1; i >= 0; i--) { + addr[i] = u & 0xff; + u = u >> 8; + } +} + +/** + * eth_addr_dec - Decrement the given MAC address + * + * @addr: Pointer to a six-byte array containing Ethernet address to decrement + */ +static inline void eth_addr_dec(u8 *addr) +{ + u64 u = ether_addr_to_u64(addr); + + u--; + u64_to_ether_addr(u, addr); +} + +/** + * ether_addr_greater - Compare two Ethernet addresses + * @addr1: Pointer to a six-byte array containing the Ethernet address + * @addr2: Pointer other six-byte array containing the Ethernet address + * + * Compare two Ethernet addresses, returns true addr1 is greater than addr2 + */ +static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2) +{ + u64 u1 = ether_addr_to_u64(addr1); + u64 u2 = ether_addr_to_u64(addr2); + + return u1 > u2; +} + /** * is_etherdev_addr - Tell if given Ethernet address belongs to the device. 
* @dev: Pointer to a device structure -- cgit v1.2.3 From 57ea884b0dcf1e59661955919976ef138ec9cdb0 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 5 Jan 2017 02:34:28 +0100 Subject: packet: fix panic in __packet_set_timestamp on tpacket_v3 in tx mode When TX timestamping is in use with TPACKET_V3's TX ring, then we'll hit the BUG() in __packet_set_timestamp() when ring buffer slot is returned to user space via tpacket_destruct_skb(). This is due to v3 being assumed as unreachable here, but since 7f953ab2ba46 ("af_packet: TX_RING support for TPACKET_V3") it's not anymore. Fix it by filling the timestamp back into the ring slot. Fixes: 7f953ab2ba46 ("af_packet: TX_RING support for TPACKET_V3") Signed-off-by: Daniel Borkmann Acked-by: Sowmini Varadhan Signed-off-by: David S. Miller --- net/packet/af_packet.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 7e39087d519b..ddbda255b6ae 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -481,6 +481,9 @@ static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, h.h2->tp_nsec = ts.tv_nsec; break; case TPACKET_V3: + h.h3->tp_sec = ts.tv_sec; + h.h3->tp_nsec = ts.tv_nsec; + break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); -- cgit v1.2.3 From b54a134a7de461f804cf0e28331d0a43ee82fb13 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:33 +0000 Subject: rxrpc: Fix handling of enums-to-string translation in tracing Fix the way enum values are translated into strings in AF_RXRPC tracepoints. The problem with just doing a lookup in a normal flat array of strings or chars is that external tracing infrastructure can't find it. Rather, TRACE_DEFINE_ENUM must be used. Also sort the enums and string tables to make it easier to keep them in order so that a future patch to __print_symbolic() can be optimised to try a direct lookup into the table first before iterating over it. A couple of _proto() macro calls are removed because they refered to tables that got moved to the tracing infrastructure. The relevant data can be found by way of tracing. Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 424 ++++++++++++++++++++++++++++++++++++++++--- net/rxrpc/ar-internal.h | 194 -------------------- net/rxrpc/call_object.c | 18 -- net/rxrpc/conn_client.c | 8 - net/rxrpc/input.c | 10 - net/rxrpc/misc.c | 151 --------------- 6 files changed, 403 insertions(+), 402 deletions(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 0383e5e9a0f3..2395a57462c9 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -16,6 +16,386 @@ #include +/* + * Define enums for tracing information. + * + * These should all be kept sorted, making it easier to match the string + * mapping tables further on. 
+ */ +#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum rxrpc_skb_trace { + rxrpc_skb_rx_cleaned, + rxrpc_skb_rx_freed, + rxrpc_skb_rx_got, + rxrpc_skb_rx_lost, + rxrpc_skb_rx_purged, + rxrpc_skb_rx_received, + rxrpc_skb_rx_rotated, + rxrpc_skb_rx_seen, + rxrpc_skb_tx_cleaned, + rxrpc_skb_tx_freed, + rxrpc_skb_tx_got, + rxrpc_skb_tx_new, + rxrpc_skb_tx_rotated, + rxrpc_skb_tx_seen, +}; + +enum rxrpc_conn_trace { + rxrpc_conn_got, + rxrpc_conn_new_client, + rxrpc_conn_new_service, + rxrpc_conn_put_client, + rxrpc_conn_put_service, + rxrpc_conn_queued, + rxrpc_conn_seen, +}; + +enum rxrpc_client_trace { + rxrpc_client_activate_chans, + rxrpc_client_alloc, + rxrpc_client_chan_activate, + rxrpc_client_chan_disconnect, + rxrpc_client_chan_pass, + rxrpc_client_chan_unstarted, + rxrpc_client_cleanup, + rxrpc_client_count, + rxrpc_client_discard, + rxrpc_client_duplicate, + rxrpc_client_exposed, + rxrpc_client_replace, + rxrpc_client_to_active, + rxrpc_client_to_culled, + rxrpc_client_to_idle, + rxrpc_client_to_inactive, + rxrpc_client_to_upgrade, + rxrpc_client_to_waiting, + rxrpc_client_uncount, +}; + +enum rxrpc_call_trace { + rxrpc_call_connected, + rxrpc_call_error, + rxrpc_call_got, + rxrpc_call_got_kernel, + rxrpc_call_got_userid, + rxrpc_call_new_client, + rxrpc_call_new_service, + rxrpc_call_put, + rxrpc_call_put_kernel, + rxrpc_call_put_noqueue, + rxrpc_call_put_userid, + rxrpc_call_queued, + rxrpc_call_queued_ref, + rxrpc_call_release, + rxrpc_call_seen, +}; + +enum rxrpc_transmit_trace { + rxrpc_transmit_await_reply, + rxrpc_transmit_end, + rxrpc_transmit_queue, + rxrpc_transmit_queue_last, + rxrpc_transmit_rotate, + rxrpc_transmit_rotate_last, + rxrpc_transmit_wait, +}; + +enum rxrpc_receive_trace { + rxrpc_receive_end, + rxrpc_receive_front, + rxrpc_receive_incoming, + rxrpc_receive_queue, + rxrpc_receive_queue_last, + rxrpc_receive_rotate, +}; + +enum rxrpc_recvmsg_trace { + rxrpc_recvmsg_cont, + rxrpc_recvmsg_data_return, + rxrpc_recvmsg_dequeue, + rxrpc_recvmsg_enter, + rxrpc_recvmsg_full, + rxrpc_recvmsg_hole, + rxrpc_recvmsg_next, + rxrpc_recvmsg_return, + rxrpc_recvmsg_terminal, + rxrpc_recvmsg_to_be_accepted, + rxrpc_recvmsg_wait, +}; + +enum rxrpc_rtt_tx_trace { + rxrpc_rtt_tx_data, + rxrpc_rtt_tx_ping, +}; + +enum rxrpc_rtt_rx_trace { + rxrpc_rtt_rx_ping_response, + rxrpc_rtt_rx_requested_ack, +}; + +enum rxrpc_timer_trace { + rxrpc_timer_begin, + rxrpc_timer_expired, + rxrpc_timer_init_for_reply, + rxrpc_timer_init_for_send_reply, + rxrpc_timer_set_for_ack, + rxrpc_timer_set_for_ping, + rxrpc_timer_set_for_resend, + rxrpc_timer_set_for_send, +}; + +enum rxrpc_propose_ack_trace { + rxrpc_propose_ack_client_tx_end, + rxrpc_propose_ack_input_data, + rxrpc_propose_ack_ping_for_lost_ack, + rxrpc_propose_ack_ping_for_lost_reply, + rxrpc_propose_ack_ping_for_params, + rxrpc_propose_ack_processing_op, + rxrpc_propose_ack_respond_to_ack, + rxrpc_propose_ack_respond_to_ping, + rxrpc_propose_ack_retry_tx, + rxrpc_propose_ack_rotate_rx, + rxrpc_propose_ack_terminal_ack, +}; + +enum rxrpc_propose_ack_outcome { + rxrpc_propose_ack_subsume, + rxrpc_propose_ack_update, + rxrpc_propose_ack_use, +}; + +enum rxrpc_congest_change { + rxrpc_cong_begin_retransmission, + rxrpc_cong_cleared_nacks, + rxrpc_cong_new_low_nack, + rxrpc_cong_no_change, + rxrpc_cong_progress, + rxrpc_cong_retransmit_again, + rxrpc_cong_rtt_window_end, + rxrpc_cong_saw_nack, +}; + +#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ + +/* + * Declare tracing 
information enums and their string mappings for display. + */ +#define rxrpc_skb_traces \ + EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ + EM(rxrpc_skb_rx_freed, "Rx FRE") \ + EM(rxrpc_skb_rx_got, "Rx GOT") \ + EM(rxrpc_skb_rx_lost, "Rx *L*") \ + EM(rxrpc_skb_rx_purged, "Rx PUR") \ + EM(rxrpc_skb_rx_received, "Rx RCV") \ + EM(rxrpc_skb_rx_rotated, "Rx ROT") \ + EM(rxrpc_skb_rx_seen, "Rx SEE") \ + EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ + EM(rxrpc_skb_tx_freed, "Tx FRE") \ + EM(rxrpc_skb_tx_got, "Tx GOT") \ + EM(rxrpc_skb_tx_new, "Tx NEW") \ + EM(rxrpc_skb_tx_rotated, "Tx ROT") \ + E_(rxrpc_skb_tx_seen, "Tx SEE") + +#define rxrpc_conn_traces \ + EM(rxrpc_conn_got, "GOT") \ + EM(rxrpc_conn_new_client, "NWc") \ + EM(rxrpc_conn_new_service, "NWs") \ + EM(rxrpc_conn_put_client, "PTc") \ + EM(rxrpc_conn_put_service, "PTs") \ + EM(rxrpc_conn_queued, "QUE") \ + E_(rxrpc_conn_seen, "SEE") + +#define rxrpc_client_traces \ + EM(rxrpc_client_activate_chans, "Activa") \ + EM(rxrpc_client_alloc, "Alloc ") \ + EM(rxrpc_client_chan_activate, "ChActv") \ + EM(rxrpc_client_chan_disconnect, "ChDisc") \ + EM(rxrpc_client_chan_pass, "ChPass") \ + EM(rxrpc_client_chan_unstarted, "ChUnst") \ + EM(rxrpc_client_cleanup, "Clean ") \ + EM(rxrpc_client_count, "Count ") \ + EM(rxrpc_client_discard, "Discar") \ + EM(rxrpc_client_duplicate, "Duplic") \ + EM(rxrpc_client_exposed, "Expose") \ + EM(rxrpc_client_replace, "Replac") \ + EM(rxrpc_client_to_active, "->Actv") \ + EM(rxrpc_client_to_culled, "->Cull") \ + EM(rxrpc_client_to_idle, "->Idle") \ + EM(rxrpc_client_to_inactive, "->Inac") \ + EM(rxrpc_client_to_upgrade, "->Upgd") \ + EM(rxrpc_client_to_waiting, "->Wait") \ + E_(rxrpc_client_uncount, "Uncoun") + +#define rxrpc_conn_cache_states \ + EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \ + EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \ + EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \ + EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \ + E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \ + +#define rxrpc_call_traces \ + EM(rxrpc_call_connected, "CON") \ + EM(rxrpc_call_error, "*E*") \ + EM(rxrpc_call_got, "GOT") \ + EM(rxrpc_call_got_kernel, "Gke") \ + EM(rxrpc_call_got_userid, "Gus") \ + EM(rxrpc_call_new_client, "NWc") \ + EM(rxrpc_call_new_service, "NWs") \ + EM(rxrpc_call_put, "PUT") \ + EM(rxrpc_call_put_kernel, "Pke") \ + EM(rxrpc_call_put_noqueue, "PNQ") \ + EM(rxrpc_call_put_userid, "Pus") \ + EM(rxrpc_call_queued, "QUE") \ + EM(rxrpc_call_queued_ref, "QUR") \ + EM(rxrpc_call_release, "RLS") \ + E_(rxrpc_call_seen, "SEE") + +#define rxrpc_transmit_traces \ + EM(rxrpc_transmit_await_reply, "AWR") \ + EM(rxrpc_transmit_end, "END") \ + EM(rxrpc_transmit_queue, "QUE") \ + EM(rxrpc_transmit_queue_last, "QLS") \ + EM(rxrpc_transmit_rotate, "ROT") \ + EM(rxrpc_transmit_rotate_last, "RLS") \ + E_(rxrpc_transmit_wait, "WAI") + +#define rxrpc_receive_traces \ + EM(rxrpc_receive_end, "END") \ + EM(rxrpc_receive_front, "FRN") \ + EM(rxrpc_receive_incoming, "INC") \ + EM(rxrpc_receive_queue, "QUE") \ + EM(rxrpc_receive_queue_last, "QLS") \ + E_(rxrpc_receive_rotate, "ROT") + +#define rxrpc_recvmsg_traces \ + EM(rxrpc_recvmsg_cont, "CONT") \ + EM(rxrpc_recvmsg_data_return, "DATA") \ + EM(rxrpc_recvmsg_dequeue, "DEQU") \ + EM(rxrpc_recvmsg_enter, "ENTR") \ + EM(rxrpc_recvmsg_full, "FULL") \ + EM(rxrpc_recvmsg_hole, "HOLE") \ + EM(rxrpc_recvmsg_next, "NEXT") \ + EM(rxrpc_recvmsg_return, "RETN") \ + EM(rxrpc_recvmsg_terminal, "TERM") \ + EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \ + E_(rxrpc_recvmsg_wait, "WAIT") + +#define rxrpc_rtt_tx_traces \ + EM(rxrpc_rtt_tx_data, "DATA") \ + 
E_(rxrpc_rtt_tx_ping, "PING") + +#define rxrpc_rtt_rx_traces \ + EM(rxrpc_rtt_rx_ping_response, "PONG") \ + E_(rxrpc_rtt_rx_requested_ack, "RACK") + +#define rxrpc_timer_traces \ + EM(rxrpc_timer_begin, "Begin ") \ + EM(rxrpc_timer_expired, "*EXPR*") \ + EM(rxrpc_timer_init_for_reply, "IniRpl") \ + EM(rxrpc_timer_init_for_send_reply, "SndRpl") \ + EM(rxrpc_timer_set_for_ack, "SetAck") \ + EM(rxrpc_timer_set_for_ping, "SetPng") \ + EM(rxrpc_timer_set_for_resend, "SetRTx") \ + E_(rxrpc_timer_set_for_send, "SetTx ") + +#define rxrpc_propose_ack_traces \ + EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ + EM(rxrpc_propose_ack_input_data, "DataIn ") \ + EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ + EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ + EM(rxrpc_propose_ack_ping_for_params, "Params ") \ + EM(rxrpc_propose_ack_processing_op, "ProcOp ") \ + EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \ + EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \ + EM(rxrpc_propose_ack_retry_tx, "RetryTx") \ + EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \ + E_(rxrpc_propose_ack_terminal_ack, "ClTerm ") + +#define rxrpc_propose_ack_outcomes \ + EM(rxrpc_propose_ack_subsume, " Subsume") \ + EM(rxrpc_propose_ack_update, " Update") \ + E_(rxrpc_propose_ack_use, "") + +#define rxrpc_congest_modes \ + EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \ + EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \ + EM(RXRPC_CALL_PACKET_LOSS, "PktLoss ") \ + E_(RXRPC_CALL_SLOW_START, "SlowStart") + +#define rxrpc_congest_changes \ + EM(rxrpc_cong_begin_retransmission, " Retrans") \ + EM(rxrpc_cong_cleared_nacks, " Cleared") \ + EM(rxrpc_cong_new_low_nack, " NewLowN") \ + EM(rxrpc_cong_no_change, "") \ + EM(rxrpc_cong_progress, " Progres") \ + EM(rxrpc_cong_retransmit_again, " ReTxAgn") \ + EM(rxrpc_cong_rtt_window_end, " RttWinE") \ + E_(rxrpc_cong_saw_nack, " SawNack") + +#define rxrpc_pkts \ + EM(0, "?00") \ + EM(RXRPC_PACKET_TYPE_DATA, "DATA") \ + EM(RXRPC_PACKET_TYPE_ACK, "ACK") \ + EM(RXRPC_PACKET_TYPE_BUSY, "BUSY") \ + EM(RXRPC_PACKET_TYPE_ABORT, "ABORT") \ + EM(RXRPC_PACKET_TYPE_ACKALL, "ACKALL") \ + EM(RXRPC_PACKET_TYPE_CHALLENGE, "CHALL") \ + EM(RXRPC_PACKET_TYPE_RESPONSE, "RESP") \ + EM(RXRPC_PACKET_TYPE_DEBUG, "DEBUG") \ + EM(9, "?09") \ + EM(10, "?10") \ + EM(11, "?11") \ + EM(12, "?12") \ + EM(RXRPC_PACKET_TYPE_VERSION, "VERSION") \ + EM(14, "?14") \ + E_(15, "?15") + +#define rxrpc_ack_names \ + EM(0, "-0-") \ + EM(RXRPC_ACK_REQUESTED, "REQ") \ + EM(RXRPC_ACK_DUPLICATE, "DUP") \ + EM(RXRPC_ACK_OUT_OF_SEQUENCE, "OOS") \ + EM(RXRPC_ACK_EXCEEDS_WINDOW, "WIN") \ + EM(RXRPC_ACK_NOSPACE, "MEM") \ + EM(RXRPC_ACK_PING, "PNG") \ + EM(RXRPC_ACK_PING_RESPONSE, "PNR") \ + EM(RXRPC_ACK_DELAY, "DLY") \ + EM(RXRPC_ACK_IDLE, "IDL") \ + E_(RXRPC_ACK__INVALID, "-?-") + +/* + * Export enum symbols via userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +rxrpc_skb_traces; +rxrpc_conn_traces; +rxrpc_client_traces; +rxrpc_call_traces; +rxrpc_transmit_traces; +rxrpc_receive_traces; +rxrpc_recvmsg_traces; +rxrpc_rtt_tx_traces; +rxrpc_rtt_rx_traces; +rxrpc_timer_traces; +rxrpc_propose_ack_traces; +rxrpc_propose_ack_outcomes; +rxrpc_congest_changes; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. 
+ */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + TRACE_EVENT(rxrpc_conn, TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op, int usage, const void *where), @@ -38,7 +418,7 @@ TRACE_EVENT(rxrpc_conn, TP_printk("C=%p %s u=%d sp=%pSR", __entry->conn, - rxrpc_conn_traces[__entry->op], + __print_symbolic(__entry->op, rxrpc_conn_traces), __entry->usage, __entry->where) ); @@ -70,8 +450,8 @@ TRACE_EVENT(rxrpc_client, TP_printk("C=%p h=%2d %s %s i=%08x u=%d", __entry->conn, __entry->channel, - rxrpc_client_traces[__entry->op], - rxrpc_conn_cache_states[__entry->cs], + __print_symbolic(__entry->op, rxrpc_client_traces), + __print_symbolic(__entry->cs, rxrpc_conn_cache_states), __entry->cid, __entry->usage) ); @@ -100,7 +480,7 @@ TRACE_EVENT(rxrpc_call, TP_printk("c=%p %s u=%d sp=%pSR a=%p", __entry->call, - rxrpc_call_traces[__entry->op], + __print_symbolic(__entry->op, rxrpc_call_traces), __entry->usage, __entry->where, __entry->aux) @@ -130,7 +510,7 @@ TRACE_EVENT(rxrpc_skb, TP_printk("s=%p %s u=%d m=%d p=%pSR", __entry->skb, - rxrpc_skb_traces[__entry->op], + __print_symbolic(__entry->op, rxrpc_skb_traces), __entry->usage, __entry->mod_count, __entry->where) @@ -154,7 +534,8 @@ TRACE_EVENT(rxrpc_rx_packet, __entry->hdr.callNumber, __entry->hdr.serviceId, __entry->hdr.serial, __entry->hdr.seq, __entry->hdr.type, __entry->hdr.flags, - __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK") + __entry->hdr.type <= 15 ? + __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK") ); TRACE_EVENT(rxrpc_rx_done, @@ -225,7 +606,7 @@ TRACE_EVENT(rxrpc_transmit, TP_printk("c=%p %s f=%08x n=%u", __entry->call, - rxrpc_transmit_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_transmit_traces), __entry->tx_hard_ack + 1, __entry->tx_top - __entry->tx_hard_ack) ); @@ -251,7 +632,7 @@ TRACE_EVENT(rxrpc_rx_ack, TP_printk("c=%p %s f=%08x n=%u", __entry->call, - rxrpc_ack_names[__entry->reason], + __print_symbolic(__entry->reason, rxrpc_ack_names), __entry->first, __entry->n_acks) ); @@ -317,7 +698,7 @@ TRACE_EVENT(rxrpc_tx_ack, TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u", __entry->call, __entry->serial, - rxrpc_ack_names[__entry->reason], + __print_symbolic(__entry->reason, rxrpc_ack_names), __entry->ack_first, __entry->ack_serial, __entry->n_acks) @@ -349,7 +730,7 @@ TRACE_EVENT(rxrpc_receive, TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x", __entry->call, - rxrpc_receive_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_receive_traces), __entry->serial, __entry->seq, __entry->hard_ack, @@ -383,7 +764,7 @@ TRACE_EVENT(rxrpc_recvmsg, TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d", __entry->call, - rxrpc_recvmsg_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_recvmsg_traces), __entry->seq, __entry->offset, __entry->len, @@ -410,7 +791,7 @@ TRACE_EVENT(rxrpc_rtt_tx, TP_printk("c=%p %s sr=%08x", __entry->call, - rxrpc_rtt_tx_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_rtt_tx_traces), __entry->send_serial) ); @@ -443,7 +824,7 @@ TRACE_EVENT(rxrpc_rtt_rx, TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", __entry->call, - rxrpc_rtt_rx_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, __entry->rtt, @@ -481,7 +862,7 @@ TRACE_EVENT(rxrpc_timer, TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld", __entry->call, - rxrpc_timer_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_timer_traces), 
ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)), ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)), ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)), @@ -506,7 +887,8 @@ TRACE_EVENT(rxrpc_rx_lose, __entry->hdr.callNumber, __entry->hdr.serviceId, __entry->hdr.serial, __entry->hdr.seq, __entry->hdr.type, __entry->hdr.flags, - __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK") + __entry->hdr.type <= 15 ? + __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK") ); TRACE_EVENT(rxrpc_propose_ack, @@ -539,12 +921,12 @@ TRACE_EVENT(rxrpc_propose_ack, TP_printk("c=%p %s %s r=%08x i=%u b=%u%s", __entry->call, - rxrpc_propose_ack_traces[__entry->why], - rxrpc_ack_names[__entry->ack_reason], + __print_symbolic(__entry->why, rxrpc_propose_ack_traces), + __print_symbolic(__entry->ack_reason, rxrpc_ack_names), __entry->serial, __entry->immediate, __entry->background, - rxrpc_propose_ack_outcomes[__entry->outcome]) + __print_symbolic(__entry->outcome, rxrpc_propose_ack_outcomes)) ); TRACE_EVENT(rxrpc_retransmit, @@ -603,9 +985,9 @@ TRACE_EVENT(rxrpc_congest, TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, __entry->ack_serial, - rxrpc_ack_names[__entry->sum.ack_reason], + __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), __entry->hard_ack, - rxrpc_congest_modes[__entry->sum.mode], + __print_symbolic(__entry->sum.mode, rxrpc_congest_modes), __entry->sum.cwnd, __entry->sum.ssthresh, __entry->sum.nr_acks, __entry->sum.nr_nacks, @@ -615,7 +997,7 @@ TRACE_EVENT(rxrpc_congest, __entry->sum.cumulative_acks, __entry->sum.dup_acks, __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "", - rxrpc_congest_changes[__entry->change], + __print_symbolic(__entry->change, rxrpc_congest_changes), __entry->sum.retrans_timeo ? 
" rTxTo" : "") ); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index f60e35576526..84927c7b5fdf 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -593,200 +593,6 @@ struct rxrpc_ack_summary { u8 cumulative_acks; }; -enum rxrpc_skb_trace { - rxrpc_skb_rx_cleaned, - rxrpc_skb_rx_freed, - rxrpc_skb_rx_got, - rxrpc_skb_rx_lost, - rxrpc_skb_rx_received, - rxrpc_skb_rx_rotated, - rxrpc_skb_rx_purged, - rxrpc_skb_rx_seen, - rxrpc_skb_tx_cleaned, - rxrpc_skb_tx_freed, - rxrpc_skb_tx_got, - rxrpc_skb_tx_new, - rxrpc_skb_tx_rotated, - rxrpc_skb_tx_seen, - rxrpc_skb__nr_trace -}; - -extern const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7]; - -enum rxrpc_conn_trace { - rxrpc_conn_new_client, - rxrpc_conn_new_service, - rxrpc_conn_queued, - rxrpc_conn_seen, - rxrpc_conn_got, - rxrpc_conn_put_client, - rxrpc_conn_put_service, - rxrpc_conn__nr_trace -}; - -extern const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4]; - -enum rxrpc_client_trace { - rxrpc_client_activate_chans, - rxrpc_client_alloc, - rxrpc_client_chan_activate, - rxrpc_client_chan_disconnect, - rxrpc_client_chan_pass, - rxrpc_client_chan_unstarted, - rxrpc_client_cleanup, - rxrpc_client_count, - rxrpc_client_discard, - rxrpc_client_duplicate, - rxrpc_client_exposed, - rxrpc_client_replace, - rxrpc_client_to_active, - rxrpc_client_to_culled, - rxrpc_client_to_idle, - rxrpc_client_to_inactive, - rxrpc_client_to_waiting, - rxrpc_client_uncount, - rxrpc_client__nr_trace -}; - -extern const char rxrpc_client_traces[rxrpc_client__nr_trace][7]; -extern const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5]; - -enum rxrpc_call_trace { - rxrpc_call_new_client, - rxrpc_call_new_service, - rxrpc_call_queued, - rxrpc_call_queued_ref, - rxrpc_call_seen, - rxrpc_call_connected, - rxrpc_call_release, - rxrpc_call_got, - rxrpc_call_got_userid, - rxrpc_call_got_kernel, - rxrpc_call_put, - rxrpc_call_put_userid, - rxrpc_call_put_kernel, - rxrpc_call_put_noqueue, - rxrpc_call_error, - rxrpc_call__nr_trace -}; - -extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4]; - -enum rxrpc_transmit_trace { - rxrpc_transmit_wait, - rxrpc_transmit_queue, - rxrpc_transmit_queue_last, - rxrpc_transmit_rotate, - rxrpc_transmit_rotate_last, - rxrpc_transmit_await_reply, - rxrpc_transmit_end, - rxrpc_transmit__nr_trace -}; - -extern const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4]; - -enum rxrpc_receive_trace { - rxrpc_receive_incoming, - rxrpc_receive_queue, - rxrpc_receive_queue_last, - rxrpc_receive_front, - rxrpc_receive_rotate, - rxrpc_receive_end, - rxrpc_receive__nr_trace -}; - -extern const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4]; - -enum rxrpc_recvmsg_trace { - rxrpc_recvmsg_enter, - rxrpc_recvmsg_wait, - rxrpc_recvmsg_dequeue, - rxrpc_recvmsg_hole, - rxrpc_recvmsg_next, - rxrpc_recvmsg_cont, - rxrpc_recvmsg_full, - rxrpc_recvmsg_data_return, - rxrpc_recvmsg_terminal, - rxrpc_recvmsg_to_be_accepted, - rxrpc_recvmsg_return, - rxrpc_recvmsg__nr_trace -}; - -extern const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5]; - -enum rxrpc_rtt_tx_trace { - rxrpc_rtt_tx_ping, - rxrpc_rtt_tx_data, - rxrpc_rtt_tx__nr_trace -}; - -extern const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5]; - -enum rxrpc_rtt_rx_trace { - rxrpc_rtt_rx_ping_response, - rxrpc_rtt_rx_requested_ack, - rxrpc_rtt_rx__nr_trace -}; - -extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5]; - -enum rxrpc_timer_trace { - rxrpc_timer_begin, - rxrpc_timer_init_for_reply, - 
rxrpc_timer_init_for_send_reply, - rxrpc_timer_expired, - rxrpc_timer_set_for_ack, - rxrpc_timer_set_for_ping, - rxrpc_timer_set_for_resend, - rxrpc_timer_set_for_send, - rxrpc_timer__nr_trace -}; - -extern const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8]; - -enum rxrpc_propose_ack_trace { - rxrpc_propose_ack_client_tx_end, - rxrpc_propose_ack_input_data, - rxrpc_propose_ack_ping_for_lost_ack, - rxrpc_propose_ack_ping_for_lost_reply, - rxrpc_propose_ack_ping_for_params, - rxrpc_propose_ack_processing_op, - rxrpc_propose_ack_respond_to_ack, - rxrpc_propose_ack_respond_to_ping, - rxrpc_propose_ack_retry_tx, - rxrpc_propose_ack_rotate_rx, - rxrpc_propose_ack_terminal_ack, - rxrpc_propose_ack__nr_trace -}; - -enum rxrpc_propose_ack_outcome { - rxrpc_propose_ack_use, - rxrpc_propose_ack_update, - rxrpc_propose_ack_subsume, - rxrpc_propose_ack__nr_outcomes -}; - -extern const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8]; -extern const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes]; - -enum rxrpc_congest_change { - rxrpc_cong_begin_retransmission, - rxrpc_cong_cleared_nacks, - rxrpc_cong_new_low_nack, - rxrpc_cong_no_change, - rxrpc_cong_progress, - rxrpc_cong_retransmit_again, - rxrpc_cong_rtt_window_end, - rxrpc_cong_saw_nack, - rxrpc_congest__nr_change -}; - -extern const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10]; -extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9]; - -extern const char *const rxrpc_pkts[]; -extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4]; - #include /* diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 1ed18d8c9c9f..8b94db3c9b2e 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -43,24 +43,6 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = { [RXRPC_CALL_NETWORK_ERROR] = "NetError", }; -const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = { - [rxrpc_call_new_client] = "NWc", - [rxrpc_call_new_service] = "NWs", - [rxrpc_call_queued] = "QUE", - [rxrpc_call_queued_ref] = "QUR", - [rxrpc_call_connected] = "CON", - [rxrpc_call_release] = "RLS", - [rxrpc_call_seen] = "SEE", - [rxrpc_call_got] = "GOT", - [rxrpc_call_got_userid] = "Gus", - [rxrpc_call_got_kernel] = "Gke", - [rxrpc_call_put] = "PUT", - [rxrpc_call_put_userid] = "Pus", - [rxrpc_call_put_kernel] = "Pke", - [rxrpc_call_put_noqueue] = "PNQ", - [rxrpc_call_error] = "*E*", -}; - struct kmem_cache *rxrpc_call_jar; LIST_HEAD(rxrpc_calls); DEFINE_RWLOCK(rxrpc_call_lock); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 6cbcdcc29853..40a1ef2adeb4 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -105,14 +105,6 @@ static void rxrpc_discard_expired_client_conns(struct work_struct *); static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap, rxrpc_discard_expired_client_conns); -const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5] = { - [RXRPC_CONN_CLIENT_INACTIVE] = "Inac", - [RXRPC_CONN_CLIENT_WAITING] = "Wait", - [RXRPC_CONN_CLIENT_ACTIVE] = "Actv", - [RXRPC_CONN_CLIENT_CULLED] = "Cull", - [RXRPC_CONN_CLIENT_IDLE] = "Idle", -}; - /* * Get a connection ID and epoch for a client connection from the global pool. * The connection struct pointer is then recorded in the idr radix tree. 
The diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 1d87b5453ef7..7c2abd85def9 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -767,15 +767,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks); - _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", - sp->hdr.serial, - ntohs(buf.ack.maxSkew), - first_soft_ack, - ntohl(buf.ack.previousPacket), - acked_serial, - rxrpc_ack_names[summary.ack_reason], - buf.ack.nAcks); - if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) rxrpc_input_ping_response(call, skb->tstamp, acked_serial, sp->hdr.serial); @@ -931,7 +922,6 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, break; default: - _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial); break; } diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c index 6dee55fad2d3..1a2d4b112064 100644 --- a/net/rxrpc/misc.c +++ b/net/rxrpc/misc.c @@ -77,12 +77,6 @@ unsigned int rxrpc_rx_jumbo_max = 4; */ unsigned int rxrpc_resend_timeout = 4 * 1000; -const char *const rxrpc_pkts[] = { - "?00", - "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG", - "?09", "?10", "?11", "?12", "VERSION", "?14", "?15" -}; - const s8 rxrpc_ack_priority[] = { [0] = 0, [RXRPC_ACK_DELAY] = 1, @@ -94,148 +88,3 @@ const s8 rxrpc_ack_priority[] = { [RXRPC_ACK_NOSPACE] = 7, [RXRPC_ACK_PING_RESPONSE] = 8, }; - -const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = { - "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", - "IDL", "-?-" -}; - -const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7] = { - [rxrpc_skb_rx_cleaned] = "Rx CLN", - [rxrpc_skb_rx_freed] = "Rx FRE", - [rxrpc_skb_rx_got] = "Rx GOT", - [rxrpc_skb_rx_lost] = "Rx *L*", - [rxrpc_skb_rx_received] = "Rx RCV", - [rxrpc_skb_rx_purged] = "Rx PUR", - [rxrpc_skb_rx_rotated] = "Rx ROT", - [rxrpc_skb_rx_seen] = "Rx SEE", - [rxrpc_skb_tx_cleaned] = "Tx CLN", - [rxrpc_skb_tx_freed] = "Tx FRE", - [rxrpc_skb_tx_got] = "Tx GOT", - [rxrpc_skb_tx_new] = "Tx NEW", - [rxrpc_skb_tx_rotated] = "Tx ROT", - [rxrpc_skb_tx_seen] = "Tx SEE", -}; - -const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4] = { - [rxrpc_conn_new_client] = "NWc", - [rxrpc_conn_new_service] = "NWs", - [rxrpc_conn_queued] = "QUE", - [rxrpc_conn_seen] = "SEE", - [rxrpc_conn_got] = "GOT", - [rxrpc_conn_put_client] = "PTc", - [rxrpc_conn_put_service] = "PTs", -}; - -const char rxrpc_client_traces[rxrpc_client__nr_trace][7] = { - [rxrpc_client_activate_chans] = "Activa", - [rxrpc_client_alloc] = "Alloc ", - [rxrpc_client_chan_activate] = "ChActv", - [rxrpc_client_chan_disconnect] = "ChDisc", - [rxrpc_client_chan_pass] = "ChPass", - [rxrpc_client_chan_unstarted] = "ChUnst", - [rxrpc_client_cleanup] = "Clean ", - [rxrpc_client_count] = "Count ", - [rxrpc_client_discard] = "Discar", - [rxrpc_client_duplicate] = "Duplic", - [rxrpc_client_exposed] = "Expose", - [rxrpc_client_replace] = "Replac", - [rxrpc_client_to_active] = "->Actv", - [rxrpc_client_to_culled] = "->Cull", - [rxrpc_client_to_idle] = "->Idle", - [rxrpc_client_to_inactive] = "->Inac", - [rxrpc_client_to_waiting] = "->Wait", - [rxrpc_client_uncount] = "Uncoun", -}; - -const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4] = { - [rxrpc_transmit_wait] = "WAI", - [rxrpc_transmit_queue] = "QUE", - [rxrpc_transmit_queue_last] = "QLS", - [rxrpc_transmit_rotate] = "ROT", - [rxrpc_transmit_rotate_last] = "RLS", - [rxrpc_transmit_await_reply] = "AWR", - [rxrpc_transmit_end] = "END", -}; - -const char 
rxrpc_receive_traces[rxrpc_receive__nr_trace][4] = { - [rxrpc_receive_incoming] = "INC", - [rxrpc_receive_queue] = "QUE", - [rxrpc_receive_queue_last] = "QLS", - [rxrpc_receive_front] = "FRN", - [rxrpc_receive_rotate] = "ROT", - [rxrpc_receive_end] = "END", -}; - -const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5] = { - [rxrpc_recvmsg_enter] = "ENTR", - [rxrpc_recvmsg_wait] = "WAIT", - [rxrpc_recvmsg_dequeue] = "DEQU", - [rxrpc_recvmsg_hole] = "HOLE", - [rxrpc_recvmsg_next] = "NEXT", - [rxrpc_recvmsg_cont] = "CONT", - [rxrpc_recvmsg_full] = "FULL", - [rxrpc_recvmsg_data_return] = "DATA", - [rxrpc_recvmsg_terminal] = "TERM", - [rxrpc_recvmsg_to_be_accepted] = "TBAC", - [rxrpc_recvmsg_return] = "RETN", -}; - -const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5] = { - [rxrpc_rtt_tx_ping] = "PING", - [rxrpc_rtt_tx_data] = "DATA", -}; - -const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5] = { - [rxrpc_rtt_rx_ping_response] = "PONG", - [rxrpc_rtt_rx_requested_ack] = "RACK", -}; - -const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8] = { - [rxrpc_timer_begin] = "Begin ", - [rxrpc_timer_expired] = "*EXPR*", - [rxrpc_timer_init_for_reply] = "IniRpl", - [rxrpc_timer_init_for_send_reply] = "SndRpl", - [rxrpc_timer_set_for_ack] = "SetAck", - [rxrpc_timer_set_for_ping] = "SetPng", - [rxrpc_timer_set_for_send] = "SetTx ", - [rxrpc_timer_set_for_resend] = "SetRTx", -}; - -const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8] = { - [rxrpc_propose_ack_client_tx_end] = "ClTxEnd", - [rxrpc_propose_ack_input_data] = "DataIn ", - [rxrpc_propose_ack_ping_for_lost_ack] = "LostAck", - [rxrpc_propose_ack_ping_for_lost_reply] = "LostRpl", - [rxrpc_propose_ack_ping_for_params] = "Params ", - [rxrpc_propose_ack_processing_op] = "ProcOp ", - [rxrpc_propose_ack_respond_to_ack] = "Rsp2Ack", - [rxrpc_propose_ack_respond_to_ping] = "Rsp2Png", - [rxrpc_propose_ack_retry_tx] = "RetryTx", - [rxrpc_propose_ack_rotate_rx] = "RxAck ", - [rxrpc_propose_ack_terminal_ack] = "ClTerm ", -}; - -const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes] = { - [rxrpc_propose_ack_use] = "", - [rxrpc_propose_ack_update] = " Update", - [rxrpc_propose_ack_subsume] = " Subsume", -}; - -const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10] = { - [RXRPC_CALL_SLOW_START] = "SlowStart", - [RXRPC_CALL_CONGEST_AVOIDANCE] = "CongAvoid", - [RXRPC_CALL_PACKET_LOSS] = "PktLoss ", - [RXRPC_CALL_FAST_RETRANSMIT] = "FastReTx ", -}; - -const char rxrpc_congest_changes[rxrpc_congest__nr_change][9] = { - [rxrpc_cong_begin_retransmission] = " Retrans", - [rxrpc_cong_cleared_nacks] = " Cleared", - [rxrpc_cong_new_low_nack] = " NewLowN", - [rxrpc_cong_no_change] = "", - [rxrpc_cong_progress] = " Progres", - [rxrpc_cong_retransmit_again] = " ReTxAgn", - [rxrpc_cong_rtt_window_end] = " RttWinE", - [rxrpc_cong_saw_nack] = " SawNack", -}; -- cgit v1.2.3 From b1d9f7fde0bb6c143a9a5b9246ea191e28f2c364 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:34 +0000 Subject: rxrpc: Add some more tracing Add the following extra tracing information: (1) Modify the rxrpc_transmit tracepoint to record the Tx window size as this is varied by the slow-start algorithm. (2) Modify the rxrpc_rx_ack tracepoint to record more information from received ACK packets. (3) Add an rxrpc_rx_data tracepoint to record the information in DATA packets. (4) Add an rxrpc_disconnect_call tracepoint to record call disconnection, including the reason the call was disconnected. 
(5) Add an rxrpc_improper_term tracepoint to record implicit termination of a call by a client either by starting a new call on a particular connection channel without first transmitting the final ACK for the previous call. Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 94 +++++++++++++++++++++++++++++++++++++++++--- net/rxrpc/conn_object.c | 1 + net/rxrpc/input.c | 6 ++- 3 files changed, 95 insertions(+), 6 deletions(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 2395a57462c9..593f586545eb 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -595,6 +595,7 @@ TRACE_EVENT(rxrpc_transmit, __field(enum rxrpc_transmit_trace, why ) __field(rxrpc_seq_t, tx_hard_ack ) __field(rxrpc_seq_t, tx_top ) + __field(int, tx_winsize ) ), TP_fast_assign( @@ -602,38 +603,81 @@ TRACE_EVENT(rxrpc_transmit, __entry->why = why; __entry->tx_hard_ack = call->tx_hard_ack; __entry->tx_top = call->tx_top; + __entry->tx_winsize = call->tx_winsize; ), - TP_printk("c=%p %s f=%08x n=%u", + TP_printk("c=%p %s f=%08x n=%u/%u", __entry->call, __print_symbolic(__entry->why, rxrpc_transmit_traces), __entry->tx_hard_ack + 1, - __entry->tx_top - __entry->tx_hard_ack) + __entry->tx_top - __entry->tx_hard_ack, + __entry->tx_winsize) + ); + +TRACE_EVENT(rxrpc_rx_data, + TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, + rxrpc_serial_t serial, u8 flags, u8 anno), + + TP_ARGS(call, seq, serial, flags, anno), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(rxrpc_seq_t, seq ) + __field(rxrpc_serial_t, serial ) + __field(u8, flags ) + __field(u8, anno ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->seq = seq; + __entry->serial = serial; + __entry->flags = flags; + __entry->anno = anno; + ), + + TP_printk("c=%p DATA %08x q=%08x fl=%02x a=%02x", + __entry->call, + __entry->serial, + __entry->seq, + __entry->flags, + __entry->anno) ); TRACE_EVENT(rxrpc_rx_ack, - TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks), + TP_PROTO(struct rxrpc_call *call, + rxrpc_serial_t serial, rxrpc_serial_t ack_serial, + rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks), - TP_ARGS(call, first, reason, n_acks), + TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks), TP_STRUCT__entry( __field(struct rxrpc_call *, call ) + __field(rxrpc_serial_t, serial ) + __field(rxrpc_serial_t, ack_serial ) __field(rxrpc_seq_t, first ) + __field(rxrpc_seq_t, prev ) __field(u8, reason ) __field(u8, n_acks ) ), TP_fast_assign( __entry->call = call; + __entry->serial = serial; + __entry->ack_serial = ack_serial; __entry->first = first; + __entry->prev = prev; __entry->reason = reason; __entry->n_acks = n_acks; ), - TP_printk("c=%p %s f=%08x n=%u", + TP_printk("c=%p %08x %s r=%08x f=%08x p=%08x n=%u", __entry->call, + __entry->serial, __print_symbolic(__entry->reason, rxrpc_ack_names), + __entry->ack_serial, __entry->first, + __entry->prev, __entry->n_acks) ); @@ -1001,6 +1045,46 @@ TRACE_EVENT(rxrpc_congest, __entry->sum.retrans_timeo ? 
" rTxTo" : "") ); +TRACE_EVENT(rxrpc_disconnect_call, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(u32, abort_code ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->abort_code = call->abort_code; + ), + + TP_printk("c=%p ab=%08x", + __entry->call, + __entry->abort_code) + ); + +TRACE_EVENT(rxrpc_improper_term, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(u32, abort_code ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->abort_code = call->abort_code; + ), + + TP_printk("c=%p ab=%08x", + __entry->call, + __entry->abort_code) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index e1e83af47866..b0ecb770fdce 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -173,6 +173,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn, /* Save the result of the call so that we can repeat it if necessary * through the channel, whilst disposing of the actual call record. */ + trace_rxrpc_disconnect_call(call); chan->last_service_id = call->service_id; if (call->abort_code) { chan->last_abort = call->abort_code; diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 7c2abd85def9..78ec33477adf 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -481,6 +481,7 @@ next_subpacket: return rxrpc_proto_abort("LSA", call, seq); } + trace_rxrpc_rx_data(call, seq, serial, flags, annotation); if (before_eq(seq, hard_ack)) { ack = RXRPC_ACK_DUPLICATE; ack_serial = serial; @@ -765,7 +766,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? buf.ack.reason : RXRPC_ACK__INVALID); - trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks); + trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, + first_soft_ack, ntohl(buf.ack.previousPacket), + summary.ack_reason, nr_acks); if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) rxrpc_input_ping_response(call, skb->tstamp, acked_serial, @@ -951,6 +954,7 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, break; } + trace_rxrpc_improper_term(call); __rxrpc_disconnect_call(conn, call); rxrpc_notify_socket(call); } -- cgit v1.2.3 From 3e018daf045f3803c3ebe89dbb48318f24ffb790 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:33 +0000 Subject: rxrpc: Show a call's hard-ACK cursors in /proc/net/rxrpc_calls Show a call's hard-ACK cursors in /proc/net/rxrpc_calls so that a call's progress can be more easily monitored. 
Signed-off-by: David Howells --- net/rxrpc/proc.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 65cd980767fa..b9bcfbfb095c 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -52,6 +52,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) struct rxrpc_sock *rx; struct rxrpc_peer *peer; struct rxrpc_call *call; + rxrpc_seq_t tx_hard_ack, rx_hard_ack; char lbuff[50], rbuff[50]; if (v == &rxrpc_calls) { @@ -82,9 +83,11 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) else strcpy(rbuff, "no_connection"); + tx_hard_ack = READ_ONCE(call->tx_hard_ack); + rx_hard_ack = READ_ONCE(call->rx_hard_ack); seq_printf(seq, "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" - " %-8.8s %08x %lx\n", + " %-8.8s %08x %lx %08x %02x %08x %02x\n", lbuff, rbuff, call->service_id, @@ -94,7 +97,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) atomic_read(&call->usage), rxrpc_call_states[call->state], call->abort_code, - call->user_call_ID); + call->user_call_ID, + tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack, + rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack); return 0; } -- cgit v1.2.3 From 343884c87e5a928193796415280269e723f951c7 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Wed, 4 Jan 2017 10:53:37 +0000 Subject: cfg80211: only pass sband to set_mandatory_flags_band() The supported band structure contains the band is applies to so no need to pass it separately. Also added a default case to the switch for completeness. The current code base does not call this function with NUM_NL80211_BANDS but kept that case statement although default case would cover that. Signed-off-by: Arend van Spriel Signed-off-by: Johannes Berg --- net/wireless/util.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/wireless/util.c b/net/wireless/util.c index 2cf7df8173b6..c91bc25c4dd5 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -136,12 +136,11 @@ struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq) } EXPORT_SYMBOL(ieee80211_get_channel); -static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, - enum nl80211_band band) +static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) { int i, want; - switch (band) { + switch (sband->band) { case NL80211_BAND_5GHZ: want = 3; for (i = 0; i < sband->n_bitrates; i++) { @@ -191,6 +190,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e); break; case NUM_NL80211_BANDS: + default: WARN_ON(1); break; } @@ -202,7 +202,7 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy) for (band = 0; band < NUM_NL80211_BANDS; band++) if (wiphy->bands[band]) - set_mandatory_flags_band(wiphy->bands[band], band); + set_mandatory_flags_band(wiphy->bands[band]); } bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher) -- cgit v1.2.3 From 300ae149468f440a2629fa8b33d0ce1e860d479f Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 2 Jan 2017 13:29:40 +0100 Subject: netfilter: select LIBCRC32C together with SCTP conntrack nf_conntrack needs to compute crc32c when dealing with SCTP packets. Moreover, NF_NAT_PROTO_SCTP (currently selecting LIBCRC32C) can be enabled only if conntrack support for SCTP is enabled. Therefore, move enabling of kernel support for crc32c so that it is selected when NF_CT_PROTO_SCTP=y. 
Signed-off-by: Davide Caratti Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 63729b489c2c..6d425e355bf5 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -162,6 +162,7 @@ config NF_CT_PROTO_SCTP bool 'SCTP protocol connection tracking support' depends on NETFILTER_ADVANCED default y + select LIBCRC32C help With this option enabled, the layer 3 independent connection tracking code will be able to do state tracking on SCTP connections. @@ -397,7 +398,6 @@ config NF_NAT_PROTO_SCTP bool default NF_NAT && NF_CT_PROTO_SCTP depends on NF_NAT && NF_CT_PROTO_SCTP - select LIBCRC32C config NF_NAT_AMANDA tristate -- cgit v1.2.3 From cf6e007eef83476c5d541453d84e08b07befe124 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 2 Jan 2017 13:29:41 +0100 Subject: netfilter: conntrack: validate SCTP crc32c in PREROUTING implement sctp_error to let nf_conntrack_in validate crc32c on the packet transport header. Assign skb->ip_summed to CHECKSUM_UNNECESSARY and return NF_ACCEPT in case of successful validation; otherwise, return -NF_ACCEPT to let netfilter skip connection tracking, like other protocols do. Besides preventing corrupted packets from matching conntrack entries, this fixes functionality of REJECT target: it was not generating any ICMP upon reception of SCTP packets, because it was computing RFC 1624 checksum on the packet and systematically mismatching crc32c in the SCTP header. Signed-off-by: Davide Caratti Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto_sctp.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index a0efde38da44..44a647418948 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -22,7 +22,9 @@ #include #include #include +#include +#include #include #include #include @@ -505,6 +507,34 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } +static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb, + unsigned int dataoff, enum ip_conntrack_info *ctinfo, + u8 pf, unsigned int hooknum) +{ + const struct sctphdr *sh; + struct sctphdr _sctph; + const char *logmsg; + + sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); + if (!sh) { + logmsg = "nf_ct_sctp: short packet "; + goto out_invalid; + } + if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && + skb->ip_summed == CHECKSUM_NONE) { + if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { + logmsg = "nf_ct_sctp: bad CRC "; + goto out_invalid; + } + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + return NF_ACCEPT; +out_invalid: + if (LOG_INVALID(net, IPPROTO_SCTP)) + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", logmsg); + return -NF_ACCEPT; +} + #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include @@ -752,6 +782,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { .packet = sctp_packet, .get_timeouts = sctp_get_timeouts, .new = sctp_new, + .error = sctp_error, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = sctp_to_nlattr, @@ -786,6 +817,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { .packet = sctp_packet, .get_timeouts = sctp_get_timeouts, .new = sctp_new, + .error = sctp_error, .me = THIS_MODULE, #if 
IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = sctp_to_nlattr, -- cgit v1.2.3 From 4cc4b72c136a45aeccd86f66b0859b148b47d881 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Tue, 20 Dec 2016 22:02:13 +0800 Subject: netfilter: xt_connlimit: use rb_entry() To make the code clearer, use rb_entry() instead of container_of() to deal with rbtree. Signed-off-by: Geliang Tang Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_connlimit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 2aff2b7c4689..660b61dbd776 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -218,7 +218,7 @@ count_tree(struct net *net, struct rb_root *root, int diff; bool addit; - rbconn = container_of(*rbnode, struct xt_connlimit_rb, node); + rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node); parent = *rbnode; diff = same_source_net(addr, mask, &rbconn->addr, family); @@ -398,7 +398,7 @@ static void destroy_tree(struct rb_root *r) struct rb_node *node; while ((node = rb_first(r)) != NULL) { - rbconn = container_of(node, struct xt_connlimit_rb, node); + rbconn = rb_entry(node, struct xt_connlimit_rb, node); rb_erase(node, r); -- cgit v1.2.3 From 3db5e3e707ebb9ab0ce3a2dcd924ed2ea525d770 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 5 Jan 2017 13:37:28 +0100 Subject: wireless: move IEEE80211_NUM_ACS to ieee80211.h This constant isn't really specific to mac80211, so move it "up" a level to ieee80211.h Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 2 ++ include/net/mac80211.h | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index fe849329511a..87d1937e4671 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* number of user priorities 802.11 uses */ #define IEEE80211_NUM_UPS 8 +/* number of ACs */ +#define IEEE80211_NUM_ACS 4 #define IEEE80211_QOS_CTL_LEN 2 /* 1d tag mask */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5345d358a510..5f5cb194cd78 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -147,7 +147,6 @@ enum ieee80211_ac_numbers { IEEE80211_AC_BE = 2, IEEE80211_AC_BK = 3, }; -#define IEEE80211_NUM_ACS 4 /** * struct ieee80211_tx_queue_params - transmit queue configuration -- cgit v1.2.3 From aa884a26a135f0870ca1f52f276deeb58da79fee Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 5 Jan 2017 09:16:27 +0100 Subject: net: ethoc: Remove unused members from struct ethoc The io_region_size and dma_alloc members of struct ethoc are only written but never read, so they might as well be removed. Signed-off-by: Tobias Klauser Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ethoc.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 45abc81f6f55..63e5e14174ee 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -180,8 +180,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region - * @dma_alloc: dma allocated buffer size - * @io_region_size: I/O memory region size * @num_bd: number of buffer descriptors * @num_tx: number of send buffers * @cur_tx: last send buffer written @@ -199,8 +197,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); struct ethoc { void __iomem *iobase; void __iomem *membase; - int dma_alloc; - resource_size_t io_region_size; bool big_endian; unsigned int num_bd; @@ -1096,8 +1092,6 @@ static int ethoc_probe(struct platform_device *pdev) /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; - priv->dma_alloc = 0; - priv->io_region_size = resource_size(mmio); priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, resource_size(mmio)); @@ -1127,7 +1121,6 @@ static int ethoc_probe(struct platform_device *pdev) goto free; } netdev->mem_end = netdev->mem_start + buffer_size; - priv->dma_alloc = buffer_size; } priv->big_endian = pdata ? pdata->big_endian : -- cgit v1.2.3 From 60f9b5e8068babbace0d239765f07a538505ed5a Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 5 Jan 2017 10:41:36 +0100 Subject: net: xilinx: emaclite: Remove xemaclite_remove_ndev() xemaclite_remove_ndev() is a simple wrapper around free_netdev() checking for NULL before the call. All possible paths calling it are guaranteed to pass a non-NULL argument, so rather call free_netdev() directly. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 93dc10b10c09..97dcc0bd5a85 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1028,20 +1028,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) return 0; } -/** - * xemaclite_remove_ndev - Free the network device - * @ndev: Pointer to the network device to be freed - * - * This function un maps the IO region of the Emaclite device and frees the net - * device. - */ -static void xemaclite_remove_ndev(struct net_device *ndev) -{ - if (ndev) { - free_netdev(ndev); - } -} - /** * get_bool - Get a parameter from the OF device * @ofdev: Pointer to OF device structure @@ -1172,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) return 0; error: - xemaclite_remove_ndev(ndev); + free_netdev(ndev); return rc; } @@ -1204,7 +1190,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev) of_node_put(lp->phy_node); lp->phy_node = NULL; - xemaclite_remove_ndev(ndev); + free_netdev(ndev); return 0; } -- cgit v1.2.3 From 56b46b4378f08fe519398df4da14ea2e7c9ae52c Mon Sep 17 00:00:00 2001 From: Volodymyr Bendiuga Date: Thu, 5 Jan 2017 10:44:18 +0100 Subject: net:mv88e6xxx: use g2 interrupt for 6097 chip This chip needs MV88E6XXX_FLAG_G2_INT Signed-off-by: Volodymyr Bendiuga Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index af54baea47cf..a224d66dafd9 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -566,6 +566,7 @@ enum mv88e6xxx_cap { (MV88E6XXX_FLAG_G1_ATU_FID | \ MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ -- cgit v1.2.3 From 5e6eb456983c994a8e582127a300b6552e5a1768 Mon Sep 17 00:00:00 2001 From: Volodymyr Bendiuga Date: Thu, 5 Jan 2017 11:10:13 +0100 Subject: net:dsa: check for EPROBE_DEFER from dsa_dst_parse() Since there can be multiple dsa switches stacked together but not all of devicetree nodes available at the time of calling dsa_dst_parse(), EPROBE_DEFER can be returned by it. When this happens, only the last dsa switch has to be deleted by dsa_dst_del_ds(), but not the whole list, because next time linux cames back to this function it will try to add only the last dsa switch which returned EPROBE_DEFER. Signed-off-by: Volodymyr Bendiuga Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- net/dsa/dsa2.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 5fff951a0a49..bad119cee2a3 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -647,8 +647,14 @@ static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np) } err = dsa_dst_parse(dst); - if (err) + if (err) { + if (err == -EPROBE_DEFER) { + dsa_dst_del_ds(dst, ds, ds->index); + return err; + } + goto out_del_dst; + } err = dsa_dst_apply(dst); if (err) { -- cgit v1.2.3 From ad02c4f547826167a709dab8a89a1caefd2c1f50 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Wed, 4 Jan 2017 11:19:34 -0500 Subject: tcp: provide timestamps for partial writes For TCP sockets, TX timestamps are only captured when the user data is successfully and fully written to the socket. In many cases, however, TCP writes can be partial for which no timestamp is collected. Collect timestamps whenever any user data is (fully or partially) copied into the socket. Pass tcp_write_queue_tail to tcp_tx_timestamp instead of the local skb pointer since it can be set to NULL on the error path. Note that tcp_write_queue_tail can be NULL, even if bytes have been copied to the socket. This is because acknowledgements are being processed in tcp_sendmsg(), and by the time tcp_tx_timestamp is called tcp_write_queue_tail can be NULL. For such cases, this patch does not collect any timestamps (i.e., it is best-effort). This patch is written with suggestions from Willem de Bruijn and Eric Dumazet. Change-log V1 -> V2: - Use sockc.tsflags instead of sk->sk_tsflags. - Use the same code path for normal writes and errors. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Cc: Willem de Bruijn Cc: Eric Dumazet Cc: Neal Cardwell Cc: Martin KaFai Lau Acked-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2e3807d8eba8..ec97e4b4a62f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -429,7 +429,7 @@ EXPORT_SYMBOL(tcp_init_sock); static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb) { - if (tsflags) { + if (tsflags && skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); @@ -958,10 +958,8 @@ new_segment: copied += copy; offset += copy; size -= copy; - if (!size) { - tcp_tx_timestamp(sk, sk->sk_tsflags, skb); + if (!size) goto out; - } if (skb->len < size_goal || (flags & MSG_OOB)) continue; @@ -987,8 +985,11 @@ wait_for_memory: } out: - if (copied && !(flags & MSG_SENDPAGE_NOTLAST)) - tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); + if (copied) { + tcp_tx_timestamp(sk, sk->sk_tsflags, tcp_write_queue_tail(sk)); + if (!(flags & MSG_SENDPAGE_NOTLAST)) + tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); + } return copied; do_error: @@ -1281,7 +1282,6 @@ new_segment: copied += copy; if (!msg_data_left(msg)) { - tcp_tx_timestamp(sk, sockc.tsflags, skb); if (unlikely(flags & MSG_EOR)) TCP_SKB_CB(skb)->eor = 1; goto out; @@ -1312,8 +1312,10 @@ wait_for_memory: } out: - if (copied) + if (copied) { + tcp_tx_timestamp(sk, sockc.tsflags, tcp_write_queue_tail(sk)); tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); + } out_nopush: release_sock(sk); return copied + copied_syn; -- cgit v1.2.3 From c1878f7a89efbbe1ac0082d09b2928782a6ceba1 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Thu, 5 Jan 2017 11:06:22 -0800 Subject: tools: psock_tpacket: block Rx until socket filter has been added and socket has been bound to loopback. Packets from any/all interfaces may be queued up on the PF_PACKET socket before it is bound to the loopback interface by psock_tpacket, and when these are passed up by the kernel, they could interfere with the Rx tests. Avoid interference from spurious packet by blocking Rx until the socket filter has been set up, and the packet has been bound to the desired (lo) interface. The effective sequence is socket(PF_PACKET, SOCK_RAW, 0); set up ring Invoke SO_ATTACH_FILTER bind to sll_protocol set to ETH_P_ALL, sll_ifindex for lo After this sequence, the only packets that will be passed up are those received on loopback that pass the attached filter. Signed-off-by: Sowmini Varadhan Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/psock_tpacket.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c index 4a1bc648b35e..7f6cd9fdacf3 100644 --- a/tools/testing/selftests/net/psock_tpacket.c +++ b/tools/testing/selftests/net/psock_tpacket.c @@ -110,7 +110,7 @@ static unsigned int total_packets, total_bytes; static int pfsocket(int ver) { - int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + int ret, sock = socket(PF_PACKET, SOCK_RAW, 0); if (sock == -1) { perror("socket"); exit(1); @@ -239,7 +239,6 @@ static void walk_v1_v2_rx(int sock, struct ring *ring) bug_on(ring->type != PACKET_RX_RING); pair_udp_open(udp_sock, PORT_BASE); - pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; @@ -601,7 +600,6 @@ static void walk_v3_rx(int sock, struct ring *ring) bug_on(ring->type != PACKET_RX_RING); pair_udp_open(udp_sock, PORT_BASE); - pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; @@ -741,6 +739,8 @@ static void bind_ring(int sock, struct ring *ring) { int ret; + pair_udp_setfilter(sock); + ring->ll.sll_family = PF_PACKET; ring->ll.sll_protocol = htons(ETH_P_ALL); ring->ll.sll_ifindex = if_nametoindex("lo"); -- cgit v1.2.3 From b3b73b8e6df685ba61476b256f98eff1d650c199 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 5 Jan 2017 13:23:58 +0100 Subject: xfrm: state: do not acquire lock in get_mtu helpers Once flow cache gets removed the mtu initialisation happens for every skb that gets an xfrm attached, so this lock starts to show up in perf. It is not obvious why this lock is required -- the caller holds reference on the state struct, type->destructor is only called from the state gc worker (all state structs on gc list must have refcount 0). xfrm_init_state already has been called (else private data accessed by type->get_mtu() would not be set up). So just remove the lock -- the race on the state (DEAD?) doesn't matter (could change right after dropping the lock too). Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_state.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c5cf4d611aab..6b3366fef019 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2000,16 +2000,13 @@ EXPORT_SYMBOL(xfrm_state_delete_tunnel); int xfrm_state_mtu(struct xfrm_state *x, int mtu) { - int res; + const struct xfrm_type *type = READ_ONCE(x->type); - spin_lock_bh(&x->lock); if (x->km.state == XFRM_STATE_VALID && - x->type && x->type->get_mtu) - res = x->type->get_mtu(x, mtu); - else - res = mtu - x->props.header_len; - spin_unlock_bh(&x->lock); - return res; + type && type->get_mtu) + return type->get_mtu(x, mtu); + + return mtu - x->props.header_len; } int __xfrm_init_state(struct xfrm_state *x, bool init_replay) -- cgit v1.2.3 From 030ec9e68239a8b7851701edc29de5745fe15024 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 26 Sep 2016 20:46:25 -0700 Subject: igb: Realign bad indentation Statements should start on tabstops. Use a single statement and test instead of multiple tests. 
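Only as a sketch of the intent (the actual change is in the diff below; identifiers come from the driver): the branch now merely selects the NVM offset, and a single read with a single error test follows it:

	if (hw->mac.type == e1000_i350)
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
	else
		lan_offset = 0;

	/* one read and one error check, regardless of MAC type */
	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
				   1, &nvm_data);
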
Signed-off-by: Joe Perches Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_mac.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 5010e2232c50..5eff82678f0b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -792,15 +792,13 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) * control setting, then the variable hw->fc will * be initialized based on a value in the EEPROM. */ - if (hw->mac.type == e1000_i350) { + if (hw->mac.type == e1000_i350) lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG - + lan_offset, 1, &nvm_data); - } else { - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, - 1, &nvm_data); - } + else + lan_offset = 0; + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; @@ -808,8 +806,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; else hw->fc.requested_mode = e1000_fc_full; -- cgit v1.2.3 From 7e54d9d063fa239c95c21548c5267f0ef419ff56 Mon Sep 17 00:00:00 2001 From: khalidm Date: Mon, 17 Oct 2016 09:51:08 -0700 Subject: e1000e: driver trying to free already-free irq During systemd reboot sequence network driver interface is shutdown by e1000_close. The PCI driver interface is shut by e1000_shutdown. The e1000_shutdown checks for netif_running status, if still up it brings down driver. But it disables msi outside of this if statement, regardless of netif status. All this is OK when e1000_close happens after shutdown. However, by default, everything in systemd is done in parallel. This creates a conditions where e1000_shutdown is called after e1000_close, therefore hitting BUG_ON assert in free_msi_irqs. 
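In outline (a simplified sketch, not a literal excerpt; the function names are those used in the driver), the fix keeps the interrupt-capability teardown inside the netif_running() check so a shutdown that runs after close does not release the MSI vectors a second time:

	if (netif_running(netdev)) {
		/* quiesce and release IRQs only while the interface is still up */
		e1000e_down(adapter, false);
		e1000_free_irq(adapter);
		e1000e_reset_interrupt_capability(adapter);
	}
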
CC: xe-kernel@external.cisco.com Signed-off-by: khalidm Signed-off-by: David Singleton Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index eccf1da9356b..af3960853a32 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6276,8 +6276,8 @@ static int e1000e_pm_freeze(struct device *dev) /* Quiesce the device without resetting the hardware */ e1000e_down(adapter, false); e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); } - e1000e_reset_interrupt_capability(adapter); /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); -- cgit v1.2.3 From c4f9b6e5783c06b0b798091a4dd81c23c7ecfbe7 Mon Sep 17 00:00:00 2001 From: Cao jin Date: Wed, 2 Nov 2016 15:20:15 +0800 Subject: igb: correct register comments Signed-off-by: Cao jin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_regs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index d84afdd83e53..58adbf234e07 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -320,7 +320,7 @@ #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ #define E1000_WUC 0x05800 /* Wakeup Control - RW */ #define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */ #define E1000_MANC 0x05820 /* Management Control - RW */ #define E1000_IPAV 0x05838 /* IP Address Valid - RW */ #define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ -- cgit v1.2.3 From 4e684f59d760a2c7c716bb60190783546e2d08a1 Mon Sep 17 00:00:00 2001 From: Chris J Arges Date: Wed, 2 Nov 2016 09:13:42 -0500 Subject: igb: Workaround for igb i210 firmware issue Sometimes firmware may not properly initialize I347AT4_PAGE_SELECT causing the probe of an igb i210 NIC to fail. This patch adds an addition zeroing of this register during igb_get_phy_id to workaround this issue. Thanks for Jochen Henneberg for the idea and original patch. Signed-off-by: Chris J Arges Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_phy.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 5b54254aed4f..569ee25642b4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_id; + /* ensure PHY page selection to fix misconfigured i210 */ + if (hw->mac.type == e1000_i210) + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); if (ret_val) goto out; -- cgit v1.2.3 From 629823b872402451b42462414da08dddd0e2c93d Mon Sep 17 00:00:00 2001 From: Cao jin Date: Tue, 8 Nov 2016 15:06:20 +0800 Subject: igb: use igb_adapter->io_addr instead of e1000_hw->hw_addr When running as guest, under certain condition, it will oops as following. writel() in igb_configure_tx_ring() results in oops, because hw->hw_addr is NULL. 
While other register access won't oops kernel because they use wr32/rd32 which have a defense against NULL pointer. [ 141.225449] pcieport 0000:00:1c.0: AER: Multiple Uncorrected (Fatal) error received: id=0101 [ 141.225523] igb 0000:01:00.1: PCIe Bus Error: severity=Uncorrected (Fatal), type=Unaccessible, id=0101(Unregistered Agent ID) [ 141.299442] igb 0000:01:00.1: broadcast error_detected message [ 141.300539] igb 0000:01:00.0 enp1s0f0: PCIe link lost, device now detached [ 141.351019] igb 0000:01:00.1 enp1s0f1: PCIe link lost, device now detached [ 143.465904] pcieport 0000:00:1c.0: Root Port link has been reset [ 143.465994] igb 0000:01:00.1: broadcast slot_reset message [ 143.466039] igb 0000:01:00.0: enabling device (0000 -> 0002) [ 144.389078] igb 0000:01:00.1: enabling device (0000 -> 0002) [ 145.312078] igb 0000:01:00.1: broadcast resume message [ 145.322211] BUG: unable to handle kernel paging request at 0000000000003818 [ 145.361275] IP: [] igb_configure_tx_ring+0x14d/0x280 [igb] [ 145.400048] PGD 0 [ 145.438007] Oops: 0002 [#1] SMP A similar issue & solution could be found at: http://patchwork.ozlabs.org/patch/689592/ Signed-off-by: Cao jin Acked-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a761001308dc..1e4bd84724b2 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3394,7 +3394,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, tdba & 0x00000000ffffffffULL); wr32(E1000_TDBAH(reg_idx), tdba >> 32); - ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); wr32(E1000_TDH(reg_idx), 0); writel(0, ring->tail); @@ -3733,7 +3733,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ring->count * sizeof(union e1000_adv_rx_desc)); /* initialize head and tail */ - ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); wr32(E1000_RDH(reg_idx), 0); writel(0, ring->tail); -- cgit v1.2.3 From 182785335447957409282ca745aa5bc3968facee Mon Sep 17 00:00:00 2001 From: Aaron Sierra Date: Tue, 29 Nov 2016 10:03:56 -0600 Subject: igb: reset the PHY before reading the PHY ID Several people have reported firmware leaving the I210/I211 PHY's page select register set to something other than the default of zero. This causes the first accesses, PHY_IDx register reads, to access something else, resulting in device probe failure: igb: Intel(R) Gigabit Ethernet Network Driver - version 5.4.0-k igb: Copyright (c) 2007-2014 Intel Corporation. igb: probe of 0000:01:00.0 failed with error -2 This problem began for them after a previous patch I submitted was applied: commit 2a3cdead8b408351fa1e3079b220fa331480ffbc Author: Aaron Sierra Date: Tue Nov 3 12:37:09 2015 -0600 igb: Remove GS40G specific defines/functions I personally experienced this problem after attempting to PXE boot from I210 devices using this firmware: Intel(R) Boot Agent GE v1.5.78 Copyright (C) 1997-2014, Intel Corporation Resetting the PHY before reading from it, ensures the page select register is in its default state and doesn't make assumptions about the PHY's register set before the PHY has been probed. Cc: Matwey V. Kornilov Cc: Chris Arges Cc: Jochen Henneberg Signed-off-by: Aaron Sierra Tested-by: Matwey V. 
Kornilov Tested-by: Chris J Arges Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a61447fd778e..ee443985581f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -245,6 +245,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + /* Set phy->phy_addr and phy->id. */ ret_val = igb_get_phy_id_82575(hw); if (ret_val) -- cgit v1.2.3 From 69b97cf6dbce7403845a28bbc75d57f5be7b12ac Mon Sep 17 00:00:00 2001 From: Guilherme G Piccoli Date: Thu, 10 Nov 2016 16:46:43 -0200 Subject: igb: re-assign hw address pointer on reset after PCI error Whenever the igb driver detects the result of a read operation returns a value composed only by F's (like 0xFFFFFFFF), it will detach the net_device, clear the hw_addr pointer and warn to the user that adapter's link is lost - those steps happen on igb_rd32(). In case a PCI error happens on Power architecture, there's a recovery mechanism called EEH, that will reset the PCI slot and call driver's handlers to reset the adapter and network functionality as well. We observed that once hw_addr is NULL after the error is detected on igb_rd32(), it's never assigned back, so in the process of resetting the network functionality we got a NULL pointer dereference in both igb_configure_tx_ring() and igb_configure_rx_ring(). In order to avoid such bug, this patch re-assigns the hw_addr value in the slot_reset handler. Reported-by: Anthony H Thai Reported-by: Harsha Thyagaraja Signed-off-by: Guilherme G Piccoli Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1e4bd84724b2..82069bcabb9f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7898,6 +7898,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + /* In case of PCI error, adapter lose its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + igb_reset(adapter); wr32(E1000_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; -- cgit v1.2.3 From 9474933caf21a4cb5147223dca1551f527aaac36 Mon Sep 17 00:00:00 2001 From: Todd Fujinaka Date: Tue, 15 Nov 2016 08:54:26 -0800 Subject: igb: close/suspend race in netif_device_detach Similar to ixgbe, when an interface is part of a namespace it is possible that igb_close() may be called while __igb_shutdown() is running which ends up in a double free WARN and/or a BUG in free_msi_irqs(). 
Extend the rtnl_lock() to protect the call to netif_device_detach() and igb_clear_interrupt_scheme() in __igb_shutdown() and check for netif_device_present() to avoid calling igb_clear_interrupt_scheme() a second time in igb_close(). Also extend the rtnl lock in igb_resume() to netif_device_attach(). Signed-off-by: Todd Fujinaka Acked-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 82069bcabb9f..594604e09f8d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3275,7 +3275,9 @@ static int __igb_close(struct net_device *netdev, bool suspending) int igb_close(struct net_device *netdev) { - return __igb_close(netdev, false); + if (netif_device_present(netdev)) + return __igb_close(netdev, false); + return 0; } /** @@ -7564,6 +7566,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) @@ -7572,6 +7575,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, igb_ptp_suspend(adapter); igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -7690,16 +7694,15 @@ static int igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - if (netdev->flags & IFF_UP) { - rtnl_lock(); + rtnl_lock(); + if (!err && netif_running(netdev)) err = __igb_open(netdev, true); - rtnl_unlock(); - if (err) - return err; - } - netif_device_attach(netdev); - return 0; + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; } static int igb_runtime_idle(struct device *dev) -- cgit v1.2.3 From 5bc8c230e2a993b49244f9457499f17283da9ec7 Mon Sep 17 00:00:00 2001 From: Todd Fujinaka Date: Mon, 28 Nov 2016 09:09:57 -0800 Subject: igb: add i211 to i210 PHY workaround i210 and i211 share the same PHY but have different PCI IDs. Don't forget i211 for any i210 workarounds. Signed-off-by: Todd Fujinaka Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 569ee25642b4..2788a5409023 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -78,7 +78,7 @@ s32 igb_get_phy_id(struct e1000_hw *hw) u16 phy_id; /* ensure PHY page selection to fix misconfigured i210 */ - if (hw->mac.type == e1000_i210) + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); -- cgit v1.2.3 From 76ed5a8f47476e4984cc8c0c1bc4cee62650f7fd Mon Sep 17 00:00:00 2001 From: Hannu Lounento Date: Mon, 2 Jan 2017 18:26:06 +0100 Subject: igb: Fix hw_dbg logging in igb_update_flash_i210 Fix an if statement with hw_dbg lines where the logic was inverted with regards to the corresponding return value used in the if statement. 
Signed-off-by: Hannu Lounento Signed-off-by: Peter Senna Tschudin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_i210.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 8aa798737d4d..07d48f2e3369 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw) ret_val = igb_pool_flash_update_done_i210(hw); if (ret_val) - hw_dbg("Flash update complete\n"); - else hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); out: return ret_val; -- cgit v1.2.3 From b330b25eaabda00d74e47566d9200907da381896 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 4 Jan 2017 18:58:29 +0100 Subject: dt-bindings: document common IEEE 802.11 frequency limit property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This new file should be used for properties that apply to all wireless devices. Signed-off-by: Rafał Miłecki Acked-by: Rob Herring Signed-off-by: Johannes Berg --- .../devicetree/bindings/net/wireless/ieee80211.txt | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/wireless/ieee80211.txt diff --git a/Documentation/devicetree/bindings/net/wireless/ieee80211.txt b/Documentation/devicetree/bindings/net/wireless/ieee80211.txt new file mode 100644 index 000000000000..f6442b1397f5 --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/ieee80211.txt @@ -0,0 +1,24 @@ +Common IEEE 802.11 properties + +This provides documentation of common properties that are valid for all wireless +devices. + +Optional properties: + - ieee80211-freq-limit : list of supported frequency ranges in KHz. This can be + used for devices that in a given config support less channels than + normally. It may happen chipset supports a wide wireless band but it is + limited to some part of it due to used antennas or power amplifier. + An example case for this can be tri-band wireless router with two + identical chipsets used for two different 5 GHz subbands. Using them + incorrectly could not work or decrease performance noticeably. + +Example: + +pcie@0,0 { + reg = <0x0000 0 0 0 0>; + wifi@0,0 { + reg = <0x0000 0 0 0 0>; + ieee80211-freq-limit = <2402000 2482000>, + <5170000 5250000>; + }; +}; -- cgit v1.2.3 From 4787cfa08476d4193c35d958f564f643bb6ae234 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 4 Jan 2017 18:58:30 +0100 Subject: cfg80211: move function checking range fit to util.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is needed for another cfg80211 helper that will be out of reg.c so move it to common util.c file and make it non-static. 
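A minimal usage sketch for the now-shared helper (hypothetical values; units are kHz as in the function, and MHZ_TO_KHZ() is the existing conversion macro): a cfg80211-internal caller checks whether a 20 MHz channel centred on 2412 MHz fits entirely inside a 2402-2482 MHz range:

	struct ieee80211_freq_range r = {
		.start_freq_khz = 2402000,
		.end_freq_khz   = 2482000,
	};

	/* 2402000 >= 2402000 and 2422000 <= 2482000, so this evaluates true */
	bool fits = cfg80211_does_bw_fit_range(&r, MHZ_TO_KHZ(2412),
					       MHZ_TO_KHZ(20));
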
Signed-off-by: Rafał Miłecki Signed-off-by: Johannes Berg --- net/wireless/core.h | 3 +++ net/wireless/reg.c | 27 +++++++-------------------- net/wireless/util.c | 15 +++++++++++++++ 3 files changed, 25 insertions(+), 20 deletions(-) diff --git a/net/wireless/core.h b/net/wireless/core.h index af6e023020b1..bc8ba6e57519 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -430,6 +430,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); void cfg80211_process_wdev_events(struct wireless_dev *wdev); +bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, + u32 center_freq_khz, u32 bw_khz); + /** * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable * @wiphy: the wiphy to validate against diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 5dbac3749738..753efcd51fa3 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -748,21 +748,6 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd) return true; } -static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range, - u32 center_freq_khz, u32 bw_khz) -{ - u32 start_freq_khz, end_freq_khz; - - start_freq_khz = center_freq_khz - (bw_khz/2); - end_freq_khz = center_freq_khz + (bw_khz/2); - - if (start_freq_khz >= freq_range->start_freq_khz && - end_freq_khz <= freq_range->end_freq_khz) - return true; - - return false; -} - /** * freq_in_rule_band - tells us if a frequency is in a frequency band * @freq_range: frequency rule we want to query @@ -1070,7 +1055,7 @@ freq_reg_info_regd(u32 center_freq, if (!band_rule_found) band_rule_found = freq_in_rule_band(fr, center_freq); - bw_fits = reg_does_bw_fit(fr, center_freq, bw); + bw_fits = cfg80211_does_bw_fit_range(fr, center_freq, bw); if (band_rule_found && bw_fits) return rr; @@ -1138,11 +1123,13 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); /* If we get a reg_rule we can assume that at least 5Mhz fit */ - if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq), - MHZ_TO_KHZ(10))) + if (!cfg80211_does_bw_fit_range(freq_range, + MHZ_TO_KHZ(chan->center_freq), + MHZ_TO_KHZ(10))) bw_flags |= IEEE80211_CHAN_NO_10MHZ; - if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq), - MHZ_TO_KHZ(20))) + if (!cfg80211_does_bw_fit_range(freq_range, + MHZ_TO_KHZ(chan->center_freq), + MHZ_TO_KHZ(20))) bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(10)) diff --git a/net/wireless/util.c b/net/wireless/util.c index c91bc25c4dd5..cd8a7ae55e7d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1847,6 +1847,21 @@ void cfg80211_free_nan_func(struct cfg80211_nan_func *f) } EXPORT_SYMBOL(cfg80211_free_nan_func); +bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, + u32 center_freq_khz, u32 bw_khz) +{ + u32 start_freq_khz, end_freq_khz; + + start_freq_khz = center_freq_khz - (bw_khz / 2); + end_freq_khz = center_freq_khz + (bw_khz / 2); + + if (start_freq_khz >= freq_range->start_freq_khz && + end_freq_khz <= freq_range->end_freq_khz) + return true; + + return false; +} + /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ const unsigned char rfc1042_header[] __aligned(2) = -- cgit v1.2.3 From e691ac2f75b69bee743f0370d79454ba4429b175 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 4 Jan 2017 18:58:31 +0100 
Subject: cfg80211: support ieee80211-freq-limit DT property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a helper for reading that new property and applying limitations of supported channels specified this way. It is used with devices that normally support a wide wireless band but in a given config are limited to some part of it (usually due to board design). For example a dual-band chipset may be able to support one band only because of used antennas. It's also common that tri-band routers have separated radios for lower and higher part of 5 GHz band and it may be impossible to say which is which without a DT info. Signed-off-by: Rafał Miłecki [add new function to documentation, fix link] Signed-off-by: Johannes Berg --- Documentation/80211/cfg80211.rst | 3 + include/net/cfg80211.h | 28 ++++++++ net/wireless/Makefile | 1 + net/wireless/of.c | 138 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 170 insertions(+) create mode 100644 net/wireless/of.c diff --git a/Documentation/80211/cfg80211.rst b/Documentation/80211/cfg80211.rst index b1e149ea6fee..eca534ab6172 100644 --- a/Documentation/80211/cfg80211.rst +++ b/Documentation/80211/cfg80211.rst @@ -44,6 +44,9 @@ Device registration .. kernel-doc:: include/net/cfg80211.h :functions: wiphy_new +.. kernel-doc:: include/net/cfg80211.h + :functions: wiphy_read_of_freq_limits + .. kernel-doc:: include/net/cfg80211.h :functions: wiphy_register diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index ca2ac1ce5862..41a9ecd82ca0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -311,6 +311,34 @@ struct ieee80211_supported_band { struct ieee80211_sta_vht_cap vht_cap; }; +/** + * wiphy_read_of_freq_limits - read frequency limits from device tree + * + * @wiphy: the wireless device to get extra limits for + * + * Some devices may have extra limitations specified in DT. This may be useful + * for chipsets that normally support more bands but are limited due to board + * design (e.g. by antennas or external power amplifier). + * + * This function reads info from DT and uses it to *modify* channels (disable + * unavailable ones). It's usually a *bad* idea to use it in drivers with + * shared channel data as DT limitations are device specific. You should make + * sure to call it only if channels in wiphy are copied and can be modified + * without affecting other devices. + * + * As this function access device node it has to be called after set_wiphy_dev. + * It also modifies channels so they have to be set first. + * If using this helper, call it before wiphy_register(). 
+ */ +#ifdef CONFIG_OF +void wiphy_read_of_freq_limits(struct wiphy *wiphy); +#else /* CONFIG_OF */ +static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy) +{ +} +#endif /* !CONFIG_OF */ + + /* * Wireless hardware/device configuration structures and methods */ diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 4c9e39f04ef8..95b4c0915412 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_WEXT_PRIV) += wext-priv.o cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o +cfg80211-$(CONFIG_OF) += of.o cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o diff --git a/net/wireless/of.c b/net/wireless/of.c new file mode 100644 index 000000000000..de221f0edca5 --- /dev/null +++ b/net/wireless/of.c @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2017 Rafał Miłecki + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "core.h" + +static bool wiphy_freq_limits_valid_chan(struct wiphy *wiphy, + struct ieee80211_freq_range *freq_limits, + unsigned int n_freq_limits, + struct ieee80211_channel *chan) +{ + u32 bw = MHZ_TO_KHZ(20); + int i; + + for (i = 0; i < n_freq_limits; i++) { + struct ieee80211_freq_range *limit = &freq_limits[i]; + + if (cfg80211_does_bw_fit_range(limit, + MHZ_TO_KHZ(chan->center_freq), + bw)) + return true; + } + + return false; +} + +static void wiphy_freq_limits_apply(struct wiphy *wiphy, + struct ieee80211_freq_range *freq_limits, + unsigned int n_freq_limits) +{ + enum nl80211_band band; + int i; + + if (WARN_ON(!n_freq_limits)) + return; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband = wiphy->bands[band]; + + if (!sband) + continue; + + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *chan = &sband->channels[i]; + + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + if (!wiphy_freq_limits_valid_chan(wiphy, freq_limits, + n_freq_limits, + chan)) { + pr_debug("Disabling freq %d MHz as it's out of OF limits\n", + chan->center_freq); + chan->flags |= IEEE80211_CHAN_DISABLED; + } + } + } +} + +void wiphy_read_of_freq_limits(struct wiphy *wiphy) +{ + struct device *dev = wiphy_dev(wiphy); + struct device_node *np; + struct property *prop; + struct ieee80211_freq_range *freq_limits; + unsigned int n_freq_limits; + const __be32 *p; + int len, i; + int err = 0; + + if (!dev) + return; + np = dev_of_node(dev); + if (!np) + return; + + prop = of_find_property(np, "ieee80211-freq-limit", &len); + if (!prop) + return; + + if (!len || len % sizeof(u32) || len / sizeof(u32) % 2) { + dev_err(dev, "ieee80211-freq-limit wrong format"); + return; + 
} + n_freq_limits = len / sizeof(u32) / 2; + + freq_limits = kcalloc(n_freq_limits, sizeof(*freq_limits), GFP_KERNEL); + if (!freq_limits) { + err = -ENOMEM; + goto out_kfree; + } + + p = NULL; + for (i = 0; i < n_freq_limits; i++) { + struct ieee80211_freq_range *limit = &freq_limits[i]; + + p = of_prop_next_u32(prop, p, &limit->start_freq_khz); + if (!p) { + err = -EINVAL; + goto out_kfree; + } + + p = of_prop_next_u32(prop, p, &limit->end_freq_khz); + if (!p) { + err = -EINVAL; + goto out_kfree; + } + + if (!limit->start_freq_khz || + !limit->end_freq_khz || + limit->start_freq_khz >= limit->end_freq_khz) { + err = -EINVAL; + goto out_kfree; + } + } + + wiphy_freq_limits_apply(wiphy, freq_limits, n_freq_limits); + +out_kfree: + kfree(freq_limits); + if (err) + dev_err(dev, "Failed to get limits: %d\n", err); +} +EXPORT_SYMBOL(wiphy_read_of_freq_limits); -- cgit v1.2.3 From 196ee9cd2d04728d0ec0038a2856b86142615b11 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:34 +0000 Subject: afs: Make afs_fs_fetch_data() take a list of pages Make afs_fs_fetch_data() take a list of pages for bulk data transfer. This will allow afs_readpages() to be made more efficient. Signed-off-by: David Howells --- fs/afs/file.c | 37 ++++++++++++++--- fs/afs/fsclient.c | 117 +++++++++++++++++++++++++++++++++++------------------- fs/afs/internal.h | 21 +++++++++- fs/afs/vnode.c | 6 +-- fs/afs/write.c | 19 +++++++-- 5 files changed, 145 insertions(+), 55 deletions(-) diff --git a/fs/afs/file.c b/fs/afs/file.c index 6344aee4ac4b..6c262ceef32d 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -101,6 +101,21 @@ int afs_release(struct inode *inode, struct file *file) return 0; } +/* + * Dispose of a ref to a read record. + */ +void afs_put_read(struct afs_read *req) +{ + int i; + + if (atomic_dec_and_test(&req->usage)) { + for (i = 0; i < req->nr_pages; i++) + if (req->pages[i]) + put_page(req->pages[i]); + kfree(req); + } +} + #ifdef CONFIG_AFS_FSCACHE /* * deal with notification that a page was read from the cache @@ -126,9 +141,8 @@ int afs_page_filler(void *data, struct page *page) { struct inode *inode = page->mapping->host; struct afs_vnode *vnode = AFS_FS_I(inode); + struct afs_read *req; struct key *key = data; - size_t len; - off_t offset; int ret; _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index); @@ -164,12 +178,23 @@ int afs_page_filler(void *data, struct page *page) _debug("cache said ENOBUFS"); default: go_on: - offset = page->index << PAGE_SHIFT; - len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE); + req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *), + GFP_KERNEL); + if (!req) + goto enomem; + + atomic_set(&req->usage, 1); + req->pos = (loff_t)page->index << PAGE_SHIFT; + req->len = min_t(size_t, i_size_read(inode) - req->pos, + PAGE_SIZE); + req->nr_pages = 1; + req->pages[0] = page; + get_page(page); /* read the contents of the file from the server into the * page */ - ret = afs_vnode_fetch_data(vnode, key, offset, len, page); + ret = afs_vnode_fetch_data(vnode, key, req); + afs_put_read(req); if (ret < 0) { if (ret == -ENOENT) { _debug("got NOENT from server" @@ -201,6 +226,8 @@ int afs_page_filler(void *data, struct page *page) _leave(" = 0"); return 0; +enomem: + ret = -ENOMEM; error: SetPageError(page); unlock_page(page); diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 31c616ab9b40..7dc1f6fb3661 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -309,15 +309,19 @@ int afs_fs_fetch_file_status(struct afs_server 
*server, static int afs_deliver_fs_fetch_data(struct afs_call *call) { struct afs_vnode *vnode = call->reply; + struct afs_read *req = call->reply3; const __be32 *bp; - struct page *page; + unsigned int size; void *buffer; int ret; - _enter("{%u}", call->unmarshall); + _enter("{%u,%zu/%u;%u/%llu}", + call->unmarshall, call->offset, call->count, + req->remain, req->actual_len); switch (call->unmarshall) { case 0: + req->actual_len = 0; call->offset = 0; call->unmarshall++; if (call->operation_ID != FSFETCHDATA64) { @@ -334,10 +338,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) if (ret < 0) return ret; - call->count = ntohl(call->tmp); - _debug("DATA length MSW: %u", call->count); - if (call->count > 0) - return -EBADMSG; + req->actual_len = ntohl(call->tmp); + req->actual_len <<= 32; call->offset = 0; call->unmarshall++; @@ -349,26 +351,52 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) if (ret < 0) return ret; - call->count = ntohl(call->tmp); - _debug("DATA length: %u", call->count); - if (call->count > PAGE_SIZE) + req->actual_len |= ntohl(call->tmp); + _debug("DATA length: %llu", req->actual_len); + /* Check that the server didn't want to send us extra. We + * might want to just discard instead, but that requires + * cooperation from AF_RXRPC. + */ + if (req->actual_len > req->len) return -EBADMSG; - call->offset = 0; + + req->remain = req->actual_len; + call->offset = req->pos & (PAGE_SIZE - 1); + req->index = 0; + if (req->actual_len == 0) + goto no_more_data; call->unmarshall++; + begin_page: + if (req->remain > PAGE_SIZE - call->offset) + size = PAGE_SIZE - call->offset; + else + size = req->remain; + call->count = call->offset + size; + ASSERTCMP(call->count, <=, PAGE_SIZE); + req->remain -= size; + /* extract the returned data */ case 3: - _debug("extract data"); - if (call->count > 0) { - page = call->reply3; - buffer = kmap(page); - ret = afs_extract_data(call, buffer, - call->count, true); - kunmap(page); - if (ret < 0) - return ret; + _debug("extract data %u/%llu %zu/%u", + req->remain, req->actual_len, call->offset, call->count); + + buffer = kmap(req->pages[req->index]); + ret = afs_extract_data(call, buffer, call->count, true); + kunmap(req->pages[req->index]); + if (ret < 0) + return ret; + if (call->offset == PAGE_SIZE) { + if (req->page_done) + req->page_done(call, req); + if (req->remain > 0) { + req->index++; + call->offset = 0; + goto begin_page; + } } + no_more_data: call->offset = 0; call->unmarshall++; @@ -393,17 +421,25 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) } if (call->count < PAGE_SIZE) { - _debug("clear"); - page = call->reply3; - buffer = kmap(page); + buffer = kmap(req->pages[req->index]); memset(buffer + call->count, 0, PAGE_SIZE - call->count); - kunmap(page); + kunmap(req->pages[req->index]); + if (req->page_done) + req->page_done(call, req); } _leave(" = 0 [done]"); return 0; } +static void afs_fetch_data_destructor(struct afs_call *call) +{ + struct afs_read *req = call->reply3; + + afs_put_read(req); + afs_flat_call_destructor(call); +} + /* * FS.FetchData operation type */ @@ -411,14 +447,14 @@ static const struct afs_call_type afs_RXFSFetchData = { .name = "FS.FetchData", .deliver = afs_deliver_fs_fetch_data, .abort_to_error = afs_abort_to_error, - .destructor = afs_flat_call_destructor, + .destructor = afs_fetch_data_destructor, }; static const struct afs_call_type afs_RXFSFetchData64 = { .name = "FS.FetchData64", .deliver = afs_deliver_fs_fetch_data, .abort_to_error = afs_abort_to_error, - 
.destructor = afs_flat_call_destructor, + .destructor = afs_fetch_data_destructor, }; /* @@ -427,8 +463,7 @@ static const struct afs_call_type afs_RXFSFetchData64 = { static int afs_fs_fetch_data64(struct afs_server *server, struct key *key, struct afs_vnode *vnode, - off_t offset, size_t length, - struct page *buffer, + struct afs_read *req, const struct afs_wait_mode *wait_mode) { struct afs_call *call; @@ -436,8 +471,6 @@ static int afs_fs_fetch_data64(struct afs_server *server, _enter(""); - ASSERTCMP(length, <, ULONG_MAX); - call = afs_alloc_flat_call(&afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4); if (!call) return -ENOMEM; @@ -445,7 +478,7 @@ static int afs_fs_fetch_data64(struct afs_server *server, call->key = key; call->reply = vnode; call->reply2 = NULL; /* volsync */ - call->reply3 = buffer; + call->reply3 = req; call->service_id = FS_SERVICE; call->port = htons(AFS_FS_PORT); call->operation_ID = FSFETCHDATA64; @@ -456,11 +489,12 @@ static int afs_fs_fetch_data64(struct afs_server *server, bp[1] = htonl(vnode->fid.vid); bp[2] = htonl(vnode->fid.vnode); bp[3] = htonl(vnode->fid.unique); - bp[4] = htonl(upper_32_bits(offset)); - bp[5] = htonl((u32) offset); + bp[4] = htonl(upper_32_bits(req->pos)); + bp[5] = htonl(lower_32_bits(req->pos)); bp[6] = 0; - bp[7] = htonl((u32) length); + bp[7] = htonl(lower_32_bits(req->len)); + atomic_inc(&req->usage); return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); } @@ -470,16 +504,16 @@ static int afs_fs_fetch_data64(struct afs_server *server, int afs_fs_fetch_data(struct afs_server *server, struct key *key, struct afs_vnode *vnode, - off_t offset, size_t length, - struct page *buffer, + struct afs_read *req, const struct afs_wait_mode *wait_mode) { struct afs_call *call; __be32 *bp; - if (upper_32_bits(offset) || upper_32_bits(offset + length)) - return afs_fs_fetch_data64(server, key, vnode, offset, length, - buffer, wait_mode); + if (upper_32_bits(req->pos) || + upper_32_bits(req->len) || + upper_32_bits(req->pos + req->len)) + return afs_fs_fetch_data64(server, key, vnode, req, wait_mode); _enter(""); @@ -490,7 +524,7 @@ int afs_fs_fetch_data(struct afs_server *server, call->key = key; call->reply = vnode; call->reply2 = NULL; /* volsync */ - call->reply3 = buffer; + call->reply3 = req; call->service_id = FS_SERVICE; call->port = htons(AFS_FS_PORT); call->operation_ID = FSFETCHDATA; @@ -501,9 +535,10 @@ int afs_fs_fetch_data(struct afs_server *server, bp[1] = htonl(vnode->fid.vid); bp[2] = htonl(vnode->fid.vnode); bp[3] = htonl(vnode->fid.unique); - bp[4] = htonl(offset); - bp[5] = htonl(length); + bp[4] = htonl(lower_32_bits(req->pos)); + bp[5] = htonl(lower_32_bits(req->len)); + atomic_inc(&req->usage); return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 535a38d2c1d0..6f7a9638ba1a 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -133,6 +133,22 @@ struct afs_call_type { void (*destructor)(struct afs_call *call); }; +/* + * Record of an outstanding read operation on a vnode. 
+ */ +struct afs_read { + loff_t pos; /* Where to start reading */ + loff_t len; /* How much to read */ + loff_t actual_len; /* How much we're actually getting */ + atomic_t usage; + unsigned int remain; /* Amount remaining */ + unsigned int index; /* Which page we're reading into */ + unsigned int pg_offset; /* Offset in page we're at */ + unsigned int nr_pages; + void (*page_done)(struct afs_call *, struct afs_read *); + struct page *pages[]; +}; + /* * record of an outstanding writeback on a vnode */ @@ -494,6 +510,7 @@ extern const struct file_operations afs_file_operations; extern int afs_open(struct inode *, struct file *); extern int afs_release(struct inode *, struct file *); extern int afs_page_filler(void *, struct page *); +extern void afs_put_read(struct afs_read *); /* * flock.c @@ -513,7 +530,7 @@ extern int afs_fs_fetch_file_status(struct afs_server *, struct key *, extern int afs_fs_give_up_callbacks(struct afs_server *, const struct afs_wait_mode *); extern int afs_fs_fetch_data(struct afs_server *, struct key *, - struct afs_vnode *, off_t, size_t, struct page *, + struct afs_vnode *, struct afs_read *, const struct afs_wait_mode *); extern int afs_fs_create(struct afs_server *, struct key *, struct afs_vnode *, const char *, umode_t, @@ -699,7 +716,7 @@ extern void afs_vnode_finalise_status_update(struct afs_vnode *, extern int afs_vnode_fetch_status(struct afs_vnode *, struct afs_vnode *, struct key *); extern int afs_vnode_fetch_data(struct afs_vnode *, struct key *, - off_t, size_t, struct page *); + struct afs_read *); extern int afs_vnode_create(struct afs_vnode *, struct key *, const char *, umode_t, struct afs_fid *, struct afs_file_status *, struct afs_callback *, struct afs_server **); diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c index 25cf4c3f4ff7..45aa874f5d32 100644 --- a/fs/afs/vnode.c +++ b/fs/afs/vnode.c @@ -393,7 +393,7 @@ no_server: * - TODO implement caching */ int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key, - off_t offset, size_t length, struct page *page) + struct afs_read *desc) { struct afs_server *server; int ret; @@ -420,8 +420,8 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); - ret = afs_fs_fetch_data(server, key, vnode, offset, length, - page, &afs_sync_call); + ret = afs_fs_fetch_data(server, key, vnode, desc, + &afs_sync_call); } while (!afs_volume_release_fileserver(vnode, server, ret)); diff --git a/fs/afs/write.c b/fs/afs/write.c index f865c3f05bea..c83c1a0e851f 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -86,19 +86,30 @@ void afs_put_writeback(struct afs_writeback *wb) static int afs_fill_page(struct afs_vnode *vnode, struct key *key, loff_t pos, struct page *page) { + struct afs_read *req; loff_t i_size; int ret; - int len; _enter(",,%llu", (unsigned long long)pos); + req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *), + GFP_KERNEL); + if (!req) + return -ENOMEM; + + atomic_set(&req->usage, 1); + req->pos = pos; + req->nr_pages = 1; + req->pages[0] = page; + i_size = i_size_read(&vnode->vfs_inode); if (pos + PAGE_SIZE > i_size) - len = i_size - pos; + req->len = i_size - pos; else - len = PAGE_SIZE; + req->len = PAGE_SIZE; - ret = afs_vnode_fetch_data(vnode, key, pos, len, page); + ret = afs_vnode_fetch_data(vnode, key, req); + afs_put_read(req); if (ret < 0) { if (ret == -ENOENT) { _debug("got NOENT from server" -- cgit v1.2.3 From 91b467e0a3f5fa861265eda94640b7d4c0290551 Mon Sep 17 00:00:00 2001 From: David Howells 
Date: Thu, 5 Jan 2017 10:38:35 +0000 Subject: afs: Make afs_readpages() fetch data in bulk Make afs_readpages() use afs_vnode_fetch_data()'s new ability to take a list of pages and do a bulk fetch. Signed-off-by: David Howells --- fs/afs/file.c | 131 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- fs/afs/volume.c | 1 + 2 files changed, 130 insertions(+), 2 deletions(-) diff --git a/fs/afs/file.c b/fs/afs/file.c index 6c262ceef32d..82897a78abc7 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "internal.h" static int afs_readpage(struct file *file, struct page *page); @@ -261,6 +262,129 @@ static int afs_readpage(struct file *file, struct page *page) return ret; } +/* + * Make pages available as they're filled. + */ +static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req) +{ + struct afs_vnode *vnode = call->reply; + struct page *page = req->pages[req->index]; + + req->pages[req->index] = NULL; + SetPageUptodate(page); + + /* send the page to the cache */ +#ifdef CONFIG_AFS_FSCACHE + if (PageFsCache(page) && + fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) { + fscache_uncache_page(vnode->cache, page); + BUG_ON(PageFsCache(page)); + } +#endif + unlock_page(page); + put_page(page); +} + +/* + * Read a contiguous set of pages. + */ +static int afs_readpages_one(struct file *file, struct address_space *mapping, + struct list_head *pages) +{ + struct afs_vnode *vnode = AFS_FS_I(mapping->host); + struct afs_read *req; + struct list_head *p; + struct page *first, *page; + struct key *key = file->private_data; + pgoff_t index; + int ret, n, i; + + /* Count the number of contiguous pages at the front of the list. Note + * that the list goes prev-wards rather than next-wards. + */ + first = list_entry(pages->prev, struct page, lru); + index = first->index + 1; + n = 1; + for (p = first->lru.prev; p != pages; p = p->prev) { + page = list_entry(p, struct page, lru); + if (page->index != index) + break; + index++; + n++; + } + + req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n, + GFP_NOFS); + if (!req) + return -ENOMEM; + + atomic_set(&req->usage, 1); + req->page_done = afs_readpages_page_done; + req->pos = first->index; + req->pos <<= PAGE_SHIFT; + + /* Transfer the pages to the request. We add them in until one fails + * to add to the LRU and then we stop (as that'll make a hole in the + * contiguous run. + * + * Note that it's possible for the file size to change whilst we're + * doing this, but we rely on the server returning less than we asked + * for if the file shrank. We also rely on this to deal with a partial + * page at the end of the file. 
+ */ + do { + page = list_entry(pages->prev, struct page, lru); + list_del(&page->lru); + index = page->index; + if (add_to_page_cache_lru(page, mapping, index, + readahead_gfp_mask(mapping))) { +#ifdef CONFIG_AFS_FSCACHE + fscache_uncache_page(vnode->cache, page); +#endif + put_page(page); + break; + } + + req->pages[req->nr_pages++] = page; + req->len += PAGE_SIZE; + } while (req->nr_pages < n); + + if (req->nr_pages == 0) { + kfree(req); + return 0; + } + + ret = afs_vnode_fetch_data(vnode, key, req); + if (ret < 0) + goto error; + + task_io_account_read(PAGE_SIZE * req->nr_pages); + afs_put_read(req); + return 0; + +error: + if (ret == -ENOENT) { + _debug("got NOENT from server" + " - marking file deleted and stale"); + set_bit(AFS_VNODE_DELETED, &vnode->flags); + ret = -ESTALE; + } + + for (i = 0; i < req->nr_pages; i++) { + page = req->pages[i]; + if (page) { +#ifdef CONFIG_AFS_FSCACHE + fscache_uncache_page(vnode->cache, page); +#endif + SetPageError(page); + unlock_page(page); + } + } + + afs_put_read(req); + return ret; +} + /* * read a set of pages */ @@ -314,8 +438,11 @@ static int afs_readpages(struct file *file, struct address_space *mapping, return ret; } - /* load the missing pages from the network */ - ret = read_cache_pages(mapping, pages, afs_page_filler, key); + while (!list_empty(pages)) { + ret = afs_readpages_one(file, mapping, pages); + if (ret < 0) + break; + } _leave(" = %d [netting]", ret); return ret; diff --git a/fs/afs/volume.c b/fs/afs/volume.c index d142a2449e65..546f9d01710b 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -106,6 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params) volume->cell = params->cell; volume->vid = vlocation->vldb.vid[params->type]; + volume->bdi.ra_pages = VM_MAX_READAHEAD*1024/PAGE_SIZE; ret = bdi_setup_and_register(&volume->bdi, "afs"); if (ret) goto error_bdi; -- cgit v1.2.3 From 90028b07a74ddcbaac1c5aee038a7108ae279b2e Mon Sep 17 00:00:00 2001 From: Prasad Kanneganti Date: Wed, 4 Jan 2017 11:31:55 -0800 Subject: liquidio VF: fix incorrect struct being used The VF driver is using the wrong struct when sending commands to the NIC firmware, sometimes causing adverse effects in the firmware. The right struct is the one that the PF is using, so make the VF use that as well. Signed-off-by: Prasad Kanneganti Signed-off-by: Felix Manlunas Signed-off-by: Derek Chickles Signed-off-by: Satanand Burla Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index c3d6a8228362..0243be8dd56f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -49,7 +49,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, /* Add in the response related fields. Opcode and Param are already * there. 
*/ - if (OCTEON_CN23XX_PF(oct)) { + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; @@ -70,7 +70,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, *sc->status_word = COMPLETION_WORD_INIT; - if (OCTEON_CN23XX_PF(oct)) + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) sc->cmd.cmd3.rptr = sc->dmarptr; else sc->cmd.cmd2.rptr = sc->dmarptr; -- cgit v1.2.3 From f784ad3d79e5be062b19dc36c53413daffeecc5c Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Wed, 4 Jan 2017 15:01:01 -0800 Subject: ipv6: do not send RTM_DELADDR for tentative addresses RTM_NEWADDR notification is sent when IFA_F_TENTATIVE is cleared from the address. So if the address is added and deleted before DAD probes completes, the RTM_DELADDR will be sent for which there was no RTM_NEWADDR causing asymmetry in notification. However if the same logic is used while sending RTM_DELADDR notification, this asymmetry can be avoided. Signed-off-by: Mahesh Bandewar CC: Hideaki YOSHIFUJI CC: Patrick McHardy CC: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c1e124bc8e1e..ac9bd5620f81 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4888,6 +4888,13 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) struct net *net = dev_net(ifa->idev->dev); int err = -ENOBUFS; + /* Don't send DELADDR notification for TENTATIVE address, + * since NEWADDR notification is sent only after removing + * TENTATIVE flag. + */ + if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR) + return; + skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); if (!skb) goto errout; -- cgit v1.2.3 From 026b471bfc995ba1429ca45b9157457fb8602f8e Mon Sep 17 00:00:00 2001 From: Weilin Chang Date: Wed, 4 Jan 2017 16:18:50 -0800 Subject: liquidio: fix wrong information about channels reported to ethtool Information reported to ethtool about channels is sometimes wrong for PF, and always wrong for VF. Fix them by getting the information from the right fields from the right structs. Signed-off-by: Weilin Chang Signed-off-by: Felix Manlunas Signed-off-by: Derek Chickles Signed-off-by: Satanand Burla Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index b00c3002360e..50384cede8be 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -296,12 +296,16 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); - max_rx = CFG_GET_OQ_MAX_Q(conf23); - max_tx = CFG_GET_IQ_MAX_Q(conf23); - rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx); - tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx); + max_rx = oct->sriov_info.num_pf_rings; + max_tx = oct->sriov_info.num_pf_rings; + rx_count = lio->linfo.num_rxpciq; + tx_count = lio->linfo.num_txpciq; + } else if (OCTEON_CN23XX_VF(oct)) { + max_tx = oct->sriov_info.rings_per_vf; + max_rx = oct->sriov_info.rings_per_vf; + rx_count = lio->linfo.num_rxpciq; + tx_count = lio->linfo.num_txpciq; } channel->max_rx = max_rx; -- cgit v1.2.3 From 7558828adebe675179f6a489c7e04082828bf524 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 5 Jan 2017 12:28:41 -0500 Subject: net: dsa: remove version string The dsa_driver_version string is irrelevant and has not been bumped since its introduction about 9 years ago. Kill it. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/dsa.c | 5 ----- net/dsa/dsa_priv.h | 1 - net/dsa/slave.c | 1 - 3 files changed, 7 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 89e66b623d73..3f85be0aae34 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -27,8 +27,6 @@ #include #include "dsa_priv.h" -char dsa_driver_version[] = "0.1"; - static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -926,9 +924,6 @@ static int dsa_probe(struct platform_device *pdev) struct dsa_switch_tree *dst; int ret; - pr_notice_once("Distributed Switch Architecture driver version %s\n", - dsa_driver_version); - if (pdev->dev.of_node) { ret = dsa_of_probe(&pdev->dev); if (ret) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 6cfd7388834e..63ae1484abae 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -49,7 +49,6 @@ struct dsa_slave_priv { }; /* dsa.c */ -extern char dsa_driver_version[]; int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, struct device_node *port_dn, int port); void dsa_cpu_dsa_destroy(struct device_node *port_dn); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ffd91969b830..5cd5b8137c08 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -673,7 +673,6 @@ static void dsa_slave_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, dsa_driver_version, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); } -- cgit v1.2.3 From 4b92ea81cca38e4b5fd200607fba3d8a2263a3f9 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 5 Jan 2017 11:08:58 -0800 Subject: net: dsa: b53: Utilize common helpers for u64/MAC Utilize the two functions recently introduced: u64_to_ether() and ether_to_u64() instead of our own versions. 
Reviewed-by: Andrew Lunn Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 2 +- drivers/net/dsa/b53/b53_priv.h | 24 +++--------------------- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 947adda3397d..d5370c227043 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1137,7 +1137,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, int ret; /* Convert the array into a 64-bit MAC */ - mac = b53_mac_to_u64(addr); + mac = ether_addr_to_u64(addr); /* Perform a read for the given MAC and VID */ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index f192a673caba..1f4b07b77de2 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "b53_regs.h" @@ -325,25 +326,6 @@ struct b53_arl_entry { u8 is_static:1; }; -static inline void b53_mac_from_u64(u64 src, u8 *dst) -{ - unsigned int i; - - for (i = 0; i < ETH_ALEN; i++) - dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff; -} - -static inline u64 b53_mac_to_u64(const u8 *src) -{ - unsigned int i; - u64 dst = 0; - - for (i = 0; i < ETH_ALEN; i++) - dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i); - - return dst; -} - static inline void b53_arl_to_entry(struct b53_arl_entry *ent, u64 mac_vid, u32 fwd_entry) { @@ -352,14 +334,14 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent, ent->is_valid = !!(fwd_entry & ARLTBL_VALID); ent->is_age = !!(fwd_entry & ARLTBL_AGE); ent->is_static = !!(fwd_entry & ARLTBL_STATIC); - b53_mac_from_u64(mac_vid, ent->mac); + u64_to_ether_addr(mac_vid, ent->mac); ent->vid = mac_vid >> ARLTBL_VID_S; } static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, const struct b53_arl_entry *ent) { - *mac_vid = b53_mac_to_u64(ent->mac); + *mac_vid = ether_addr_to_u64(ent->mac); *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S; *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK; if (ent->is_valid) -- cgit v1.2.3 From 4055ae5e6d00e09ff4206843638323d1d5dfd85d Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 6 Jan 2017 08:47:20 +0530 Subject: cxgb4: Synchronize access to mailbox The issue comes when there are multiple threads attempting to use the mailbox facility at the same time. When DCB operations and interface up/down is run in a loop for every 0.1 sec, we observed mailbox collisions. And out of the two commands one would fail with the present code, since we don't queue the second command. To overcome the above issue, added a queue to access the mailbox. Whenever a mailbox command is issued add it to the queue. If its at the head issue the mailbox command, else wait for the existing command to complete. Usually command takes less than a milli-second to complete. Also timeout from the loop, if the command under execution takes long time to run. In reality, the number of mailbox access collisions is going to be very rare since no one runs such abusive script. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 8 ++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 ++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 59 ++++++++++++++++++++++++- 3 files changed, 69 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 9a49c421f86c..ad0096e74813 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -787,6 +787,10 @@ struct vf_info { bool pf_set_mac; }; +struct mbox_list { + struct list_head list; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -849,6 +853,10 @@ struct adapter { struct work_struct db_drop_task; bool tid_release_task_busy; + /* lock for mailbox cmd list */ + spinlock_t mbox_lock; + struct mbox_list mlist; + /* support for mailbox command/reply logging */ #define T4_OS_LOG_MBOX_CMDS 256 struct mbox_cmd_log *mbox_log; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6f951877430b..9d2fe5140b88 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4707,6 +4707,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); spin_lock_init(&adapter->win0_lock); + spin_lock_init(&adapter->mbox_lock); + + INIT_LIST_HEAD(&adapter->mlist.list); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cd5f437448d3..3c2effe6eefc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -284,6 +284,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 }; + struct mbox_list entry; u16 access = 0; u16 execute = 0; u32 v; @@ -311,11 +312,61 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, timeout = -timeout; } + /* Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + spin_lock(&adap->mbox_lock); + list_add_tail(&entry.list, &adap->mlist.list); + spin_unlock(&adap->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rearely + * contend on access to the mailbox ... + */ + if (i > FW_CMD_MAX_TIMEOUT) { + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); + ret = -EBUSY; + t4_record_mbox(adap, cmd, size, access, ret); + return ret; + } + + /* If we're at the head, break out and start the mailbox + * protocol. + */ + if (list_first_entry(&adap->mlist.list, struct mbox_list, + list) == &entry) + break; + + /* Delay for a bit before checking again ... */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + mdelay(ms); + } + } + + /* Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. 
+ */ v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); - if (v != MBOX_OWNER_DRV) { + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); return ret; @@ -366,6 +417,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, execute = i + ms; t4_record_mbox(adap, cmd_rpl, MBOX_LEN, access, execute); + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); return -FW_CMD_RETVAL_G((int)res); } } @@ -375,6 +429,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); t4_report_fw_error(adap); + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); return ret; } -- cgit v1.2.3 From 0c8d803f397a65adfffd3aa01f52555389f33239 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 5 Jan 2017 19:32:46 -0800 Subject: net: ipv4: Simplify rt_fill_info rt_fill_info has only 1 caller and both of the last 2 args -- nowait and flags -- are hardcoded to 0. Given that remove them as input arguments and simplify rt_fill_info accordingly. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/route.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0fcac8e7a2b2..7b52ac20145b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2454,7 +2454,7 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow); static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, struct flowi4 *fl4, struct sk_buff *skb, u32 portid, - u32 seq, int event, int nowait, unsigned int flags) + u32 seq, int event) { struct rtable *rt = skb_rtable(skb); struct rtmsg *r; @@ -2463,7 +2463,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, u32 error; u32 metrics[RTAX_MAX]; - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), 0); if (!nlh) return -EMSGSIZE; @@ -2541,18 +2541,12 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { int err = ipmr_get_route(net, skb, fl4->saddr, fl4->daddr, - r, nowait, portid); + r, 0, portid); if (err <= 0) { - if (!nowait) { - if (err == 0) - return 0; - goto nla_put_failure; - } else { - if (err == -EMSGSIZE) - goto nla_put_failure; - error = err; - } + if (err == 0) + return 0; + goto nla_put_failure; } } else #endif @@ -2665,7 +2659,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) err = rt_fill_info(net, dst, src, table_id, &fl4, skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, - RTM_NEWROUTE, 0, 0); + RTM_NEWROUTE); if (err < 0) goto errout_free; -- cgit v1.2.3 From c7b371e34c0b9ed9e23271608448f89e6d66ba0a Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 5 Jan 2017 19:33:59 -0800 Subject: net: ipv4: make fib_select_default static fib_select_default has a single caller within the same file. Make it static. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 1 - net/ipv4/fib_semantics.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 5f376af377c7..57c2a863d0b2 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -344,7 +344,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); -void fib_select_default(const struct flowi4 *flp, struct fib_result *res); #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 7a5b4c7d9a87..05c911d21782 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1434,7 +1434,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) } /* Must be invoked inside of an RCU protected region. */ -void fib_select_default(const struct flowi4 *flp, struct fib_result *res) +static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) { struct fib_info *fi = NULL, *last_resort = NULL; struct hlist_head *fa_head = res->fa_head; -- cgit v1.2.3 From 5e78f7fd3743d430c44402268cbeddab8eeb3af8 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 6 Jan 2017 16:51:46 +0530 Subject: cxgb4/cxgb4vf: Display 25G and 100G link speed Add support to report 25G and 100G links, which was missed as part of commit "eb97ad99f9ed". Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 20 +++++++++++++------- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 21 ++++++++++++--------- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 9d2fe5140b88..629b11879ceb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -188,18 +188,24 @@ static void link_report(struct net_device *dev) const struct port_info *p = netdev_priv(dev); switch (p->link_cfg.speed) { - case 10000: - s = "10Gbps"; + case 100: + s = "100Mbps"; break; case 1000: - s = "1000Mbps"; + s = "1Gbps"; break; - case 100: - s = "100Mbps"; + case 10000: + s = "10Gbps"; + break; + case 25000: + s = "25Gbps"; break; case 40000: s = "40Gbps"; break; + case 100000: + s = "100Gbps"; + break; default: pr_info("%s: unsupported speed: %d\n", dev->name, p->link_cfg.speed); @@ -4397,9 +4403,9 @@ static void print_port_info(const struct net_device *dev) spd = " 8 GT/s"; if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) - bufp += sprintf(bufp, "100/"); + bufp += sprintf(bufp, "100M/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) - bufp += sprintf(bufp, "1000/"); + bufp += sprintf(bufp, "1G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) bufp += sprintf(bufp, "10G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0d1a134c8174..ac7a150c54e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -158,20 +158,23 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) netif_carrier_on(dev); switch (pi->link_cfg.speed) { - case 40000: - s = "40Gbps"; + case 100: + s = "100Mbps"; + break; + case 1000: + s = 
"1Gbps"; break; - case 10000: s = "10Gbps"; break; - - case 1000: - s = "1000Mbps"; + case 25000: + s = "25Gbps"; break; - - case 100: - s = "100Mbps"; + case 40000: + s = "40Gbps"; + break; + case 100000: + s = "100Gbps"; break; default: -- cgit v1.2.3 From 89eb9835b2657a902adb8d5e31f721a8004726b5 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 6 Jan 2017 16:52:10 +0530 Subject: cxgb4: Add port description for new cards. Add port description for 25G and 100G cards, and also change few port descriptions in compliance with the new naming convention. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 3c2effe6eefc..f113015074b1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5439,22 +5439,28 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) const char *t4_get_port_type_description(enum fw_port_type port_type) { static const char *const port_type_description[] = { - "R XFI", - "R XAUI", - "T SGMII", - "T XFI", - "T XAUI", + "Fiber_XFI", + "Fiber_XAUI", + "BT_SGMII", + "BT_XFI", + "BT_XAUI", "KX4", "CX4", "KX", "KR", - "R SFP+", - "KR/KX", - "KR/KX/KX4", - "R QSFP_10G", - "R QSA", - "R QSFP", - "R BP40_BA", + "SFP", + "BP_AP", + "BP4_AP", + "QSFP_10G", + "QSA", + "QSFP", + "BP40_BA", + "KR4_100G", + "CR4_QSFP", + "CR_QSFP", + "CR2_QSFP", + "SFP28", + "KR_SFP28", }; if (port_type < ARRAY_SIZE(port_type_description)) -- cgit v1.2.3 From df560056d960a3e164c179d89770d5a51b798537 Mon Sep 17 00:00:00 2001 From: Eric Garver Date: Thu, 5 Jan 2017 20:22:36 -0500 Subject: udp: inuse checks can quit early for reuseport UDP lib inuse checks will walk the entire hash bucket to check if the portaddr is in use. In the case of reuseport we can stop searching when we find a matching reuseport. On a 16-core VM a test program that spawns 16 threads that each bind to 1024 sockets (one per 10ms) takes 1m45s. With this change it takes 11s. Also add a cond_resched() when the port is not specified. Signed-off-by: Eric Garver Signed-off-by: David S. 
Miller --- net/ipv4/udp.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1307a7c2e544..4318d72e0248 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -153,13 +153,18 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && - (!sk2->sk_reuseport || !sk->sk_reuseport || - rcu_access_pointer(sk->sk_reuseport_cb) || - !uid_eq(uid, sock_i_uid(sk2))) && saddr_comp(sk, sk2, true)) { - if (!bitmap) - return 1; - __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); + if (sk2->sk_reuseport && sk->sk_reuseport && + !rcu_access_pointer(sk->sk_reuseport_cb) && + uid_eq(uid, sock_i_uid(sk2))) { + if (!bitmap) + return 0; + } else { + if (!bitmap) + return 1; + __set_bit(udp_sk(sk2)->udp_port_hash >> log, + bitmap); + } } } return 0; @@ -188,11 +193,14 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && - (!sk2->sk_reuseport || !sk->sk_reuseport || - rcu_access_pointer(sk->sk_reuseport_cb) || - !uid_eq(uid, sock_i_uid(sk2))) && saddr_comp(sk, sk2, true)) { - res = 1; + if (sk2->sk_reuseport && sk->sk_reuseport && + !rcu_access_pointer(sk->sk_reuseport_cb) && + uid_eq(uid, sock_i_uid(sk2))) { + res = 0; + } else { + res = 1; + } break; } } @@ -285,6 +293,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, snum += rand; } while (snum != first); spin_unlock_bh(&hslot->lock); + cond_resched(); } while (++first != last); goto fail; } else { -- cgit v1.2.3 From a83863174a6137fb3e03f279c9dcdba9e35315d0 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 6 Jan 2017 22:18:33 +0800 Subject: sctp: prepare asoc stream for stream reconf sctp stream reconf, described in RFC 6525, needs a structure to save per stream information in assoc, like stream state. In the future, sctp stream scheduler also needs it to save some stream scheduler params and queues. This patchset is to prepare the stream array in assoc for stream reconf. It defines sctp_stream that includes stream arrays inside to replace ssnmap. Note that we use different structures for IN and OUT streams, as the members in per OUT stream will get more and more different from per IN stream. v1->v2: - put these patches into a smaller group. v2->v3: - define sctp_stream to contain stream arrays, and create stream.c to put stream-related functions. - merge 3 patches into 1, as new sctp_stream has the same name with before. Signed-off-by: Xin Long Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- include/net/sctp/sctp.h | 1 - include/net/sctp/structs.h | 76 +++++++++++---------------- net/sctp/Makefile | 2 +- net/sctp/associola.c | 13 +++-- net/sctp/objcnt.c | 2 - net/sctp/sm_make_chunk.c | 10 ++-- net/sctp/sm_statefuns.c | 3 +- net/sctp/ssnmap.c | 125 --------------------------------------------- net/sctp/stream.c | 85 ++++++++++++++++++++++++++++++ net/sctp/ulpqueue.c | 36 ++++++------- 10 files changed, 147 insertions(+), 206 deletions(-) delete mode 100644 net/sctp/ssnmap.c create mode 100644 net/sctp/stream.c diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index d8833a86cd7e..598d938b0d0a 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -283,7 +283,6 @@ extern atomic_t sctp_dbg_objcnt_chunk; extern atomic_t sctp_dbg_objcnt_bind_addr; extern atomic_t sctp_dbg_objcnt_bind_bucket; extern atomic_t sctp_dbg_objcnt_addr; -extern atomic_t sctp_dbg_objcnt_ssnmap; extern atomic_t sctp_dbg_objcnt_datamsg; extern atomic_t sctp_dbg_objcnt_keys; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 87d56cc80a3c..4741ec240caf 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -82,7 +82,6 @@ struct sctp_outq; struct sctp_bind_addr; struct sctp_ulpq; struct sctp_ep_common; -struct sctp_ssnmap; struct crypto_shash; @@ -377,54 +376,22 @@ typedef struct sctp_sender_hb_info { __u64 hb_nonce; } __packed sctp_sender_hb_info_t; -/* - * RFC 2960 1.3.2 Sequenced Delivery within Streams - * - * The term "stream" is used in SCTP to refer to a sequence of user - * messages that are to be delivered to the upper-layer protocol in - * order with respect to other messages within the same stream. This is - * in contrast to its usage in TCP, where it refers to a sequence of - * bytes (in this document a byte is assumed to be eight bits). - * ... - * - * This is the structure we use to track both our outbound and inbound - * SSN, or Stream Sequence Numbers. - */ - -struct sctp_stream { - __u16 *ssn; - unsigned int len; -}; - -struct sctp_ssnmap { - struct sctp_stream in; - struct sctp_stream out; -}; - -struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, - gfp_t gfp); -void sctp_ssnmap_free(struct sctp_ssnmap *map); -void sctp_ssnmap_clear(struct sctp_ssnmap *map); +struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); +void sctp_stream_free(struct sctp_stream *stream); +void sctp_stream_clear(struct sctp_stream *stream); /* What is the current SSN number for this stream? */ -static inline __u16 sctp_ssn_peek(struct sctp_stream *stream, __u16 id) -{ - return stream->ssn[id]; -} +#define sctp_ssn_peek(stream, type, sid) \ + ((stream)->type[sid].ssn) /* Return the next SSN number for this stream. */ -static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id) -{ - return stream->ssn[id]++; -} +#define sctp_ssn_next(stream, type, sid) \ + ((stream)->type[sid].ssn++) /* Skip over this ssn and all below. */ -static inline void sctp_ssn_skip(struct sctp_stream *stream, __u16 id, - __u16 ssn) -{ - stream->ssn[id] = ssn+1; -} - +#define sctp_ssn_skip(stream, type, sid, ssn) \ + ((stream)->type[sid].ssn = ssn + 1) + /* * Pointers to address related SCTP functions. * (i.e. things that depend on the address family.) 
@@ -1331,6 +1298,25 @@ struct sctp_inithdr_host { __u32 initial_tsn; }; +struct sctp_stream_out { + __u16 ssn; + __u8 state; +}; + +struct sctp_stream_in { + __u16 ssn; +}; + +struct sctp_stream { + struct sctp_stream_out *out; + struct sctp_stream_in *in; + __u16 outcnt; + __u16 incnt; +}; + +#define SCTP_STREAM_CLOSED 0x00 +#define SCTP_STREAM_OPEN 0x01 + /* SCTP_GET_ASSOC_STATS counters */ struct sctp_priv_assoc_stats { /* Maximum observed rto in the association during subsequent @@ -1746,8 +1732,8 @@ struct sctp_association { /* Default receive parameters */ __u32 default_rcv_context; - /* This tracks outbound ssn for a given stream. */ - struct sctp_ssnmap *ssnmap; + /* Stream arrays */ + struct sctp_stream *stream; /* All outbound chunks go through this structure. */ struct sctp_outq outqueue; diff --git a/net/sctp/Makefile b/net/sctp/Makefile index 6c4f7496cec6..70f1b570bab9 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile @@ -11,7 +11,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ transport.o chunk.o sm_make_chunk.o ulpevent.o \ inqueue.o outqueue.o ulpqueue.o \ tsnmap.o bind_addr.o socket.o primitive.o \ - output.o input.o debug.o ssnmap.o auth.o \ + output.o input.o debug.o stream.o auth.o \ offload.o sctp_probe-y := probe.o diff --git a/net/sctp/associola.c b/net/sctp/associola.c index d3cc30c25c41..36294f7fb9a7 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -358,8 +358,8 @@ void sctp_association_free(struct sctp_association *asoc) sctp_tsnmap_free(&asoc->peer.tsn_map); - /* Free ssnmap storage. */ - sctp_ssnmap_free(asoc->ssnmap); + /* Free stream information. */ + sctp_stream_free(asoc->stream); /* Clean up the bound address list. */ sctp_bind_addr_free(&asoc->base.bind_addr); @@ -1137,7 +1137,7 @@ void sctp_assoc_update(struct sctp_association *asoc, /* Reinitialize SSN for both local streams * and peer's streams. */ - sctp_ssnmap_clear(asoc->ssnmap); + sctp_stream_clear(asoc->stream); /* Flush the ULP reassembly and ordered queue. * Any data there will now be stale and will @@ -1162,10 +1162,9 @@ void sctp_assoc_update(struct sctp_association *asoc, asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; - if (!asoc->ssnmap) { - /* Move the ssnmap. */ - asoc->ssnmap = new->ssnmap; - new->ssnmap = NULL; + if (!asoc->stream) { + asoc->stream = new->stream; + new->stream = NULL; } if (!asoc->assoc_id) { diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c index 40e7fac96c41..105ac3327b28 100644 --- a/net/sctp/objcnt.c +++ b/net/sctp/objcnt.c @@ -51,7 +51,6 @@ SCTP_DBG_OBJCNT(bind_addr); SCTP_DBG_OBJCNT(bind_bucket); SCTP_DBG_OBJCNT(chunk); SCTP_DBG_OBJCNT(addr); -SCTP_DBG_OBJCNT(ssnmap); SCTP_DBG_OBJCNT(datamsg); SCTP_DBG_OBJCNT(keys); @@ -67,7 +66,6 @@ static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = { SCTP_DBG_OBJCNT_ENTRY(bind_addr), SCTP_DBG_OBJCNT_ENTRY(bind_bucket), SCTP_DBG_OBJCNT_ENTRY(addr), - SCTP_DBG_OBJCNT_ENTRY(ssnmap), SCTP_DBG_OBJCNT_ENTRY(datamsg), SCTP_DBG_OBJCNT_ENTRY(keys), }; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 9e9690b7afe1..a15d824a313d 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1536,7 +1536,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) /* All fragments will be on the same stream */ sid = ntohs(chunk->subh.data_hdr->stream); - stream = &chunk->asoc->ssnmap->out; + stream = chunk->asoc->stream; /* Now assign the sequence number to the entire message. * All fragments must have the same stream sequence number. 
@@ -1547,9 +1547,9 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) ssn = 0; } else { if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) - ssn = sctp_ssn_next(stream, sid); + ssn = sctp_ssn_next(stream, out, sid); else - ssn = sctp_ssn_peek(stream, sid); + ssn = sctp_ssn_peek(stream, out, sid); } lchunk->subh.data_hdr->ssn = htons(ssn); @@ -2444,9 +2444,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, if (!asoc->temp) { int error; - asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, + asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); - if (!asoc->ssnmap) + if (!asoc->stream) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 3382ef254e7b..0ceded37d20b 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -6274,9 +6274,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, * and is invalid. */ ssn = ntohs(data_hdr->ssn); - if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) { + if (ordered && SSN_lt(ssn, sctp_ssn_peek(asoc->stream, in, sid))) return SCTP_IERROR_PROTO_VIOLATION; - } /* Send the data up to the user. Note: Schedule the * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c deleted file mode 100644 index b9c8521c1a98..000000000000 --- a/net/sctp/ssnmap.c +++ /dev/null @@ -1,125 +0,0 @@ -/* SCTP kernel implementation - * Copyright (c) 2003 International Business Machines, Corp. - * - * This file is part of the SCTP kernel implementation - * - * These functions manipulate sctp SSN tracker. - * - * This SCTP implementation is free software; - * you can redistribute it and/or modify it under the terms of - * the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This SCTP implementation is distributed in the hope that it - * will be useful, but WITHOUT ANY WARRANTY; without even the implied - * ************************ - * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, see - * . - * - * Please send any bug reports or fixes you make to the - * email address(es): - * lksctp developers - * - * Written or modified by: - * Jon Grimm - */ - -#include -#include -#include -#include - -static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, - __u16 out); - -/* Storage size needed for map includes 2 headers and then the - * specific needs of in or out streams. - */ -static inline size_t sctp_ssnmap_size(__u16 in, __u16 out) -{ - return sizeof(struct sctp_ssnmap) + (in + out) * sizeof(__u16); -} - - -/* Create a new sctp_ssnmap. - * Allocate room to store at least 'len' contiguous TSNs. 
- */ -struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, - gfp_t gfp) -{ - struct sctp_ssnmap *retval; - int size; - - size = sctp_ssnmap_size(in, out); - if (size <= KMALLOC_MAX_SIZE) - retval = kmalloc(size, gfp); - else - retval = (struct sctp_ssnmap *) - __get_free_pages(gfp, get_order(size)); - if (!retval) - goto fail; - - if (!sctp_ssnmap_init(retval, in, out)) - goto fail_map; - - SCTP_DBG_OBJCNT_INC(ssnmap); - - return retval; - -fail_map: - if (size <= KMALLOC_MAX_SIZE) - kfree(retval); - else - free_pages((unsigned long)retval, get_order(size)); -fail: - return NULL; -} - - -/* Initialize a block of memory as a ssnmap. */ -static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, - __u16 out) -{ - memset(map, 0x00, sctp_ssnmap_size(in, out)); - - /* Start 'in' stream just after the map header. */ - map->in.ssn = (__u16 *)&map[1]; - map->in.len = in; - - /* Start 'out' stream just after 'in'. */ - map->out.ssn = &map->in.ssn[in]; - map->out.len = out; - - return map; -} - -/* Clear out the ssnmap streams. */ -void sctp_ssnmap_clear(struct sctp_ssnmap *map) -{ - size_t size; - - size = (map->in.len + map->out.len) * sizeof(__u16); - memset(map->in.ssn, 0x00, size); -} - -/* Dispose of a ssnmap. */ -void sctp_ssnmap_free(struct sctp_ssnmap *map) -{ - int size; - - if (unlikely(!map)) - return; - - size = sctp_ssnmap_size(map->in.len, map->out.len); - if (size <= KMALLOC_MAX_SIZE) - kfree(map); - else - free_pages((unsigned long)map, get_order(size)); - - SCTP_DBG_OBJCNT_DEC(ssnmap); -} diff --git a/net/sctp/stream.c b/net/sctp/stream.c new file mode 100644 index 000000000000..f86de43cbbe5 --- /dev/null +++ b/net/sctp/stream.c @@ -0,0 +1,85 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * + * This file is part of the SCTP kernel implementation + * + * These functions manipulate sctp tsn mapping array. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * . 
+ * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers + * + * Written or modified by: + * Xin Long + */ + +#include + +struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) +{ + struct sctp_stream *stream; + int i; + + stream = kzalloc(sizeof(*stream), gfp); + if (!stream) + return NULL; + + stream->outcnt = outcnt; + stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); + if (!stream->out) { + kfree(stream); + return NULL; + } + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + + stream->incnt = incnt; + stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); + if (!stream->in) { + kfree(stream->out); + kfree(stream); + return NULL; + } + + return stream; +} + +void sctp_stream_free(struct sctp_stream *stream) +{ + if (unlikely(!stream)) + return; + + kfree(stream->out); + kfree(stream->in); + kfree(stream); +} + +void sctp_stream_clear(struct sctp_stream *stream) +{ + int i; + + for (i = 0; i < stream->outcnt; i++) + stream->out[i].ssn = 0; + + for (i = 0; i < stream->incnt; i++) + stream->in[i].ssn = 0; +} diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 84d0fdaf7de9..aa3624d50278 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -760,11 +760,11 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, struct sk_buff_head *event_list; struct sk_buff *pos, *tmp; struct sctp_ulpevent *cevent; - struct sctp_stream *in; + struct sctp_stream *stream; __u16 sid, csid, cssn; sid = event->stream; - in = &ulpq->asoc->ssnmap->in; + stream = ulpq->asoc->stream; event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev; @@ -782,11 +782,11 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, if (csid < sid) continue; - if (cssn != sctp_ssn_peek(in, sid)) + if (cssn != sctp_ssn_peek(stream, in, sid)) break; - /* Found it, so mark in the ssnmap. */ - sctp_ssn_next(in, sid); + /* Found it, so mark in the stream. */ + sctp_ssn_next(stream, in, sid); __skb_unlink(pos, &ulpq->lobby); @@ -849,7 +849,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { __u16 sid, ssn; - struct sctp_stream *in; + struct sctp_stream *stream; /* Check if this message needs ordering. */ if (SCTP_DATA_UNORDERED & event->msg_flags) @@ -858,10 +858,10 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, /* Note: The stream ID must be verified before this routine. */ sid = event->stream; ssn = event->ssn; - in = &ulpq->asoc->ssnmap->in; + stream = ulpq->asoc->stream; /* Is this the expected SSN for this stream ID? */ - if (ssn != sctp_ssn_peek(in, sid)) { + if (ssn != sctp_ssn_peek(stream, in, sid)) { /* We've received something out of order, so find where it * needs to be placed. We order by stream and then by SSN. */ @@ -870,7 +870,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, } /* Mark that the next chunk has been found. */ - sctp_ssn_next(in, sid); + sctp_ssn_next(stream, in, sid); /* Go find any other chunks that were waiting for * ordering. 
@@ -888,12 +888,12 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) struct sk_buff *pos, *tmp; struct sctp_ulpevent *cevent; struct sctp_ulpevent *event; - struct sctp_stream *in; + struct sctp_stream *stream; struct sk_buff_head temp; struct sk_buff_head *lobby = &ulpq->lobby; __u16 csid, cssn; - in = &ulpq->asoc->ssnmap->in; + stream = ulpq->asoc->stream; /* We are holding the chunks by stream, by SSN. */ skb_queue_head_init(&temp); @@ -912,7 +912,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) continue; /* see if this ssn has been marked by skipping */ - if (!SSN_lt(cssn, sctp_ssn_peek(in, csid))) + if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid))) break; __skb_unlink(pos, lobby); @@ -932,8 +932,8 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) csid = cevent->stream; cssn = cevent->ssn; - if (csid == sid && cssn == sctp_ssn_peek(in, csid)) { - sctp_ssn_next(in, csid); + if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) { + sctp_ssn_next(stream, in, csid); __skb_unlink(pos, lobby); __skb_queue_tail(&temp, pos); event = sctp_skb2event(pos); @@ -955,17 +955,17 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) */ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) { - struct sctp_stream *in; + struct sctp_stream *stream; /* Note: The stream ID must be verified before this routine. */ - in = &ulpq->asoc->ssnmap->in; + stream = ulpq->asoc->stream; /* Is this an old SSN? If so ignore. */ - if (SSN_lt(ssn, sctp_ssn_peek(in, sid))) + if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid))) return; /* Mark that we are no longer expecting this SSN or lower. */ - sctp_ssn_skip(in, sid, ssn); + sctp_ssn_skip(stream, in, sid, ssn); /* Go find any other chunks that were waiting for * ordering and deliver them if needed. -- cgit v1.2.3 From 780e982905bef61d13496d9af5310bf4af3a64d3 Mon Sep 17 00:00:00 2001 From: "santosh.shilimkar@oracle.com" Date: Fri, 6 Jan 2017 10:44:15 -0800 Subject: RDS: validate the requested traces user input against max supported Larger than supported value can lead to array read/write overflow. Reported-by: Colin Ian King Signed-off-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/af_rds.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index fd8217404162..b405f77d664c 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -310,6 +310,9 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, if (copy_from_user(&trace, optval, sizeof(trace))) return -EFAULT; + if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX) + return -EFAULT; + rs->rs_rx_traces = trace.rx_traces; for (i = 0; i < rs->rs_rx_traces; i++) { if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) { -- cgit v1.2.3 From 8cf2f704534147ce84c4df951887c1d49fe9350c Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 6 Jan 2017 20:03:54 +0100 Subject: l2tp: remove redundant addr_len check in l2tp_ip_bind() addr_len's value has already been verified at this point. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 3d73278b86ca..992761e721af 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -258,7 +258,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (!sock_flag(sk, SOCK_ZAPPED)) goto out; - if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) + if (sk->sk_state != TCP_CLOSE) goto out; chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr); -- cgit v1.2.3 From bb39b0bdc8c62e97ceedb9a5dadea0f098431d8b Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 6 Jan 2017 20:03:55 +0100 Subject: l2tp: make __l2tp_ip*_bind_lookup() parameters 'const' Add const qualifier wherever possible for __l2tp_ip_bind_lookup() and __l2tp_ip6_bind_lookup(). Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_ip.c | 4 ++-- net/l2tp/l2tp_ip6.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 992761e721af..e5686bf898f7 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -53,8 +53,8 @@ static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr, struct sock *sk; sk_for_each_bound(sk, &l2tp_ip_bind_table) { - struct inet_sock *inet = inet_sk(sk); - struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); + const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); + const struct inet_sock *inet = inet_sk(sk); if (l2tp == NULL) continue; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 331ccf5a7bad..2f6be6ddc8cb 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -57,8 +57,8 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk) return (struct l2tp_ip6_sock *)sk; } -static struct sock *__l2tp_ip6_bind_lookup(struct net *net, - struct in6_addr *laddr, +static struct sock *__l2tp_ip6_bind_lookup(const struct net *net, + const struct in6_addr *laddr, const struct in6_addr *raddr, int dif, u32 tunnel_id) { @@ -67,7 +67,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, sk_for_each_bound(sk, &l2tp_ip6_bind_table) { const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk); const struct in6_addr *sk_raddr = &sk->sk_v6_daddr; - struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); + const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); if (l2tp == NULL) continue; -- cgit v1.2.3 From 986f7cbc510e29c33b7c8c1701a902a752159425 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 6 Jan 2017 20:03:56 +0100 Subject: l2tp: remove useless NULL check in __l2tp_ip*_bind_lookup() If "l2tp" was NULL, that'd mean "sk" is NULL too. This can't happen since "sk" is returned by sk_for_each_bound(). Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ip.c | 3 --- net/l2tp/l2tp_ip6.c | 3 --- 2 files changed, 6 deletions(-) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index e5686bf898f7..499d3cdbfbc8 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -56,9 +56,6 @@ static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr, const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); const struct inet_sock *inet = inet_sk(sk); - if (l2tp == NULL) - continue; - if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 2f6be6ddc8cb..7165b06d7b25 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -69,9 +69,6 @@ static struct sock *__l2tp_ip6_bind_lookup(const struct net *net, const struct in6_addr *sk_raddr = &sk->sk_v6_daddr; const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); - if (l2tp == NULL) - continue; - if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) && -- cgit v1.2.3 From c5fdae044030f753c9e6a3efa8db28fe00f8fdad Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 6 Jan 2017 20:03:57 +0100 Subject: l2tp: rework socket comparison in __l2tp_ip*_bind_lookup() Split conditions, so that each test becomes clearer. Also, for l2tp_ip, check if "laddr" is 0. This prevents a socket from binding to the unspecified address when other sockets are already bound using the same device (if any), connection ID and namespace. Same thing for l2tp_ip6: add ipv6_addr_any(laddr) and ipv6_addr_any(raddr) tests to ensure that an IPv6 unspecified address passed as parameter is properly treated a wildcard. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ip.c | 24 +++++++++++++++++------- net/l2tp/l2tp_ip6.c | 25 ++++++++++++++++++------- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 499d3cdbfbc8..b07a859f21bd 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -56,13 +56,23 @@ static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr, const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); const struct inet_sock *inet = inet_sk(sk); - if ((l2tp->conn_id == tunnel_id) && - net_eq(sock_net(sk), net) && - !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && - (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) && - (!sk->sk_bound_dev_if || !dif || - sk->sk_bound_dev_if == dif)) - goto found; + if (!net_eq(sock_net(sk), net)) + continue; + + if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif) + continue; + + if (inet->inet_rcv_saddr && laddr && + inet->inet_rcv_saddr != laddr) + continue; + + if (inet->inet_daddr && raddr && inet->inet_daddr != raddr) + continue; + + if (l2tp->conn_id != tunnel_id) + continue; + + goto found; } sk = NULL; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 7165b06d7b25..4b06eb415f68 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -69,13 +69,24 @@ static struct sock *__l2tp_ip6_bind_lookup(const struct net *net, const struct in6_addr *sk_raddr = &sk->sk_v6_daddr; const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); - if ((l2tp->conn_id == tunnel_id) && - net_eq(sock_net(sk), net) && - (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) && - (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) && - (!sk->sk_bound_dev_if || !dif || - sk->sk_bound_dev_if == dif)) - goto found; + if (!net_eq(sock_net(sk), net)) + continue; + + if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif) + continue; + + if (sk_laddr && !ipv6_addr_any(sk_laddr) && + !ipv6_addr_any(laddr) && !ipv6_addr_equal(sk_laddr, laddr)) + continue; + + if (!ipv6_addr_any(sk_raddr) && raddr && + !ipv6_addr_any(raddr) && !ipv6_addr_equal(sk_raddr, raddr)) + continue; + + if (l2tp->conn_id != tunnel_id) + continue; + + goto found; } sk = NULL; -- cgit v1.2.3 From a6c83ccf3c534214e0aeb167a70391864da9b1fc Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Jan 2017 14:07:29 -0600 Subject: net: ethernet: ti: cpdma: am437x: allow descs to be plased in ddr It's observed that cpsw/cpdma is not working properly when CPPI descriptors are placed in DDR instead of internal CPPI RAM on am437x SoC: - rx/tx silently stops processing packets; - or - after boot it's working for sometime, but stuck once Network load is increased (ping is working, but iperf is not). (The same issue has not been reproduced on am335x and am57xx). It seems that write to HDP register processed faster by interconnect than writing of descriptor memory buffer in DDR, which is probably caused by store buffer / write buffer differences as these functions are implemented differently across devices. So, to fix this i come up with two minimal, required changes: 1) all accesses to the channel register HDP/CP/RXFREE registers should be done using sync IO accessors readl()/writel(), because all previous memory writes writes have to be completed before starting channel (write to HDP) or completing desc processing. 
2) the change 1 only doesn't work on am437x and additional reading of desc's field is required right after the new descriptor was filled with data and before pointer on it will be stored in prev_desc->hw_next field or HDP register. In addition, to above changes this patch eliminates all relaxed ordering I/O accessors in this driver as suggested by David Miller to avoid such kind of issues in the future, but with one exception - relaxed IO accessors will still be used to fill desc in cpdma_chan_submit(), which is safe as there is read barrier at the end of write sequence, and because sync IO accessors usage here will affect on net performance. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 40 ++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 36518fc5c7cc..d6f0ded7dafc 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -166,12 +166,12 @@ static struct cpdma_control_info controls[] = { #define num_chan params.num_chan /* various accessors */ -#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) -#define chan_read(chan, fld) __raw_readl((chan)->fld) -#define desc_read(desc, fld) __raw_readl(&(desc)->fld) -#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) -#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) -#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) +#define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs)) +#define chan_read(chan, fld) readl((chan)->fld) +#define desc_read(desc, fld) readl(&(desc)->fld) +#define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs)) +#define chan_write(chan, fld, v) writel(v, (chan)->fld) +#define desc_write(desc, fld, v) writel((u32)(v), &(desc)->fld) #define cpdma_desc_to_port(chan, mode, directed) \ do { \ @@ -542,10 +542,10 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) } for (i = 0; i < ctlr->num_chan; i++) { - __raw_writel(0, ctlr->params.txhdp + 4 * i); - __raw_writel(0, ctlr->params.rxhdp + 4 * i); - __raw_writel(0, ctlr->params.txcp + 4 * i); - __raw_writel(0, ctlr->params.rxcp + 4 * i); + writel(0, ctlr->params.txhdp + 4 * i); + writel(0, ctlr->params.rxhdp + 4 * i); + writel(0, ctlr->params.txcp + 4 * i); + writel(0, ctlr->params.rxcp + 4 * i); } dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); @@ -1061,13 +1061,17 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; cpdma_desc_to_port(chan, mode, directed); - desc_write(desc, hw_next, 0); - desc_write(desc, hw_buffer, buffer); - desc_write(desc, hw_len, len); - desc_write(desc, hw_mode, mode | len); - desc_write(desc, sw_token, token); - desc_write(desc, sw_buffer, buffer); - desc_write(desc, sw_len, len); + /* Relaxed IO accessors can be used here as there is read barrier + * at the end of write sequence. 
+ */ + writel_relaxed(0, &desc->hw_next); + writel_relaxed(buffer, &desc->hw_buffer); + writel_relaxed(len, &desc->hw_len); + writel_relaxed(mode | len, &desc->hw_mode); + writel_relaxed(token, &desc->sw_token); + writel_relaxed(buffer, &desc->sw_buffer); + writel_relaxed(len, &desc->sw_len); + desc_read(desc, sw_len); __cpdma_chan_submit(chan, desc); @@ -1136,7 +1140,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) } desc_dma = desc_phys(pool, desc); - status = __raw_readl(&desc->hw_mode); + status = desc_read(desc, hw_mode); outlen = status & 0x7ff; if (status & CPDMA_DESC_OWNER) { chan->stats.busy_dequeue++; -- cgit v1.2.3 From 12a303e3b8136a89f0864a577a69f6a40b65fbe5 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Jan 2017 14:07:30 -0600 Subject: net: ethernet: ti: cpdma: fix desc re-queuing The currently processing cpdma descriptor with EOQ flag set may contain two values in Next Descriptor Pointer field: - valid pointer: means CPDMA missed addition of new desc in queue; - null: no more descriptors in queue. In the later case, it's not required to write to HDP register, but now CPDMA does it. Hence, add additional check for Next Descriptor Pointer != null in cpdma_chan_process() function before writing in HDP register. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index d6f0ded7dafc..a53b38455a2f 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -1159,7 +1159,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) chan->count--; chan->stats.good_dequeue++; - if (status & CPDMA_DESC_EOQ) { + if ((status & CPDMA_DESC_EOQ) && chan->head) { chan->stats.requeue++; chan_write(chan, hdp, desc_phys(pool, chan->head)); } -- cgit v1.2.3 From 5fcc40a90064822d31328ced3e3e69bc7d6cfe27 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Jan 2017 14:07:31 -0600 Subject: net: ethernet: ti: cpdma: minimize number of parameters in cpdma_desc_pool_create/destroy() Update cpdma_desc_pool_create/destroy() to accept only one parameter struct cpdma_ctlr*, as this structure contains all required information for pool creation/destruction. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 62 ++++++++++++++++----------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index a53b38455a2f..7080eb9eceff 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -181,8 +181,10 @@ static struct cpdma_control_info controls[] = { (directed << CPDMA_TO_PORT_SHIFT)); \ } while (0) -static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) +static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) { + struct cpdma_desc_pool *pool = ctlr->pool; + if (!pool) return; @@ -191,7 +193,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) gen_pool_size(pool->gen_pool), gen_pool_avail(pool->gen_pool)); if (pool->cpumap) - dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, + dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap, pool->phys); else iounmap(pool->iomap); @@ -203,37 +205,37 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) * devices (e.g. 
cpsw switches) use plain old memory. Descriptor pools * abstract out these details */ -static struct cpdma_desc_pool * -cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, - int size, int align) +int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) { + struct cpdma_params *cpdma_params = &ctlr->params; struct cpdma_desc_pool *pool; - int ret; + int ret = -ENOMEM; - pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); + pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL); if (!pool) goto gen_pool_create_fail; + ctlr->pool = pool; - pool->dev = dev; - pool->mem_size = size; - pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); - pool->num_desc = size / pool->desc_size; + pool->mem_size = cpdma_params->desc_mem_size; + pool->desc_size = ALIGN(sizeof(struct cpdma_desc), + cpdma_params->desc_align); + pool->num_desc = pool->mem_size / pool->desc_size; - pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1, - "cpdma"); + pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size), + -1, "cpdma"); if (IS_ERR(pool->gen_pool)) { - dev_err(dev, "pool create failed %ld\n", - PTR_ERR(pool->gen_pool)); + ret = PTR_ERR(pool->gen_pool); + dev_err(ctlr->dev, "pool create failed %d\n", ret); goto gen_pool_create_fail; } - if (phys) { - pool->phys = phys; - pool->iomap = ioremap(phys, size); /* should be memremap? */ - pool->hw_addr = hw_addr; + if (cpdma_params->desc_mem_phys) { + pool->phys = cpdma_params->desc_mem_phys; + pool->iomap = ioremap(pool->phys, pool->mem_size); + pool->hw_addr = cpdma_params->desc_hw_addr; } else { - pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr, - GFP_KERNEL); + pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size, + &pool->hw_addr, GFP_KERNEL); pool->iomap = (void __iomem __force *)pool->cpumap; pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } @@ -244,16 +246,17 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap, pool->phys, pool->mem_size, -1); if (ret < 0) { - dev_err(dev, "pool add failed %d\n", ret); + dev_err(ctlr->dev, "pool add failed %d\n", ret); goto gen_pool_add_virt_fail; } - return pool; + return 0; gen_pool_add_virt_fail: - cpdma_desc_pool_destroy(pool); + cpdma_desc_pool_destroy(ctlr); gen_pool_create_fail: - return NULL; + ctlr->pool = NULL; + return ret; } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, @@ -502,12 +505,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->chan_num = 0; spin_lock_init(&ctlr->lock); - ctlr->pool = cpdma_desc_pool_create(ctlr->dev, - ctlr->params.desc_mem_phys, - ctlr->params.desc_hw_addr, - ctlr->params.desc_mem_size, - ctlr->params.desc_align); - if (!ctlr->pool) + if (cpdma_desc_pool_create(ctlr)) return NULL; if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) @@ -623,7 +621,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) cpdma_chan_destroy(ctlr->channels[i]); - cpdma_desc_pool_destroy(ctlr->pool); + cpdma_desc_pool_destroy(ctlr); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); -- cgit v1.2.3 From 7f3b490aaacd789ebc0e26e47b7706547afd0438 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Jan 2017 14:07:32 -0600 Subject: net: ethernet: ti: cpdma: use devm_ioremap Use devm_ioremap() and simplify the code. Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 7080eb9eceff..b229bf3df912 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -195,8 +195,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) if (pool->cpumap) dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap, pool->phys); - else - iounmap(pool->iomap); } /* @@ -231,7 +229,8 @@ int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) if (cpdma_params->desc_mem_phys) { pool->phys = cpdma_params->desc_mem_phys; - pool->iomap = ioremap(pool->phys, pool->mem_size); + pool->iomap = devm_ioremap(ctlr->dev, pool->phys, + pool->mem_size); pool->hw_addr = cpdma_params->desc_hw_addr; } else { pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size, -- cgit v1.2.3 From 90225bf0ba35ab43d1e9825c22f3810826c8bfe8 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Jan 2017 14:07:33 -0600 Subject: net: ethernet: ti: cpsw: add support for descs pool size configuration The CPSW CPDMA can process buffer descriptors placed as in internal CPPI RAM as in DDR. This patch adds support in CPSW and CPDMA for descs_pool_size mudule parameter, which defines total number of CPDMA CPPI descriptors to be used for both ingress/egress packets processing: - memory size, required for CPDMA descriptor pool, is calculated basing on number of descriptors specified by user in descs_pool_size and CPDMA descriptor size and allocated from coherent memory (CMA area); - CPDMA descriptor pool will be allocated in DDR if pool memory size > internal CPPI RAM or use internal CPPI RAM otherwise; - if descs_pool_size not specified in DT - the default value 256 will be used which will allow to place CPDMA descriptors pool into the internal CPPI RAM (current default behaviour); - CPDMA will ignore descs_pool_size if descs_pool_size = 0 for backward comaptiobility with davinci_emac. descs_pool_size is boot time setting and can't be changed once CPSW/CPDMA is initialized. Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 12 +++++++++--- drivers/net/ethernet/ti/davinci_cpdma.c | 12 ++++++++++++ drivers/net/ethernet/ti/davinci_cpdma.h | 1 + 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index b203143647e6..d39875e267d9 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -145,6 +145,7 @@ do { \ cpsw->data.active_slave) #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 +#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 static int debug_level; module_param(debug_level, int, 0); @@ -158,6 +159,10 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE; module_param(rx_packet_max, int, 0); MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)"); +static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; +module_param(descs_pool_size, int, 0444); +MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); + struct cpsw_wr_regs { u32 id_ver; u32 soft_reset; @@ -2969,6 +2974,7 @@ static int cpsw_probe(struct platform_device *pdev) dma_params.has_ext_regs = true; dma_params.desc_hw_addr = dma_params.desc_mem_phys; dma_params.bus_freq_mhz = cpsw->bus_freq_mhz; + dma_params.descs_pool_size = descs_pool_size; cpsw->dma = cpdma_ctlr_create(&dma_params); if (!cpsw->dma) { @@ -3072,9 +3078,9 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", - &ss_res->start, ndev->irq); - + cpsw_notice(priv, probe, + "initialized device (regs %pa, irq %d, pool size %d)\n", + &ss_res->start, ndev->irq, dma_params.descs_pool_size); if (cpsw->data.dual_emac) { ret = cpsw_probe_dual_emac(priv); if (ret) { diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index b229bf3df912..65e2f124d62a 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -219,6 +219,18 @@ int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) cpdma_params->desc_align); pool->num_desc = pool->mem_size / pool->desc_size; + if (cpdma_params->descs_pool_size) { + /* recalculate memory size required cpdma descriptor pool + * basing on number of descriptors specified by user and + * if memory size > CPPI internal RAM size (desc_mem_size) + * then switch to use DDR + */ + pool->num_desc = cpdma_params->descs_pool_size; + pool->mem_size = pool->desc_size * pool->num_desc; + if (pool->mem_size > cpdma_params->desc_mem_size) + cpdma_params->desc_mem_phys = 0; + } + pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size), -1, "cpdma"); if (IS_ERR(pool->gen_pool)) { diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 4a167db2abab..cb45f8f543d9 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -37,6 +37,7 @@ struct cpdma_params { int desc_mem_size; int desc_align; u32 bus_freq_mhz; + u32 descs_pool_size; /* * Some instances of embedded cpdma controllers have extra control and -- cgit v1.2.3 From be034fc14015c7fcabe62317d156e98b508a759b Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Jan 2017 14:07:34 -0600 Subject: net: ethernet: ti: cpsw: add support for ringparam configuration The CPDMA uses one pool of descriptors for both RX and TX which by default split between all channels proportionally depending on total number of CPDMA channels and number of TX and RX channels. 
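As a rough illustration of that re-split arithmetic, the userspace sketch below divides a fixed pool once a new RX ring size is requested; the 10%/90% RX bounds and the 8192-descriptor pool come from the description and ethtool output further down, and the helper names are illustrative rather than the driver's own:

/* Userspace sketch only; mirrors the described bounds, not the driver code. */
#include <stdio.h>

struct pool_split {
	int num_rx_desc;
	int num_tx_desc;
};

/* Re-split a fixed descriptor pool for a requested RX ring size. */
static int split_pool(struct pool_split *p, int pool_size, int rx_requested)
{
	int rx_min = pool_size / 10;		/* minimum: 10% of the pool */
	int rx_max = (pool_size * 9) / 10;	/* maximum: 90% of the pool */

	if (rx_requested < rx_min || rx_requested > rx_max)
		return -1;			/* the driver returns -EINVAL here */

	p->num_rx_desc = rx_requested;
	p->num_tx_desc = pool_size - rx_requested;	/* TX gets the remainder */
	return 0;
}

int main(void)
{
	struct pool_split p;

	if (!split_pool(&p, 8192, 7372))
		printf("rx=%d tx=%d\n", p.num_rx_desc, p.num_tx_desc);	/* rx=7372 tx=820 */
	return 0;
}
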
As result, more descriptors will be consumed by TX path if there are more TX channels and there is no way now to dedicate more descriptors for RX path. So, add the ability to re-split CPDMA pool of descriptors between RX and TX path via ethtool '-G' command wich will allow to configure and fix number of descriptors used by RX and TX path, which, then, will be split between RX/TX channels proportionally depending on RX/TX channels number and weight. ethtool '-G' command will accept only number of RX entries and rest of descriptors will be arranged for TX automatically. Command: ethtool -G rx defaults and limitations: - minimum number of rx descriptors is 10% of total number of descriptors in CPDMA pool - maximum number of rx descriptors is 90% of total number of descriptors in CPDMA pool - by default, descriptors will be split equally between RX/TX path - any values passed in "tx" parameter will be ignored Usage: # ethtool -g eth0 Pre-set maximums: RX: 7372 RX Mini: 0 RX Jumbo: 0 TX: 0 Current hardware settings: RX: 4096 RX Mini: 0 RX Jumbo: 0 TX: 4096 # ethtool -G eth0 rx 7372 # ethtool -g eth0 Ring parameters for eth0: Pre-set maximums: RX: 7372 RX Mini: 0 RX Jumbo: 0 TX: 0 Current hardware settings: RX: 7372 RX Mini: 0 RX Jumbo: 0 TX: 820 Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 86 +++++++++++++++++++++++++++++++++ drivers/net/ethernet/ti/davinci_cpdma.c | 40 ++++++++++++--- drivers/net/ethernet/ti/davinci_cpdma.h | 4 ++ 3 files changed, 122 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d39875e267d9..f339268da11a 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2484,6 +2484,90 @@ static int cpsw_nway_reset(struct net_device *ndev) return -EOPNOTSUPP; } +static void cpsw_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + /* not supported */ + ering->tx_max_pending = 0; + ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); + /* Max 90% RX buffers */ + ering->rx_max_pending = (descs_pool_size * 9) / 10; + ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); +} + +static int cpsw_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + /* ignore ering->tx_pending - only rx_pending adjustment is supported */ + + if (ering->rx_mini_pending || ering->rx_jumbo_pending || + ering->rx_pending < (descs_pool_size / 10) || + ering->rx_pending > ((descs_pool_size * 9) / 10)) + return -EINVAL; + + if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma)) + return 0; + + /* Disable NAPI scheduling */ + cpsw_intr_disable(cpsw); + + /* Stop all transmit queues for every network device. + * Disable re-using rx descriptors with dormant_on. 
+ */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + netif_tx_stop_all_queues(slave->ndev); + netif_dormant_on(slave->ndev); + } + + /* Handle rest of tx packets and stop cpdma channels */ + cpdma_ctlr_stop(cpsw->dma); + + cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); + + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + /* Enable rx packets handling */ + netif_dormant_off(slave->ndev); + } + + if (cpsw_common_res_usage_state(cpsw)) { + cpdma_chan_split_pool(cpsw->dma); + + ret = cpsw_fill_rx_channels(priv); + if (ret) + goto err; + + /* After this receive is started */ + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + netif_tx_start_all_queues(slave->ndev); + } + return 0; +err: + dev_err(priv->dev, "cannot set ring params, closing device\n"); + dev_close(ndev); + return ret; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -2510,6 +2594,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_eee = cpsw_get_eee, .set_eee = cpsw_set_eee, .nway_reset = cpsw_nway_reset, + .get_ringparam = cpsw_get_ringparam, + .set_ringparam = cpsw_set_ringparam, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw, diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 65e2f124d62a..d80bff19d4ec 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -108,6 +108,8 @@ struct cpdma_ctlr { spinlock_t lock; struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; int chan_num; + int num_rx_desc; /* RX descriptors number */ + int num_tx_desc; /* TX descriptors number */ }; struct cpdma_chan { @@ -518,6 +520,9 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) if (cpdma_desc_pool_create(ctlr)) return NULL; + /* split pool equally between RX/TX by default */ + ctlr->num_tx_desc = ctlr->pool->num_desc / 2; + ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc; if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) ctlr->num_chan = CPDMA_MAX_CHANNELS; @@ -717,22 +722,22 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr, } } /* use remains */ - most_chan->desc_num += desc_cnt; + if (most_chan) + most_chan->desc_num += desc_cnt; } /** * cpdma_chan_split_pool - Splits ctrl pool between all channels. 
* Has to be called under ctlr lock */ -static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) +int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) { int tx_per_ch_desc = 0, rx_per_ch_desc = 0; - struct cpdma_desc_pool *pool = ctlr->pool; int free_rx_num = 0, free_tx_num = 0; int rx_weight = 0, tx_weight = 0; int tx_desc_num, rx_desc_num; struct cpdma_chan *chan; - int i, tx_num = 0; + int i; if (!ctlr->chan_num) return 0; @@ -750,15 +755,14 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) if (!chan->weight) free_tx_num++; tx_weight += chan->weight; - tx_num++; } } if (rx_weight > 100 || tx_weight > 100) return -EINVAL; - tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num; - rx_desc_num = pool->num_desc - tx_desc_num; + tx_desc_num = ctlr->num_tx_desc; + rx_desc_num = ctlr->num_rx_desc; if (free_tx_num) { tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100; @@ -774,6 +778,8 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_split_pool); + /* cpdma_chan_set_weight - set weight of a channel in percentage. * Tx and Rx channels have separate weights. That is 100% for RX @@ -907,7 +913,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, chan->chan_num = chan_num; chan->handler = handler; chan->rate = 0; - chan->desc_num = ctlr->pool->num_desc / 2; chan->weight = 0; if (is_rx_chan(chan)) { @@ -1329,4 +1334,23 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) } EXPORT_SYMBOL_GPL(cpdma_control_set); +int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr) +{ + return ctlr->num_rx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs); + +int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr) +{ + return ctlr->num_tx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs); + +void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc) +{ + ctlr->num_rx_desc = num_rx_desc; + ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs); + MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index cb45f8f543d9..fd65ce2b83de 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -114,5 +114,9 @@ enum cpdma_control { int cpdma_control_get(struct cpdma_ctlr *ctlr, int control); int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value); +int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr); +void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc); +int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr); +int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr); #endif -- cgit v1.2.3 From c40d8883a28ece32d753d96e77f05e5e9a7c4415 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Jan 2017 14:07:35 -0600 Subject: Documentation: DT: net: cpsw: remove no_bd_ram property Even if no_bd_ram property is described in TI CPSW bindings the support for it has never been introduced in CPSW driver, so there are no real users of it. Hence, remove no_bd_ram property from documentation and DT files. Cc: 'Rob Herring ' Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/cpsw.txt | 3 --- arch/arm/boot/dts/am33xx.dtsi | 1 - arch/arm/boot/dts/am4372.dtsi | 1 - arch/arm/boot/dts/dm814x.dtsi | 1 - arch/arm/boot/dts/dra7.dtsi | 1 - 5 files changed, 7 deletions(-) diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt index ebda7c93453a..7cc15c96ea95 100644 --- a/Documentation/devicetree/bindings/net/cpsw.txt +++ b/Documentation/devicetree/bindings/net/cpsw.txt @@ -23,7 +23,6 @@ Required properties: Optional properties: - ti,hwmods : Must be "cpgmac0" -- no_bd_ram : Must be 0 or 1 - dual_emac : Specifies Switch to act as Dual EMAC - syscon : Phandle to the system control device node, which is the control module device of the am33x @@ -70,7 +69,6 @@ Examples: cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; - no_bd_ram = <0>; rx_descs = <64>; mac_control = <0x20>; slaves = <2>; @@ -99,7 +97,6 @@ Examples: cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; - no_bd_ram = <0>; rx_descs = <64>; mac_control = <0x20>; slaves = <2>; diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 64c8aa9057a3..d458cebcab5f 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -781,7 +781,6 @@ cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; - no_bd_ram = <0>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index ac55f93fc91e..837aff1b4aa1 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -669,7 +669,6 @@ cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; - no_bd_ram = <0>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi index 1facc5f12cef..4cbeb5c9b529 100644 --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@ -509,7 +509,6 @@ cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; - no_bd_ram = <0>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index addb7530cfbe..b69df916c8c2 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1707,7 +1707,6 @@ cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; - no_bd_ram = <0>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; -- cgit v1.2.3 From 69d707d034b6078f0b5998f80e5883c8243b205c Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 6 Jan 2017 15:37:39 -0500 Subject: net: netcp: extract eflag from desc for rx_hook handling Extract the eflag bits from the received desc and pass it down the rx_hook chain to be available for netcp modules. Also the psdata and epib data has to be inspected by the netcp modules. So the desc can be freed only after returning from the rx_hook. So move knav_pool_desc_put() after the rx_hook processing. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp.h | 1 + drivers/net/ethernet/ti/netcp_core.c | 20 +++++++++++++++++--- include/linux/soc/ti/knav_dma.h | 2 ++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 0f58c584ae09..a92abd62ec60 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -115,6 +115,7 @@ struct netcp_packet { struct sk_buff *skb; __le32 *epib; u32 *psdata; + u32 eflags; unsigned int psdata_len; struct netcp_intf *netcp; struct netcp_tx_pipe *tx_pipe; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index c243335ed649..a136c56e0aff 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -122,6 +122,13 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc, *ndesc = le32_to_cpu(desc->next_desc); } +static void get_desc_info(u32 *desc_info, u32 *pkt_info, + struct knav_dma_desc *desc) +{ + *desc_info = le32_to_cpu(desc->desc_info); + *pkt_info = le32_to_cpu(desc->packet_info); +} + static u32 get_sw_data(int index, struct knav_dma_desc *desc) { /* No Endian conversion needed as this data is untouched by hw */ @@ -653,6 +660,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) struct netcp_packet p_info; struct sk_buff *skb; void *org_buf_ptr; + u32 tmp; dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); if (!dma_desc) @@ -724,9 +732,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) knav_pool_desc_put(netcp->rx_pool, ndesc); } - /* Free the primary descriptor */ - knav_pool_desc_put(netcp->rx_pool, desc); - /* check for packet len and warn */ if (unlikely(pkt_sz != accum_sz)) dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n", @@ -739,6 +744,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) p_info.skb = skb; skb->dev = netcp->ndev; p_info.rxtstamp_complete = false; + get_desc_info(&tmp, &p_info.eflags, desc); + p_info.epib = desc->epib; + p_info.psdata = (u32 __force *)desc->psdata; + p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) & + KNAV_DMA_DESC_EFLAGS_MASK); list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) { int ret; @@ -748,10 +758,14 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n", rx_hook->order, ret); netcp->ndev->stats.rx_errors++; + /* Free the primary descriptor */ + knav_pool_desc_put(netcp->rx_pool, desc); dev_kfree_skb(skb); return 0; } } + /* Free the primary descriptor */ + knav_pool_desc_put(netcp->rx_pool, desc); netcp->ndev->stats.rx_packets++; netcp->ndev->stats.rx_bytes += skb->len; diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h index 35cb9264e0d5..2b7882666ef6 100644 --- a/include/linux/soc/ti/knav_dma.h +++ b/include/linux/soc/ti/knav_dma.h @@ -41,6 +41,8 @@ #define KNAV_DMA_DESC_RETQ_SHIFT 0 #define KNAV_DMA_DESC_RETQ_MASK MASK(14) #define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) +#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4) +#define KNAV_DMA_DESC_EFLAGS_SHIFT 20 #define KNAV_DMA_NUM_EPIB_WORDS 4 #define KNAV_DMA_NUM_PS_WORDS 16 -- cgit v1.2.3 From aa255101f7e92b621736856386f87f7b418874a6 Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 6 Jan 2017 15:37:40 -0500 Subject: net: netcp: remove the redundant memmov() The psdata is populated with command data by netcp modules to the tail of the buffer and set_words() copy 
the same to the front of the psdata. So remove the redundant memmov function call. Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp_core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a136c56e0aff..286fd8da3b67 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1226,9 +1226,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, /* psdata points to both native-endian and device-endian data */ __le32 *psdata = (void __force *)p_info.psdata; - memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, - p_info.psdata_len); - set_words(p_info.psdata, p_info.psdata_len, psdata); + set_words((u32 *)psdata + + (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len), + p_info.psdata_len, psdata); tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << KNAV_DMA_DESC_PSLEN_SHIFT; } -- cgit v1.2.3 From 6a8162e99ef344fcffe14c9a4d1061493c4a81a4 Mon Sep 17 00:00:00 2001 From: Michael Scherban Date: Fri, 6 Jan 2017 15:37:41 -0500 Subject: net: netcp: store network statistics in 64 bits Previously the network statistics were stored in 32 bit variable which can cause some stats to roll over after several minutes of high traffic. This implements 64 bit storage so larger numbers can be stored. Signed-off-by: Michael Scherban Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp.h | 18 ++++++++++ drivers/net/ethernet/ti/netcp_core.c | 68 +++++++++++++++++++++++++++++------- 2 files changed, 74 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index a92abd62ec60..d243c5df9d6b 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -23,6 +23,7 @@ #include #include +#include /* Maximum Ethernet frame size supported by Keystone switch */ #define NETCP_MAX_FRAME_SIZE 9504 @@ -68,6 +69,20 @@ struct netcp_addr { struct list_head node; }; +struct netcp_stats { + struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp; + u64 rx_packets; + u64 rx_bytes; + u32 rx_errors; + u32 rx_dropped; + + struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp; + u64 tx_packets; + u64 tx_bytes; + u32 tx_errors; + u32 tx_dropped; +}; + struct netcp_intf { struct device *dev; struct device *ndev_dev; @@ -88,6 +103,9 @@ struct netcp_intf { struct napi_struct rx_napi; struct napi_struct tx_napi; + /* 64-bit netcp stats */ + struct netcp_stats stats; + void *rx_channel; const char *dma_chan_name; u32 rx_pool_size; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 286fd8da3b67..b077ed4abddc 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -629,6 +629,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, static void netcp_empty_rx_queue(struct netcp_intf *netcp) { + struct netcp_stats *rx_stats = &netcp->stats; struct knav_dma_desc *desc; unsigned int dma_sz; dma_addr_t dma; @@ -642,16 +643,17 @@ static void netcp_empty_rx_queue(struct netcp_intf *netcp) if (unlikely(!desc)) { dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n", __func__); - netcp->ndev->stats.rx_errors++; + rx_stats->rx_errors++; continue; } netcp_free_rx_desc_chain(netcp, desc); - netcp->ndev->stats.rx_dropped++; + rx_stats->rx_dropped++; } } static int netcp_process_one_rx_packet(struct netcp_intf 
*netcp) { + struct netcp_stats *rx_stats = &netcp->stats; unsigned int dma_sz, buf_len, org_buf_len; struct knav_dma_desc *desc, *ndesc; unsigned int pkt_sz = 0, accum_sz; @@ -757,8 +759,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) if (unlikely(ret)) { dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n", rx_hook->order, ret); - netcp->ndev->stats.rx_errors++; /* Free the primary descriptor */ + rx_stats->rx_dropped++; knav_pool_desc_put(netcp->rx_pool, desc); dev_kfree_skb(skb); return 0; @@ -767,8 +769,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) /* Free the primary descriptor */ knav_pool_desc_put(netcp->rx_pool, desc); - netcp->ndev->stats.rx_packets++; - netcp->ndev->stats.rx_bytes += skb->len; + u64_stats_update_begin(&rx_stats->syncp_rx); + rx_stats->rx_packets++; + rx_stats->rx_bytes += skb->len; + u64_stats_update_end(&rx_stats->syncp_rx); /* push skb up the stack */ skb->protocol = eth_type_trans(skb, netcp->ndev); @@ -777,7 +781,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) free_desc: netcp_free_rx_desc_chain(netcp, desc); - netcp->ndev->stats.rx_errors++; + rx_stats->rx_errors++; return 0; } @@ -1008,6 +1012,7 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp, static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, unsigned int budget) { + struct netcp_stats *tx_stats = &netcp->stats; struct knav_dma_desc *desc; struct netcp_tx_cb *tx_cb; struct sk_buff *skb; @@ -1022,7 +1027,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz); if (unlikely(!desc)) { dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); - netcp->ndev->stats.tx_errors++; + tx_stats->tx_errors++; continue; } @@ -1033,7 +1038,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, netcp_free_tx_desc_chain(netcp, desc, dma_sz); if (!skb) { dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); - netcp->ndev->stats.tx_errors++; + tx_stats->tx_errors++; continue; } @@ -1050,8 +1055,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, netif_wake_subqueue(netcp->ndev, subqueue); } - netcp->ndev->stats.tx_packets++; - netcp->ndev->stats.tx_bytes += skb->len; + u64_stats_update_begin(&tx_stats->syncp_tx); + tx_stats->tx_packets++; + tx_stats->tx_bytes += skb->len; + u64_stats_update_end(&tx_stats->syncp_tx); dev_kfree_skb(skb); pkts++; } @@ -1272,6 +1279,7 @@ out: static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_stats *tx_stats = &netcp->stats; int subqueue = skb_get_queue_mapping(skb); struct knav_dma_desc *desc; int desc_count, ret = 0; @@ -1287,7 +1295,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* If we get here, the skb has already been dropped */ dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n", ret); - ndev->stats.tx_dropped++; + tx_stats->tx_dropped++; return ret; } skb->len = NETCP_MIN_PACKET_SIZE; @@ -1315,7 +1323,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; drop: - ndev->stats.tx_dropped++; + tx_stats->tx_dropped++; if (desc) netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc)); dev_kfree_skb(skb); @@ -1897,12 +1905,46 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return 0; } +static struct rtnl_link_stats64 * +netcp_get_stats(struct net_device *ndev, struct 
rtnl_link_stats64 *stats) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_stats *p = &netcp->stats; + u64 rxpackets, rxbytes, txpackets, txbytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&p->syncp_rx); + rxpackets = p->rx_packets; + rxbytes = p->rx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start)); + + do { + start = u64_stats_fetch_begin_irq(&p->syncp_tx); + txpackets = p->tx_packets; + txbytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start)); + + stats->rx_packets = rxpackets; + stats->rx_bytes = rxbytes; + stats->tx_packets = txpackets; + stats->tx_bytes = txbytes; + + /* The following are stored as 32 bit */ + stats->rx_errors = p->rx_errors; + stats->rx_dropped = p->rx_dropped; + stats->tx_dropped = p->tx_dropped; + + return stats; +} + static const struct net_device_ops netcp_netdev_ops = { .ndo_open = netcp_ndo_open, .ndo_stop = netcp_ndo_stop, .ndo_start_xmit = netcp_ndo_start_xmit, .ndo_set_rx_mode = netcp_set_rx_mode, .ndo_do_ioctl = netcp_ndo_ioctl, + .ndo_get_stats64 = netcp_get_stats, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = netcp_rx_add_vid, @@ -1949,6 +1991,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device, INIT_LIST_HEAD(&netcp->txhook_list_head); INIT_LIST_HEAD(&netcp->rxhook_list_head); INIT_LIST_HEAD(&netcp->addr_list); + u64_stats_init(&netcp->stats.syncp_rx); + u64_stats_init(&netcp->stats.syncp_tx); netcp->netcp_device = netcp_device; netcp->dev = netcp_device->device; netcp->ndev = ndev; -- cgit v1.2.3 From 0cead3a6a1abd1f1656e302d876e985aadf8d8d2 Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 6 Jan 2017 15:37:42 -0500 Subject: net: netcp: ethss: get phy-handle only if link interface is MAC-to-PHY Currently to parse phy-handle, driver doesn't check if the interface is MAC to PHY. This patch add this check for all MAC to PHY interface types supported by the driver. Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 7d9e36f66735..4a64b3e5c7a0 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2930,7 +2930,9 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, } slave->open = false; - slave->phy_node = of_parse_phandle(node, "phy-handle", 0); + if ((slave->link_interface == SGMII_LINK_MAC_PHY) || + (slave->link_interface == XGMII_LINK_MAC_PHY)) + slave->phy_node = of_parse_phandle(node, "phy-handle", 0); slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); if (slave->link_interface >= XGMII_LINK_MAC_PHY) -- cgit v1.2.3 From 4cd85a61d2185a79389cd5e52c02223db9062559 Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 6 Jan 2017 15:37:43 -0500 Subject: net: netcp: use hw capability to remove FCS word from rx packets Some of the newer Ethernet switch hw (such as that on k2e/l/g) can strip the Etherenet FCS from packet at the port 0 egress of the switch. So use this capability instead of doing it in software. Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp.h | 2 ++ drivers/net/ethernet/ti/netcp_core.c | 8 ++++++-- drivers/net/ethernet/ti/netcp_ethss.c | 10 ++++++++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index d243c5df9d6b..8900a6fad318 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -102,6 +102,8 @@ struct netcp_intf { void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; struct napi_struct rx_napi; struct napi_struct tx_napi; +#define ETH_SW_CAN_REMOVE_ETH_FCS BIT(0) + u32 hw_cap; /* 64-bit netcp stats */ struct netcp_stats stats; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index b077ed4abddc..68a75cc0731c 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -739,8 +739,12 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n", pkt_sz, accum_sz); - /* Remove ethernet FCS from the packet */ - __pskb_trim(skb, skb->len - ETH_FCS_LEN); + /* Newer version of the Ethernet switch can trim the Ethernet FCS + * from the packet and is indicated in hw_cap. So trim it only for + * older h/w + */ + if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS)) + __pskb_trim(skb, skb->len - ETH_FCS_LEN); /* Call each of the RX hooks */ p_info.skb = skb; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 4a64b3e5c7a0..6bb8e2650017 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -122,6 +122,7 @@ #define MACSL_FULLDUPLEX BIT(0) #define GBE_CTL_P0_ENABLE BIT(2) +#define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13) #define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf #define GBE_STATS_CD_SEL BIT(28) @@ -2821,7 +2822,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) struct netcp_intf *netcp = netdev_priv(ndev); struct gbe_slave *slave = gbe_intf->slave; int port_num = slave->port_num; - u32 reg; + u32 reg, val; int ret; reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver)); @@ -2851,7 +2852,12 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype)); /* Control register */ - writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); + val = GBE_CTL_P0_ENABLE; + if (IS_SS_ID_MU(gbe_dev)) { + val |= ETH_SW_CTL_P0_TX_CRC_REMOVE; + netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS; + } + writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control)); /* All statistics enabled and STAT AB visible by default */ writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs, -- cgit v1.2.3 From ca47130a744b47495cfaa0f29f3f8d4c85079cba Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 6 Jan 2017 15:37:44 -0500 Subject: net: netcp: ale: update to support unknown vlan controls for NU switch In NU Ethernet switch used on some of the Keystone SoCs, there is separate UNKNOWNVLAN register for membership, unreg mcast flood, reg mcast flood and force untag egress bits in ALE. So control for these fields require different address offset, shift and size of field. As this ALE has the same version number as ALE in CPSW found on other SoCs, customization based on version number is not possible. So use a configuration parameter, nu_switch_ale, to identify the ALE ALE found in NU Switch. 
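As a side note on the (offset, shift, bits) scheme that the ale_controls[] table relies on, the self-contained sketch below shows the generic field update such a table drives; register I/O is faked with a plain array, the 0x90 offset is the ALE_UNKNOWNVLAN_MEMBER register from the patch below, and the nine-bit width is only an illustrative ale_ports value:

/* Userspace sketch only; illustrates the table-driven field access pattern. */
#include <stdint.h>
#include <stdio.h>

struct field_info {
	unsigned int offset;	/* register offset in bytes */
	unsigned int shift;
	unsigned int bits;
};

static uint32_t regs[64];	/* stand-in for the memory-mapped ALE block */

/* Read-modify-write a (shift, bits) field at a register offset. */
static void field_set(const struct field_info *f, uint32_t val)
{
	uint32_t mask = (1u << f->bits) - 1;
	uint32_t tmp = regs[f->offset / 4];

	tmp &= ~(mask << f->shift);
	tmp |= (val & mask) << f->shift;
	regs[f->offset / 4] = tmp;
}

int main(void)
{
	/* Unknown-VLAN membership: one bit per port, shift 0, at offset 0x90. */
	struct field_info member = { .offset = 0x90, .shift = 0, .bits = 9 };

	field_set(&member, 0x1ff);	/* mark all nine illustrative ports as members */
	printf("reg 0x90 = 0x%08x\n", (unsigned int)regs[member.offset / 4]);
	return 0;
}
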
Different treatment is needed for NU Switch ALE due to difference in the ale table bits, separate unknown vlan registers etc. The register information available in ale_controls, needs to be updated to support the netcp NU switch h/w. So it is not constant array any more since it needs to be updated based on ALE type. The header of the file is also updated to indicate it supports N port switch ALE, not just 3 port. The version mask is 3 bits in NU Switch ALE vs 8 bits on other ALE types. While at it, change the debug print to info print so that ALE version gets displayed in boot log. Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw_ale.c | 50 +++++++++++++++++++++++++++++++---- drivers/net/ethernet/ti/cpsw_ale.h | 13 ++++++++- drivers/net/ethernet/ti/netcp_ethss.c | 5 +++- 3 files changed, 61 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 43b061bd8e07..e15db39a2b69 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -1,5 +1,5 @@ /* - * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine + * Texas Instruments N-Port Ethernet Switch Address Lookup Engine * * Copyright (C) 2012 Texas Instruments * @@ -27,8 +27,9 @@ #define BITMASK(bits) (BIT(bits) - 1) -#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff) +#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask)) #define ALE_VERSION_MINOR(rev) (rev & 0xff) +#define ALE_VERSION_1R4 0x0104 /* ALE Registers */ #define ALE_IDVER 0x00 @@ -39,6 +40,12 @@ #define ALE_TABLE 0x34 #define ALE_PORTCTL 0x40 +/* ALE NetCP NU switch specific Registers */ +#define ALE_UNKNOWNVLAN_MEMBER 0x90 +#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD 0x94 +#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD 0x98 +#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C + #define ALE_TABLE_WRITE BIT(31) #define ALE_TYPE_FREE 0 @@ -464,7 +471,7 @@ struct ale_control_info { int bits; }; -static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { +static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { [ALE_ENABLE] = { .name = "enable", .offset = ALE_CONTROL, @@ -724,8 +731,41 @@ void cpsw_ale_start(struct cpsw_ale *ale) u32 rev; rev = __raw_readl(ale->params.ale_regs + ALE_IDVER); - dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n", - ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev)); + if (!ale->params.major_ver_mask) + ale->params.major_ver_mask = 0xff; + ale->version = + (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) | + ALE_VERSION_MINOR(rev); + dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n", + ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask), + ALE_VERSION_MINOR(rev)); + + if (ale->params.nu_switch_ale) { + /* Separate registers for unknown vlan configuration. 
+ * Also there are N bits, where N is number of ale + * ports and shift value should be 0 + */ + ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset = + ALE_UNKNOWNVLAN_MEMBER; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset = + ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset = + ALE_UNKNOWNVLAN_REG_MCAST_FLOOD; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset = + ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; + } + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index a7001894f3da..b1c79542633c 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -1,5 +1,5 @@ /* - * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs + * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs * * Copyright (C) 2012 Texas Instruments * @@ -21,6 +21,16 @@ struct cpsw_ale_params { unsigned long ale_ageout; /* in secs */ unsigned long ale_entries; unsigned long ale_ports; + /* NU Switch has specific handling as number of bits in ALE entries + * are different than other versions of ALE. Also there are specific + * registers for unknown vlan specific fields. So use nu_switch_ale + * to identify this hardware. + */ + bool nu_switch_ale; + /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So + * pass it from caller. + */ + u32 major_ver_mask; }; struct cpsw_ale { @@ -28,6 +38,7 @@ struct cpsw_ale { struct timer_list timer; unsigned long ageout; int allmulti; + u32 version; }; enum cpsw_ale_control { diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 6bb8e2650017..e4a18628d23d 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3609,7 +3609,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT; ale_params.ale_entries = gbe_dev->ale_entries; ale_params.ale_ports = gbe_dev->ale_ports; - + if (IS_SS_ID_MU(gbe_dev)) { + ale_params.major_ver_mask = 0x7; + ale_params.nu_switch_ale = true; + } gbe_dev->ale = cpsw_ale_create(&ale_params); if (!gbe_dev->ale) { dev_err(gbe_dev->dev, "error initializing ale engine\n"); -- cgit v1.2.3 From 7938a0d75fd64ce419b1cdf16aa182fa0fd1acd6 Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 6 Jan 2017 15:37:45 -0500 Subject: net: netcp: ale: use ale_status to size the ale table ALE h/w on newer version of NetCP (K2E/L/G) does provide a ALE_STATUS register for the size of the ALE Table implemented in h/w. Currently for example we set ALE Table size to 1024 for NetCP ALE on K2E even though the ALE Status/Documentation shows it has 8192 entries. So take advantage of this register to read the size of ALE table supported and use that value in the driver for the newer version of NetCP ALE. For NetCP lite, ALE Table size is much less (64) and indicated by a size of zero in ALE_STATUS. 
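A minimal sketch of that size decoding, using the ALE_STATUS_SIZE_MASK, multiplier and default constants introduced by the patch below (the surrounding harness is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define ALE_STATUS_SIZE_MASK		0x1f
#define ALE_TABLE_SIZE_MULTIPLIER	1024
#define ALE_TABLE_SIZE_DEFAULT		64

/* Decode the ALE table size advertised by ALE_STATUS. */
static unsigned long ale_entries_from_status(uint32_t ale_status)
{
	unsigned long entries = ale_status & ALE_STATUS_SIZE_MASK;

	if (!entries)
		return ALE_TABLE_SIZE_DEFAULT;		/* NetCP lite reads zero */
	return entries * ALE_TABLE_SIZE_MULTIPLIER;	/* multiple of 1024 entries */
}

int main(void)
{
	printf("%lu\n", ale_entries_from_status(8));	/* e.g. K2E: 8192 entries */
	printf("%lu\n", ale_entries_from_status(0));	/* NetCP lite: 64 entries */
	return 0;
}
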
So use that as a default for now. While at it, also fix the ale table size on 10G switch to 2048 per User guide http://www.ti.com/lit/ug/spruhj5/spruhj5.pdf Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw_ale.c | 31 ++++++++++++++++++++++++++++++- drivers/net/ethernet/ti/netcp_ethss.c | 4 +--- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index e15db39a2b69..62a18d61e0aa 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -33,6 +33,7 @@ /* ALE Registers */ #define ALE_IDVER 0x00 +#define ALE_STATUS 0x04 #define ALE_CONTROL 0x08 #define ALE_PRESCALE 0x10 #define ALE_UNKNOWNVLAN 0x18 @@ -58,6 +59,10 @@ #define ALE_UCAST_OUI 2 #define ALE_UCAST_TOUCHED 3 +#define ALE_TABLE_SIZE_MULTIPLIER 1024 +#define ALE_STATUS_SIZE_MASK 0x1f +#define ALE_TABLE_SIZE_DEFAULT 64 + static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { int idx; @@ -728,7 +733,7 @@ static void cpsw_ale_timer(unsigned long arg) void cpsw_ale_start(struct cpsw_ale *ale) { - u32 rev; + u32 rev, ale_entries; rev = __raw_readl(ale->params.ale_regs + ALE_IDVER); if (!ale->params.major_ver_mask) @@ -740,6 +745,30 @@ void cpsw_ale_start(struct cpsw_ale *ale) ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask), ALE_VERSION_MINOR(rev)); + if (!ale->params.ale_entries) { + ale_entries = + __raw_readl(ale->params.ale_regs + ALE_STATUS) & + ALE_STATUS_SIZE_MASK; + /* ALE available on newer NetCP switches has introduced + * a register, ALE_STATUS, to indicate the size of ALE + * table which shows the size as a multiple of 1024 entries. + * For these, params.ale_entries will be set to zero. So + * read the register and update the value of ale_entries. + * ALE table on NetCP lite, is much smaller and is indicated + * by a value of zero in ALE_STATUS. So use a default value + * of ALE_TABLE_SIZE_DEFAULT for this. Caller is expected + * to set the value of ale_entries for all other versions + * of ALE. + */ + if (!ale_entries) + ale_entries = ALE_TABLE_SIZE_DEFAULT; + else + ale_entries *= ALE_TABLE_SIZE_MULTIPLIER; + ale->params.ale_entries = ale_entries; + } + dev_info(ale->params.dev, + "ALE Table size %ld\n", ale->params.ale_entries); + if (ale->params.nu_switch_ale) { /* Separate registers for unknown vlan configuration. 
* Also there are N bits, where N is number of ale diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index e4a18628d23d..f7bb241b17ab 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -81,7 +81,6 @@ #define GBENU_CPTS_OFFSET 0x1d000 #define GBENU_ALE_OFFSET 0x1e000 #define GBENU_HOST_PORT_NUM 0 -#define GBENU_NUM_ALE_ENTRIES 1024 #define GBENU_SGMII_MODULE_SIZE 0x100 /* 10G Ethernet SS defines */ @@ -103,7 +102,7 @@ #define XGBE10_ALE_OFFSET 0x700 #define XGBE10_HW_STATS_OFFSET 0x800 #define XGBE10_HOST_PORT_NUM 0 -#define XGBE10_NUM_ALE_ENTRIES 1024 +#define XGBE10_NUM_ALE_ENTRIES 2048 #define GBE_TIMER_INTERVAL (HZ / 2) @@ -3441,7 +3440,6 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET; gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBENU_HOST_PORT_NUM; - gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ -- cgit v1.2.3 From b361da837392e7fc13189f223b12385de728be12 Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 6 Jan 2017 15:37:46 -0500 Subject: net: netcp: ale: add proper ale entry mask bits for netcp switch ALE For NetCP NU Switch ALE, some of the mask bits are different than defaults used in the driver. Add a new macro DEFINE_ALE_FIELD1 that use a configurable mask bits and use it in the driver. These bits are set to correct values by using the new variables added to cpsw_ale structure and re-used in the macros. The parameter nu_switch_ale is configured by the caller driver to indicate the ALE is for that switch and is used in the ALE driver to do customization as needed. Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw_ale.c | 99 ++++++++++++++++++++++++++++++-------- drivers/net/ethernet/ti/cpsw_ale.h | 4 ++ 2 files changed, 84 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 62a18d61e0aa..ddd43e09111e 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -29,6 +29,7 @@ #define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask)) #define ALE_VERSION_MINOR(rev) (rev & 0xff) +#define ALE_VERSION_1R3 0x0103 #define ALE_VERSION_1R4 0x0104 /* ALE Registers */ @@ -46,6 +47,7 @@ #define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD 0x94 #define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD 0x98 #define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C +#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg))) #define ALE_TABLE_WRITE BIT(31) @@ -96,20 +98,34 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \ cpsw_ale_set_field(ale_entry, start, bits, value); \ } +#define DEFINE_ALE_FIELD1(name, start) \ +static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \ +{ \ + return cpsw_ale_get_field(ale_entry, start, bits); \ +} \ +static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \ + u32 bits) \ +{ \ + cpsw_ale_set_field(ale_entry, start, bits, value); \ +} + DEFINE_ALE_FIELD(entry_type, 60, 2) DEFINE_ALE_FIELD(vlan_id, 48, 12) DEFINE_ALE_FIELD(mcast_state, 62, 2) -DEFINE_ALE_FIELD(port_mask, 66, 3) +DEFINE_ALE_FIELD1(port_mask, 66) DEFINE_ALE_FIELD(super, 65, 1) DEFINE_ALE_FIELD(ucast_type, 62, 2) -DEFINE_ALE_FIELD(port_num, 66, 2) +DEFINE_ALE_FIELD1(port_num, 66) DEFINE_ALE_FIELD(blocked, 65, 1) DEFINE_ALE_FIELD(secure, 64, 1) -DEFINE_ALE_FIELD(vlan_untag_force, 24, 3) -DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3) -DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3) -DEFINE_ALE_FIELD(vlan_member_list, 0, 3) +DEFINE_ALE_FIELD1(vlan_untag_force, 24) +DEFINE_ALE_FIELD1(vlan_reg_mcast, 16) +DEFINE_ALE_FIELD1(vlan_unreg_mcast, 8) +DEFINE_ALE_FIELD1(vlan_member_list, 0) DEFINE_ALE_FIELD(mcast, 40, 1) +/* ALE NetCP nu switch specific */ +DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3) +DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3) /* The MAC address field in the ALE entry cannot be macroized as above */ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr) @@ -235,14 +251,16 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, { int mask; - mask = cpsw_ale_get_port_mask(ale_entry); + mask = cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); if ((mask & port_mask) == 0) return; /* ports dont intersect, not interested */ mask &= ~port_mask; /* free if only remaining port is host port */ if (mask) - cpsw_ale_set_port_mask(ale_entry, mask); + cpsw_ale_set_port_mask(ale_entry, mask, + ale->port_mask_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } @@ -303,7 +321,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); - cpsw_ale_set_port_num(ale_entry, port); + cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits); idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) @@ -350,9 +368,11 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 
1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); - mask = cpsw_ale_get_port_mask(ale_entry); + mask = cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); port_mask |= mask; - cpsw_ale_set_port_mask(ale_entry, port_mask); + cpsw_ale_set_port_mask(ale_entry, port_mask, + ale->port_mask_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -379,7 +399,8 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, cpsw_ale_read(ale, idx, ale_entry); if (port_mask) - cpsw_ale_set_port_mask(ale_entry, port_mask); + cpsw_ale_set_port_mask(ale_entry, port_mask, + ale->port_mask_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); @@ -388,6 +409,21 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, } EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast); +/* ALE NetCP NU switch specific vlan functions */ +static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry, + int reg_mcast, int unreg_mcast) +{ + int idx; + + /* Set VLAN registered multicast flood mask */ + idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry); + writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); + + /* Set VLAN unregistered multicast flood mask */ + idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry); + writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); +} + int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast) { @@ -401,10 +437,16 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); cpsw_ale_set_vlan_id(ale_entry, vid); - cpsw_ale_set_vlan_untag_force(ale_entry, untag); - cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); - cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); - cpsw_ale_set_vlan_member_list(ale_entry, port); + cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits); + if (!ale->params.nu_switch_ale) { + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast, + ale->vlan_field_bits); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); + } else { + cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast); + } + cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -430,7 +472,8 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) cpsw_ale_read(ale, idx, ale_entry); if (port_mask) - cpsw_ale_set_vlan_member_list(ale_entry, port_mask); + cpsw_ale_set_vlan_member_list(ale_entry, port_mask, + ale->vlan_field_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); @@ -458,12 +501,15 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) if (type != ALE_TYPE_VLAN) continue; - unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); + unreg_mcast = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); if (allmulti) unreg_mcast |= 1; else unreg_mcast &= ~1; - cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); cpsw_ale_write(ale, idx, ale_entry); } } @@ -769,6 +815,14 @@ void cpsw_ale_start(struct cpsw_ale *ale) dev_info(ale->params.dev, "ALE Table size %ld\n", ale->params.ale_entries); + /* set default bits for existing h/w */ + ale->port_mask_bits = 3; + ale->port_num_bits = 2; + ale->vlan_field_bits = 3; + + /* Set defaults override for ALE on NetCP NU switch and for version + * 1R3 + */ if (ale->params.nu_switch_ale) { /* Separate registers for unknown vlan 
configuration. * Also there are N bits, where N is number of ale @@ -793,6 +847,13 @@ void cpsw_ale_start(struct cpsw_ale *ale) ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0; ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset = ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; + ale->port_mask_bits = ale->params.ale_ports; + ale->port_num_bits = ale->params.ale_ports - 1; + ale->vlan_field_bits = ale->params.ale_ports; + } else if (ale->version == ALE_VERSION_1R3) { + ale->port_mask_bits = ale->params.ale_ports; + ale->port_num_bits = 3; + ale->vlan_field_bits = ale->params.ale_ports; } cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index b1c79542633c..25d24e8d0904 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -39,6 +39,10 @@ struct cpsw_ale { unsigned long ageout; int allmulti; u32 version; + /* These bits are different on NetCP NU Switch ALE */ + u32 port_mask_bits; + u32 port_num_bits; + u32 vlan_field_bits; }; enum cpsw_ale_control { -- cgit v1.2.3 From 111427f6eb5a5d9ce22f8a90780ac1c18113091a Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 6 Jan 2017 16:42:00 -0500 Subject: net: dsa: move HWMON support to its own file Isolate the HWMON support in DSA in its own file. Currently only the legacy DSA code is concerned. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/Makefile | 1 + net/dsa/dsa.c | 131 +---------------------------------------------- net/dsa/dsa_priv.h | 9 ++++ net/dsa/hwmon.c | 147 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 159 insertions(+), 129 deletions(-) create mode 100644 net/dsa/hwmon.c diff --git a/net/dsa/Makefile b/net/dsa/Makefile index a3380ed0e0be..560b6747c276 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -1,6 +1,7 @@ # the core obj-$(CONFIG_NET_DSA) += dsa_core.o dsa_core-y += dsa.o slave.o dsa2.o +dsa_core-$(CONFIG_NET_DSA_HWMON) += hwmon.o # tagging formats dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 3f85be0aae34..cda787ebad15 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -9,9 +9,7 @@ * (at your option) any later version. 
*/ -#include #include -#include #include #include #include @@ -108,105 +106,6 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, return ret; } -/* hwmon support ************************************************************/ - -#ifdef CONFIG_NET_DSA_HWMON - -static ssize_t temp1_input_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - int temp, ret; - - ret = ds->ops->get_temp(ds, &temp); - if (ret < 0) - return ret; - - return sprintf(buf, "%d\n", temp * 1000); -} -static DEVICE_ATTR_RO(temp1_input); - -static ssize_t temp1_max_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - int temp, ret; - - ret = ds->ops->get_temp_limit(ds, &temp); - if (ret < 0) - return ret; - - return sprintf(buf, "%d\n", temp * 1000); -} - -static ssize_t temp1_max_store(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - int temp, ret; - - ret = kstrtoint(buf, 0, &temp); - if (ret < 0) - return ret; - - ret = ds->ops->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); - if (ret < 0) - return ret; - - return count; -} -static DEVICE_ATTR_RW(temp1_max); - -static ssize_t temp1_max_alarm_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - bool alarm; - int ret; - - ret = ds->ops->get_temp_alarm(ds, &alarm); - if (ret < 0) - return ret; - - return sprintf(buf, "%d\n", alarm); -} -static DEVICE_ATTR_RO(temp1_max_alarm); - -static struct attribute *dsa_hwmon_attrs[] = { - &dev_attr_temp1_input.attr, /* 0 */ - &dev_attr_temp1_max.attr, /* 1 */ - &dev_attr_temp1_max_alarm.attr, /* 2 */ - NULL -}; - -static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, - struct attribute *attr, int index) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct dsa_switch *ds = dev_get_drvdata(dev); - struct dsa_switch_ops *ops = ds->ops; - umode_t mode = attr->mode; - - if (index == 1) { - if (!ops->get_temp_limit) - mode = 0; - else if (!ops->set_temp_limit) - mode &= ~S_IWUSR; - } else if (index == 2 && !ops->get_temp_alarm) { - mode = 0; - } - return mode; -} - -static const struct attribute_group dsa_hwmon_group = { - .attrs = dsa_hwmon_attrs, - .is_visible = dsa_hwmon_attrs_visible, -}; -__ATTRIBUTE_GROUPS(dsa_hwmon); - -#endif /* CONFIG_NET_DSA_HWMON */ - /* basic switch operations **************************************************/ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, struct device_node *port_dn, int port) @@ -415,30 +314,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) if (ret) return ret; -#ifdef CONFIG_NET_DSA_HWMON - /* If the switch provides a temperature sensor, - * register with hardware monitoring subsystem. - * Treat registration error as non-fatal and ignore it. 
- */ - if (ops->get_temp) { - const char *netname = netdev_name(dst->master_netdev); - char hname[IFNAMSIZ + 1]; - int i, j; - - /* Create valid hwmon 'name' attribute */ - for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) { - if (isalnum(netname[i])) - hname[j++] = netname[i]; - } - hname[j] = '\0'; - scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d", - hname, index); - ds->hwmon_dev = hwmon_device_register_with_groups(NULL, - ds->hwmon_name, ds, dsa_hwmon_groups); - if (IS_ERR(ds->hwmon_dev)) - ds->hwmon_dev = NULL; - } -#endif /* CONFIG_NET_DSA_HWMON */ + dsa_hwmon_register(ds); return 0; } @@ -498,10 +374,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds) { int port; -#ifdef CONFIG_NET_DSA_HWMON - if (ds->hwmon_dev) - hwmon_device_unregister(ds->hwmon_dev); -#endif + dsa_hwmon_unregister(ds); /* Destroy network devices for physical switch ports. */ for (port = 0; port < DSA_MAX_PORTS; port++) { diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 63ae1484abae..7e3385ec73f4 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -56,6 +56,15 @@ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol); int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds); void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds); +/* hwmon.c */ +#ifdef CONFIG_NET_DSA_HWMON +void dsa_hwmon_register(struct dsa_switch *ds); +void dsa_hwmon_unregister(struct dsa_switch *ds); +#else +static inline void dsa_hwmon_register(struct dsa_switch *ds) { } +static inline void dsa_hwmon_unregister(struct dsa_switch *ds) { } +#endif + /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); diff --git a/net/dsa/hwmon.c b/net/dsa/hwmon.c new file mode 100644 index 000000000000..3a9cdf0b22b8 --- /dev/null +++ b/net/dsa/hwmon.c @@ -0,0 +1,147 @@ +/* + * net/dsa/hwmon.c - HWMON subsystem support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include + +#include "dsa_priv.h" + +static ssize_t temp1_input_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = ds->ops->get_temp(ds, &temp); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", temp * 1000); +} +static DEVICE_ATTR_RO(temp1_input); + +static ssize_t temp1_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = ds->ops->get_temp_limit(ds, &temp); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", temp * 1000); +} + +static ssize_t temp1_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = kstrtoint(buf, 0, &temp); + if (ret < 0) + return ret; + + ret = ds->ops->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); + if (ret < 0) + return ret; + + return count; +} +static DEVICE_ATTR_RW(temp1_max); + +static ssize_t temp1_max_alarm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + bool alarm; + int ret; + + ret = ds->ops->get_temp_alarm(ds, &alarm); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", alarm); +} +static DEVICE_ATTR_RO(temp1_max_alarm); + +static struct attribute *dsa_hwmon_attrs[] = { + &dev_attr_temp1_input.attr, /* 0 */ + &dev_attr_temp1_max.attr, /* 1 */ + &dev_attr_temp1_max_alarm.attr, /* 2 */ + NULL +}; + +static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct dsa_switch *ds = dev_get_drvdata(dev); + struct dsa_switch_ops *ops = ds->ops; + umode_t mode = attr->mode; + + if (index == 1) { + if (!ops->get_temp_limit) + mode = 0; + else if (!ops->set_temp_limit) + mode &= ~S_IWUSR; + } else if (index == 2 && !ops->get_temp_alarm) { + mode = 0; + } + return mode; +} + +static const struct attribute_group dsa_hwmon_group = { + .attrs = dsa_hwmon_attrs, + .is_visible = dsa_hwmon_attrs_visible, +}; +__ATTRIBUTE_GROUPS(dsa_hwmon); + +void dsa_hwmon_register(struct dsa_switch *ds) +{ + const char *netname = netdev_name(ds->dst->master_netdev); + char hname[IFNAMSIZ + 1]; + int i, j; + + /* If the switch provides temperature accessors, register with hardware + * monitoring subsystem. Treat registration error as non-fatal. 
+ */ + if (!ds->ops->get_temp) + return; + + /* Create valid hwmon 'name' attribute */ + for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) { + if (isalnum(netname[i])) + hname[j++] = netname[i]; + } + hname[j] = '\0'; + scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d", hname, + ds->index); + ds->hwmon_dev = hwmon_device_register_with_groups(NULL, ds->hwmon_name, + ds, dsa_hwmon_groups); + if (IS_ERR(ds->hwmon_dev)) { + pr_warn("DSA: failed to register HWMON subsystem for switch %d\n", + ds->index); + ds->hwmon_dev = NULL; + } else { + pr_info("DSA: registered HWMON subsystem for switch %d\n", + ds->index); + } +} + +void dsa_hwmon_unregister(struct dsa_switch *ds) +{ + if (ds->hwmon_dev) { + hwmon_device_unregister(ds->hwmon_dev); + ds->hwmon_dev = NULL; + } +} -- cgit v1.2.3 From de8d6e02efbdb259c67832ccf027d7ace9b91d5d Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:19 +0200 Subject: IB/mlx5: Fix kernel to user leak prevention logic The logic was broken as it failed to update the response length for architectures with PAGE_SIZE larger than 4kB. As a result further extension of the ucontext response struct would fail. Fixes: d69e3bcf7976 ('IB/mlx5: Mmap the HCA's core clock register to user-space') Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 86c61e73780e..852b5b7b4897 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1148,13 +1148,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, * pretend we don't support reading the HCA's core clock. This is also * forced by mmap function. */ - if (PAGE_SIZE <= 4096 && - field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { - resp.comp_mask |= - MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; - resp.hca_core_clock_offset = - offsetof(struct mlx5_init_seg, internal_timer_h) % - PAGE_SIZE; + if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { + if (PAGE_SIZE <= 4096) { + resp.comp_mask |= + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; + resp.hca_core_clock_offset = + offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE; + } resp.response_length += sizeof(resp.hca_core_clock_offset) + sizeof(resp.reserved2); } -- cgit v1.2.3 From f4044dac63e952ac1137b6df02b233d37696e2f5 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:20 +0200 Subject: IB/mlx5: Fix error handling order in create_kernel_qp Make sure order of cleanup is exactly the opposite of initialization. 
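For illustration, the generic kernel pattern being restored here (the alloc_a()/alloc_b()/alloc_c() helpers and labels below are hypothetical, not actual mlx5 symbols) is to unwind resources in strict reverse order of acquisition:

	err = alloc_a(&a);
	if (err)
		return err;

	err = alloc_b(&b);
	if (err)
		goto err_a;

	err = alloc_c(&c);
	if (err)
		goto err_b;

	return 0;

err_b:
	free_b(&b);	/* last acquired, freed first */
err_a:
	free_a(&a);	/* first acquired, freed last */
	return err;

In the patch below this amounts to moving mlx5_db_free() after the kfree() calls so the teardown mirrors the setup order.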
Fixes: 9603b61de1ee ('mlx5: Move pci device handling from mlx5_ib to mlx5_core') Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/qp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 53f4dd32f956..42d021cdc6c5 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -994,12 +994,12 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, return 0; err_wrid: - mlx5_db_free(dev->mdev, &qp->db); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); kfree(qp->sq.wr_data); kfree(qp->rq.wrid); + mlx5_db_free(dev->mdev, &qp->db); err_free: kvfree(*in); @@ -1014,12 +1014,12 @@ err_uuar: static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) { - mlx5_db_free(dev->mdev, &qp->db); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); kfree(qp->sq.wr_data); kfree(qp->rq.wrid); + mlx5_db_free(dev->mdev, &qp->db); mlx5_buf_free(dev->mdev, &qp->buf); free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); } -- cgit v1.2.3 From 2f5ff26478adaff5ed9b7ad4079d6a710b5f27e7 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:21 +0200 Subject: mlx5: Fix naming convention with respect to UARs This establishes a solid naming conventions for UARs. A UAR (User Access Region) can have size identical to a system page or can be fixed 4KB depending on a value queried by firmware. Each UAR always has 4 blue flame register which are used to post doorbell to send queue. In addition, a UAR has section used for posting doorbells to CQs or EQs. In this patch we change names to reflect this conventions. Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/cq.c | 6 +- drivers/infiniband/hw/mlx5/main.c | 80 +++++------ drivers/infiniband/hw/mlx5/mlx5_ib.h | 6 +- drivers/infiniband/hw/mlx5/qp.c | 176 ++++++++++++------------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 8 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 8 +- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 90 ++++++------- include/linux/mlx5/device.h | 9 +- include/linux/mlx5/driver.h | 14 +- include/uapi/rdma/mlx5-abi.h | 12 +- 10 files changed, 206 insertions(+), 203 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b3ef47c3ab73..bb7e91c55003 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; struct mlx5_ib_cq *cq = to_mcq(ibcq); - void __iomem *uar_page = mdev->priv.uuari.uars[0].map; + void __iomem *uar_page = mdev->priv.bfregi.uars[0].map; unsigned long irq_flags; int ret = 0; @@ -790,7 +790,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = to_mucontext(context)->uuari.uars[0].index; + *index = to_mucontext(context)->bfregi.uars[0].index; if (ucmd.cqe_comp_en == 1) { if (unlikely((*cqe_size != 64) || @@ -886,7 +886,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, MLX5_SET(cqc, cqc, log_page_size, cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = dev->mdev->priv.uuari.uars[0].index; + *index = 
dev->mdev->priv.bfregi.uars[0].index; return 0; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 852b5b7b4897..d5cf82b387d3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -999,12 +999,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct mlx5_ib_alloc_ucontext_req_v2 req = {}; struct mlx5_ib_alloc_ucontext_resp resp = {}; struct mlx5_ib_ucontext *context; - struct mlx5_uuar_info *uuari; + struct mlx5_bfreg_info *bfregi; struct mlx5_uar *uars; - int gross_uuars; + int gross_bfregs; int num_uars; int ver; - int uuarn; + int bfregn; int err; int i; size_t reqlen; @@ -1032,10 +1032,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (req.flags) return ERR_PTR(-EINVAL); - if (req.total_num_uuars > MLX5_MAX_UUARS) + if (req.total_num_bfregs > MLX5_MAX_BFREGS) return ERR_PTR(-ENOMEM); - if (req.total_num_uuars == 0) + if (req.total_num_bfregs == 0) return ERR_PTR(-EINVAL); if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) @@ -1046,13 +1046,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, reqlen - sizeof(req))) return ERR_PTR(-EOPNOTSUPP); - req.total_num_uuars = ALIGN(req.total_num_uuars, - MLX5_NON_FP_BF_REGS_PER_PAGE); - if (req.num_low_latency_uuars > req.total_num_uuars - 1) + req.total_num_bfregs = ALIGN(req.total_num_bfregs, + MLX5_NON_FP_BFREGS_PER_UAR); + if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) return ERR_PTR(-EINVAL); - num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; - gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; + num_uars = req.total_num_bfregs / MLX5_NON_FP_BFREGS_PER_UAR; + gross_bfregs = num_uars * MLX5_BFREGS_PER_UAR; resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); @@ -1072,32 +1072,33 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (!context) return ERR_PTR(-ENOMEM); - uuari = &context->uuari; - mutex_init(&uuari->lock); + bfregi = &context->bfregi; + mutex_init(&bfregi->lock); uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); if (!uars) { err = -ENOMEM; goto out_ctx; } - uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars), - sizeof(*uuari->bitmap), + bfregi->bitmap = kcalloc(BITS_TO_LONGS(gross_bfregs), + sizeof(*bfregi->bitmap), GFP_KERNEL); - if (!uuari->bitmap) { + if (!bfregi->bitmap) { err = -ENOMEM; goto out_uar_ctx; } /* - * clear all fast path uuars + * clear all fast path bfregs */ - for (i = 0; i < gross_uuars; i++) { - uuarn = i & 3; - if (uuarn == 2 || uuarn == 3) - set_bit(i, uuari->bitmap); + for (i = 0; i < gross_bfregs; i++) { + bfregn = i & 3; + if (bfregn == 2 || bfregn == 3) + set_bit(i, bfregi->bitmap); } - uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL); - if (!uuari->count) { + bfregi->count = kcalloc(gross_bfregs, + sizeof(*bfregi->count), GFP_KERNEL); + if (!bfregi->count) { err = -ENOMEM; goto out_bitmap; } @@ -1130,7 +1131,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); - resp.tot_uuars = req.total_num_uuars; + resp.tot_bfregs = req.total_num_bfregs; resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); if (field_avail(typeof(resp), cqe_version, udata->outlen)) @@ -1163,10 +1164,10 @@ static struct ib_ucontext 
*mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (err) goto out_td; - uuari->ver = ver; - uuari->num_low_latency_uuars = req.num_low_latency_uuars; - uuari->uars = uars; - uuari->num_uars = num_uars; + bfregi->ver = ver; + bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; + bfregi->uars = uars; + bfregi->num_uars = num_uars; context->cqe_version = resp.cqe_version; return &context->ibucontext; @@ -1182,10 +1183,10 @@ out_uars: for (i--; i >= 0; i--) mlx5_cmd_free_uar(dev->mdev, uars[i].index); out_count: - kfree(uuari->count); + kfree(bfregi->count); out_bitmap: - kfree(uuari->bitmap); + kfree(bfregi->bitmap); out_uar_ctx: kfree(uars); @@ -1199,7 +1200,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); - struct mlx5_uuar_info *uuari = &context->uuari; + struct mlx5_bfreg_info *bfregi = &context->bfregi; int i; if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) @@ -1207,14 +1208,15 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) free_page(context->upd_xlt_page); - for (i = 0; i < uuari->num_uars; i++) { - if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) - mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); + for (i = 0; i < bfregi->num_uars; i++) { + if (mlx5_cmd_free_uar(dev->mdev, bfregi->uars[i].index)) + mlx5_ib_warn(dev, "Failed to free UAR 0x%x\n", + bfregi->uars[i].index); } - kfree(uuari->count); - kfree(uuari->bitmap); - kfree(uuari->uars); + kfree(bfregi->count); + kfree(bfregi->bitmap); + kfree(bfregi->uars); kfree(context); return 0; @@ -1377,7 +1379,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, struct vm_area_struct *vma, struct mlx5_ib_ucontext *context) { - struct mlx5_uuar_info *uuari = &context->uuari; + struct mlx5_bfreg_info *bfregi = &context->bfregi; int err; unsigned long idx; phys_addr_t pfn, pa; @@ -1408,10 +1410,10 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, return -EINVAL; idx = get_index(vma->vm_pgoff); - if (idx >= uuari->num_uars) + if (idx >= bfregi->num_uars) return -EINVAL; - pfn = uar_index2pfn(dev, uuari->uars[idx].index); + pfn = uar_index2pfn(dev, bfregi->uars[idx].index); mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); vma->vm_page_prot = prot; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index a51c8051aeb2..d4d1329df94a 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -100,7 +100,7 @@ enum mlx5_ib_mad_ifc_flags { }; enum { - MLX5_CROSS_CHANNEL_UUAR = 0, + MLX5_CROSS_CHANNEL_BFREG = 0, }; enum { @@ -120,7 +120,7 @@ struct mlx5_ib_ucontext { /* protect doorbell record alloc/free */ struct mutex db_page_mutex; - struct mlx5_uuar_info uuari; + struct mlx5_bfreg_info bfregi; u8 cqe_version; /* Transport Domain number */ u32 tdn; @@ -355,7 +355,7 @@ struct mlx5_ib_qp { /* only for user space QPs. 
For kernel * we have it from the bf object */ - int uuarn; + int bfregn; int create_type; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 42d021cdc6c5..fbea9bd63c8e 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -475,12 +475,12 @@ static int qp_has_rq(struct ib_qp_init_attr *attr) return 1; } -static int first_med_uuar(void) +static int first_med_bfreg(void) { return 1; } -static int next_uuar(int n) +static int next_bfreg(int n) { n++; @@ -490,45 +490,45 @@ static int next_uuar(int n) return n; } -static int num_med_uuar(struct mlx5_uuar_info *uuari) +static int num_med_bfreg(struct mlx5_bfreg_info *bfregi) { int n; - n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE - - uuari->num_low_latency_uuars - 1; + n = bfregi->num_uars * MLX5_NON_FP_BFREGS_PER_UAR - + bfregi->num_low_latency_bfregs - 1; return n >= 0 ? n : 0; } -static int max_uuari(struct mlx5_uuar_info *uuari) +static int max_bfregi(struct mlx5_bfreg_info *bfregi) { - return uuari->num_uars * 4; + return bfregi->num_uars * 4; } -static int first_hi_uuar(struct mlx5_uuar_info *uuari) +static int first_hi_bfreg(struct mlx5_bfreg_info *bfregi) { int med; int i; int t; - med = num_med_uuar(uuari); - for (t = 0, i = first_med_uuar();; i = next_uuar(i)) { + med = num_med_bfreg(bfregi); + for (t = 0, i = first_med_bfreg();; i = next_bfreg(i)) { t++; if (t == med) - return next_uuar(i); + return next_bfreg(i); } return 0; } -static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) +static int alloc_high_class_bfreg(struct mlx5_bfreg_info *bfregi) { int i; - for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) { - if (!test_bit(i, uuari->bitmap)) { - set_bit(i, uuari->bitmap); - uuari->count[i]++; + for (i = first_hi_bfreg(bfregi); i < max_bfregi(bfregi); i = next_bfreg(i)) { + if (!test_bit(i, bfregi->bitmap)) { + set_bit(i, bfregi->bitmap); + bfregi->count[i]++; return i; } } @@ -536,87 +536,87 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) return -ENOMEM; } -static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) +static int alloc_med_class_bfreg(struct mlx5_bfreg_info *bfregi) { - int minidx = first_med_uuar(); + int minidx = first_med_bfreg(); int i; - for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) { - if (uuari->count[i] < uuari->count[minidx]) + for (i = first_med_bfreg(); i < first_hi_bfreg(bfregi); i = next_bfreg(i)) { + if (bfregi->count[i] < bfregi->count[minidx]) minidx = i; } - uuari->count[minidx]++; + bfregi->count[minidx]++; return minidx; } -static int alloc_uuar(struct mlx5_uuar_info *uuari, - enum mlx5_ib_latency_class lat) +static int alloc_bfreg(struct mlx5_bfreg_info *bfregi, + enum mlx5_ib_latency_class lat) { - int uuarn = -EINVAL; + int bfregn = -EINVAL; - mutex_lock(&uuari->lock); + mutex_lock(&bfregi->lock); switch (lat) { case MLX5_IB_LATENCY_CLASS_LOW: - uuarn = 0; - uuari->count[uuarn]++; + bfregn = 0; + bfregi->count[bfregn]++; break; case MLX5_IB_LATENCY_CLASS_MEDIUM: - if (uuari->ver < 2) - uuarn = -ENOMEM; + if (bfregi->ver < 2) + bfregn = -ENOMEM; else - uuarn = alloc_med_class_uuar(uuari); + bfregn = alloc_med_class_bfreg(bfregi); break; case MLX5_IB_LATENCY_CLASS_HIGH: - if (uuari->ver < 2) - uuarn = -ENOMEM; + if (bfregi->ver < 2) + bfregn = -ENOMEM; else - uuarn = alloc_high_class_uuar(uuari); + bfregn = alloc_high_class_bfreg(bfregi); break; case MLX5_IB_LATENCY_CLASS_FAST_PATH: - uuarn = 2; + bfregn = 2; break; } - mutex_unlock(&uuari->lock); + 
mutex_unlock(&bfregi->lock); - return uuarn; + return bfregn; } -static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) +static void free_med_class_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn) { - clear_bit(uuarn, uuari->bitmap); - --uuari->count[uuarn]; + clear_bit(bfregn, bfregi->bitmap); + --bfregi->count[bfregn]; } -static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) +static void free_high_class_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn) { - clear_bit(uuarn, uuari->bitmap); - --uuari->count[uuarn]; + clear_bit(bfregn, bfregi->bitmap); + --bfregi->count[bfregn]; } -static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn) +static void free_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn) { - int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; - int high_uuar = nuuars - uuari->num_low_latency_uuars; + int nbfregs = bfregi->num_uars * MLX5_BFREGS_PER_UAR; + int high_bfreg = nbfregs - bfregi->num_low_latency_bfregs; - mutex_lock(&uuari->lock); - if (uuarn == 0) { - --uuari->count[uuarn]; + mutex_lock(&bfregi->lock); + if (bfregn == 0) { + --bfregi->count[bfregn]; goto out; } - if (uuarn < high_uuar) { - free_med_class_uuar(uuari, uuarn); + if (bfregn < high_bfreg) { + free_med_class_bfreg(bfregi, bfregn); goto out; } - free_high_class_uuar(uuari, uuarn); + free_high_class_bfreg(bfregi, bfregn); out: - mutex_unlock(&uuari->lock); + mutex_unlock(&bfregi->lock); } static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) @@ -657,9 +657,9 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq); -static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn) +static int bfregn_to_uar_index(struct mlx5_bfreg_info *bfregi, int bfregn) { - return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; + return bfregi->uars[bfregn / MLX5_BFREGS_PER_UAR].index; } static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, @@ -776,7 +776,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, int uar_index; int npages; u32 offset = 0; - int uuarn; + int bfregn; int ncont = 0; __be64 *pas; void *qpc; @@ -794,27 +794,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, */ if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) /* In CROSS_CHANNEL CQ and QP must use the same UAR */ - uuarn = MLX5_CROSS_CHANNEL_UUAR; + bfregn = MLX5_CROSS_CHANNEL_BFREG; else { - uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); - if (uuarn < 0) { - mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); + bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH); + if (bfregn < 0) { + mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n"); mlx5_ib_dbg(dev, "reverting to medium latency\n"); - uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM); - if (uuarn < 0) { - mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n"); + bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM); + if (bfregn < 0) { + mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n"); mlx5_ib_dbg(dev, "reverting to high latency\n"); - uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); - if (uuarn < 0) { - mlx5_ib_warn(dev, "uuar allocation failed\n"); - return uuarn; + bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_LOW); + if (bfregn < 0) { + mlx5_ib_warn(dev, "bfreg allocation failed\n"); + return bfregn; } } } } - uar_index = uuarn_to_uar_index(&context->uuari, uuarn); - 
mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); + uar_index = bfregn_to_uar_index(&context->bfregi, bfregn); + mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index); qp->rq.offset = 0; qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); @@ -822,7 +822,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, err = set_user_buf_size(dev, qp, &ucmd, base, attr); if (err) - goto err_uuar; + goto err_bfreg; if (ucmd.buf_addr && ubuffer->buf_size) { ubuffer->buf_addr = ucmd.buf_addr; @@ -831,7 +831,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, &ubuffer->umem, &npages, &page_shift, &ncont, &offset); if (err) - goto err_uuar; + goto err_bfreg; } else { ubuffer->umem = NULL; } @@ -854,8 +854,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, MLX5_SET(qpc, qpc, page_offset, offset); MLX5_SET(qpc, qpc, uar_page, uar_index); - resp->uuar_index = uuarn; - qp->uuarn = uuarn; + resp->bfreg_index = bfregn; + qp->bfregn = bfregn; err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); if (err) { @@ -882,8 +882,8 @@ err_umem: if (ubuffer->umem) ib_umem_release(ubuffer->umem); -err_uuar: - free_uuar(&context->uuari, uuarn); +err_bfreg: + free_bfreg(&context->bfregi, bfregn); return err; } @@ -896,7 +896,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, mlx5_ib_db_unmap_user(context, &qp->db); if (base->ubuffer.umem) ib_umem_release(base->ubuffer.umem); - free_uuar(&context->uuari, qp->uuarn); + free_bfreg(&context->bfregi, qp->bfregn); } static int create_kernel_qp(struct mlx5_ib_dev *dev, @@ -906,13 +906,13 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp_base *base) { enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; - struct mlx5_uuar_info *uuari; + struct mlx5_bfreg_info *bfregi; int uar_index; void *qpc; - int uuarn; + int bfregn; int err; - uuari = &dev->mdev->priv.uuari; + bfregi = &dev->mdev->priv.bfregi; if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_QP_CREATE_IPOIB_UD_LSO | @@ -922,19 +922,19 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; - uuarn = alloc_uuar(uuari, lc); - if (uuarn < 0) { + bfregn = alloc_bfreg(bfregi, lc); + if (bfregn < 0) { mlx5_ib_dbg(dev, "\n"); return -ENOMEM; } - qp->bf = &uuari->bfs[uuarn]; + qp->bf = &bfregi->bfs[bfregn]; uar_index = qp->bf->uar->index; err = calc_sq_size(dev, init_attr, qp); if (err < 0) { mlx5_ib_dbg(dev, "err %d\n", err); - goto err_uuar; + goto err_bfreg; } qp->rq.offset = 0; @@ -944,7 +944,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); - goto err_uuar; + goto err_bfreg; } qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); @@ -1007,8 +1007,8 @@ err_free: err_buf: mlx5_buf_free(dev->mdev, &qp->buf); -err_uuar: - free_uuar(&dev->mdev->priv.uuari, uuarn); +err_bfreg: + free_bfreg(&dev->mdev->priv.bfregi, bfregn); return err; } @@ -1021,7 +1021,7 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) kfree(qp->rq.wrid); mlx5_db_free(dev->mdev, &qp->db); mlx5_buf_free(dev->mdev, &qp->buf); - free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); + free_bfreg(&dev->mdev->priv.bfregi, qp->bf->bfregn); } static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) @@ -1353,7 +1353,7 @@ static int 
create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (init_attr->create_flags || init_attr->send_cq) return -EINVAL; - min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index); + min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index); if (udata->outlen < min_resp_len) return -EINVAL; @@ -4132,7 +4132,7 @@ out: __acquire(&bf->lock); /* TBD enable WC */ - if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { + if (0 && nreq == 1 && bf->bfregn && inl && size > 1 && size <= bf->buf_size / 16) { mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); /* wc_wmb(); */ } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 4aff8ac68e14..11a8d638bcd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -686,7 +686,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, - "mlx5_cmd_eq", &dev->priv.uuari.uars[0], + "mlx5_cmd_eq", &dev->priv.bfregi.uars[0], MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); @@ -697,7 +697,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, MLX5_NUM_ASYNC_EQE, async_event_mask, - "mlx5_async_eq", &dev->priv.uuari.uars[0], + "mlx5_async_eq", &dev->priv.bfregi.uars[0], MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create async EQ %d\n", err); @@ -708,7 +708,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) MLX5_EQ_VEC_PAGES, /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", - &dev->priv.uuari.uars[0], + &dev->priv.bfregi.uars[0], MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); @@ -722,7 +722,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) MLX5_NUM_ASYNC_EQE, 1 << MLX5_EVENT_TYPE_PAGE_FAULT, "mlx5_page_fault_eq", - &dev->priv.uuari.uars[0], + &dev->priv.bfregi.uars[0], MLX5_EQ_TYPE_PF); if (err) { mlx5_core_warn(dev, "failed to create page fault EQ %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f4115135e30b..634e96a02516 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -753,7 +753,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, - name, &dev->priv.uuari.uars[0], + name, &dev->priv.bfregi.uars[0], MLX5_EQ_TYPE_COMP); if (err) { kfree(eq); @@ -1094,7 +1094,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_cleanup_once; } - err = mlx5_alloc_uuars(dev, &priv->uuari); + err = mlx5_alloc_bfregs(dev, &priv->bfregi); if (err) { dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); goto err_disable_msix; @@ -1170,7 +1170,7 @@ err_stop_eqs: mlx5_stop_eqs(dev); err_free_uar: - mlx5_free_uuars(dev, &priv->uuari); + mlx5_free_bfregs(dev, &priv->bfregi); err_disable_msix: mlx5_disable_msix(dev); @@ -1230,7 +1230,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); - mlx5_free_uuars(dev, &priv->uuari); + mlx5_free_bfregs(dev, 
&priv->bfregi); mlx5_disable_msix(dev); if (cleanup) mlx5_cleanup_once(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index ab0b896621a0..ce7fcebb81a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -39,7 +39,7 @@ enum { NUM_DRIVER_UARS = 4, - NUM_LOW_LAT_UUARS = 4, + NUM_LOW_LAT_BFREGS = 4, }; int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) @@ -67,116 +67,116 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) } EXPORT_SYMBOL(mlx5_cmd_free_uar); -static int need_uuar_lock(int uuarn) +static int need_bfreg_lock(int bfregn) { - int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR; - if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) + if (bfregn == 0 || tot_bfregs - NUM_LOW_LAT_BFREGS) return 0; return 1; } -int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +int mlx5_alloc_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi) { - int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR; struct mlx5_bf *bf; phys_addr_t addr; int err; int i; - uuari->num_uars = NUM_DRIVER_UARS; - uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; + bfregi->num_uars = NUM_DRIVER_UARS; + bfregi->num_low_latency_bfregs = NUM_LOW_LAT_BFREGS; - mutex_init(&uuari->lock); - uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); - if (!uuari->uars) + mutex_init(&bfregi->lock); + bfregi->uars = kcalloc(bfregi->num_uars, sizeof(*bfregi->uars), GFP_KERNEL); + if (!bfregi->uars) return -ENOMEM; - uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); - if (!uuari->bfs) { + bfregi->bfs = kcalloc(tot_bfregs, sizeof(*bfregi->bfs), GFP_KERNEL); + if (!bfregi->bfs) { err = -ENOMEM; goto out_uars; } - uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), + bfregi->bitmap = kcalloc(BITS_TO_LONGS(tot_bfregs), sizeof(*bfregi->bitmap), GFP_KERNEL); - if (!uuari->bitmap) { + if (!bfregi->bitmap) { err = -ENOMEM; goto out_bfs; } - uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); - if (!uuari->count) { + bfregi->count = kcalloc(tot_bfregs, sizeof(*bfregi->count), GFP_KERNEL); + if (!bfregi->count) { err = -ENOMEM; goto out_bitmap; } - for (i = 0; i < uuari->num_uars; i++) { - err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); + for (i = 0; i < bfregi->num_uars; i++) { + err = mlx5_cmd_alloc_uar(dev, &bfregi->uars[i].index); if (err) goto out_count; - addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); - uuari->uars[i].map = ioremap(addr, PAGE_SIZE); - if (!uuari->uars[i].map) { - mlx5_cmd_free_uar(dev, uuari->uars[i].index); + addr = dev->iseg_base + ((phys_addr_t)(bfregi->uars[i].index) << PAGE_SHIFT); + bfregi->uars[i].map = ioremap(addr, PAGE_SIZE); + if (!bfregi->uars[i].map) { + mlx5_cmd_free_uar(dev, bfregi->uars[i].index); err = -ENOMEM; goto out_count; } mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", - uuari->uars[i].index, uuari->uars[i].map); + bfregi->uars[i].index, bfregi->uars[i].map); } - for (i = 0; i < tot_uuars; i++) { - bf = &uuari->bfs[i]; + for (i = 0; i < tot_bfregs; i++) { + bf = &bfregi->bfs[i]; bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; - bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; - bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; + bf->uar = &bfregi->uars[i 
/ MLX5_BFREGS_PER_UAR]; + bf->regreg = bfregi->uars[i / MLX5_BFREGS_PER_UAR].map; bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * + bf->offset = (i % MLX5_BFREGS_PER_UAR) * (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + MLX5_BF_OFFSET; - bf->need_lock = need_uuar_lock(i); + bf->need_lock = need_bfreg_lock(i); spin_lock_init(&bf->lock); spin_lock_init(&bf->lock32); - bf->uuarn = i; + bf->bfregn = i; } return 0; out_count: for (i--; i >= 0; i--) { - iounmap(uuari->uars[i].map); - mlx5_cmd_free_uar(dev, uuari->uars[i].index); + iounmap(bfregi->uars[i].map); + mlx5_cmd_free_uar(dev, bfregi->uars[i].index); } - kfree(uuari->count); + kfree(bfregi->count); out_bitmap: - kfree(uuari->bitmap); + kfree(bfregi->bitmap); out_bfs: - kfree(uuari->bfs); + kfree(bfregi->bfs); out_uars: - kfree(uuari->uars); + kfree(bfregi->uars); return err; } -int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +int mlx5_free_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi) { - int i = uuari->num_uars; + int i = bfregi->num_uars; for (i--; i >= 0; i--) { - iounmap(uuari->uars[i].map); - mlx5_cmd_free_uar(dev, uuari->uars[i].index); + iounmap(bfregi->uars[i].map); + mlx5_cmd_free_uar(dev, bfregi->uars[i].index); } - kfree(uuari->count); - kfree(uuari->bitmap); - kfree(uuari->bfs); - kfree(uuari->uars); + kfree(bfregi->count); + kfree(bfregi->bitmap); + kfree(bfregi->bfs); + kfree(bfregi->uars); return 0; } diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 3ccaeff15a80..aa851c51ab59 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -212,10 +212,11 @@ enum { }; enum { - MLX5_BF_REGS_PER_PAGE = 4, - MLX5_MAX_UAR_PAGES = 1 << 8, - MLX5_NON_FP_BF_REGS_PER_PAGE = 2, - MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE, + MLX5_BFREGS_PER_UAR = 4, + MLX5_MAX_UARS = 1 << 8, + MLX5_NON_FP_BFREGS_PER_UAR = 2, + MLX5_MAX_BFREGS = MLX5_MAX_UARS * + MLX5_NON_FP_BFREGS_PER_UAR, }; enum { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index cfa49bca009c..3d07e25b3bf1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -188,16 +188,16 @@ enum mlx5_eq_type { #endif }; -struct mlx5_uuar_info { +struct mlx5_bfreg_info { struct mlx5_uar *uars; int num_uars; - int num_low_latency_uuars; + int num_low_latency_bfregs; unsigned long *bitmap; unsigned int *count; struct mlx5_bf *bfs; /* - * protect uuar allocation data structs + * protect bfreg allocation data structs */ struct mutex lock; u32 ver; @@ -217,7 +217,7 @@ struct mlx5_bf { /* serialize 64 bit writes when done as two 32 bit accesses */ spinlock_t lock32; - int uuarn; + int bfregn; }; struct mlx5_cmd_first { @@ -579,7 +579,7 @@ struct mlx5_priv { struct mlx5_eq_table eq_table; struct msix_entry *msix_arr; struct mlx5_irq_info *irq_info; - struct mlx5_uuar_info uuari; + struct mlx5_bfreg_info bfregi; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); /* pages stuff */ @@ -903,8 +903,8 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); -int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +int mlx5_alloc_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi); +int 
mlx5_free_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi); int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, bool map_wc); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index fae6cdaeb56d..86a8f30060f3 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -61,13 +61,13 @@ enum { */ struct mlx5_ib_alloc_ucontext_req { - __u32 total_num_uuars; - __u32 num_low_latency_uuars; + __u32 total_num_bfregs; + __u32 num_low_latency_bfregs; }; struct mlx5_ib_alloc_ucontext_req_v2 { - __u32 total_num_uuars; - __u32 num_low_latency_uuars; + __u32 total_num_bfregs; + __u32 num_low_latency_bfregs; __u32 flags; __u32 comp_mask; __u8 max_cqe_version; @@ -88,7 +88,7 @@ enum mlx5_user_cmds_supp_uhw { struct mlx5_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 bf_reg_size; - __u32 tot_uuars; + __u32 tot_bfregs; __u32 cache_line_size; __u16 max_sq_desc_sz; __u16 max_rq_desc_sz; @@ -241,7 +241,7 @@ struct mlx5_ib_create_qp_rss { }; struct mlx5_ib_create_qp_resp { - __u32 uuar_index; + __u32 bfreg_index; }; struct mlx5_ib_alloc_mw { -- cgit v1.2.3 From 0b80c14f009758cefeed0edff4f9141957964211 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:22 +0200 Subject: IB/mlx5: Fix retrieval of index to first hi class bfreg First the function retrieving the index of the first hi latency class blue flame register. High latency class bfregs are located right above medium latency class bfregs. Fixes: c1be5232d21d ('IB/mlx5: Fix micro UAR allocator') Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/qp.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index fbea9bd63c8e..240fbb0c63ba 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -490,12 +490,21 @@ static int next_bfreg(int n) return n; } +enum { + /* this is the first blue flame register in the array of bfregs assigned + * to a processes. Since we do not use it for blue flame but rather + * regular 64 bit doorbells, we do not need a lock for maintaiing + * "odd/even" order + */ + NUM_NON_BLUE_FLAME_BFREGS = 1, +}; + static int num_med_bfreg(struct mlx5_bfreg_info *bfregi) { int n; n = bfregi->num_uars * MLX5_NON_FP_BFREGS_PER_UAR - - bfregi->num_low_latency_bfregs - 1; + bfregi->num_low_latency_bfregs - NUM_NON_BLUE_FLAME_BFREGS; return n >= 0 ? 
n : 0; } @@ -508,17 +517,9 @@ static int max_bfregi(struct mlx5_bfreg_info *bfregi) static int first_hi_bfreg(struct mlx5_bfreg_info *bfregi) { int med; - int i; - int t; med = num_med_bfreg(bfregi); - for (t = 0, i = first_med_bfreg();; i = next_bfreg(i)) { - t++; - if (t == med) - return next_bfreg(i); - } - - return 0; + return next_bfreg(med); } static int alloc_high_class_bfreg(struct mlx5_bfreg_info *bfregi) @@ -544,6 +545,8 @@ static int alloc_med_class_bfreg(struct mlx5_bfreg_info *bfregi) for (i = first_med_bfreg(); i < first_hi_bfreg(bfregi); i = next_bfreg(i)) { if (bfregi->count[i] < bfregi->count[minidx]) minidx = i; + if (!bfregi->count[minidx]) + break; } bfregi->count[minidx]++; @@ -558,6 +561,7 @@ static int alloc_bfreg(struct mlx5_bfreg_info *bfregi, mutex_lock(&bfregi->lock); switch (lat) { case MLX5_IB_LATENCY_CLASS_LOW: + BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1); bfregn = 0; bfregi->count[bfregn]++; break; -- cgit v1.2.3 From a6d51b68611e98f05042ada662aed5dbe3279c1e Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:23 +0200 Subject: net/mlx5: Introduce blue flame register allocator Here is an implementation of an allocator that allocates blue flame registers. A blue flame register is used for generating send doorbells. A blue flame register can be used to generate either a regular doorbell or a blue flame doorbell where the data to be sent is written to the device's I/O memory hence saving the need to read the data from memory. For blue flame kind of doorbells to succeed, the blue flame register need to be mapped as write combining. The user can specify what kind of send doorbells she wishes to use. If she requested write combining mapping but that failed, the allocator will fall back to non write combining mapping and will indicate that to the user. Subsequent patches in this series will make use of this allocator. 
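A minimal usage sketch, not part of the patch, may help clarify how a consumer added later in the series is expected to drive this allocator. The wrapper functions example_setup_sq_doorbell()/example_teardown_sq_doorbell() below are hypothetical; mlx5_alloc_bfreg(), mlx5_free_bfreg(), struct mlx5_sq_bfreg and the bfreg->wc flag are the symbols actually added by the diff that follows.

#include <linux/mlx5/driver.h>

/* Hypothetical caller: acquire a doorbell register for a send queue. */
static int example_setup_sq_doorbell(struct mlx5_core_dev *mdev,
				     struct mlx5_sq_bfreg *bfreg)
{
	int err;

	/* Ask for a write-combining mapping so blue flame doorbells can be
	 * used; on failure of the WC ioremap the core retries with a regular
	 * mapping and records the outcome in bfreg->wc.
	 */
	err = mlx5_alloc_bfreg(mdev, bfreg, true, false);
	if (err)
		return err;

	if (!bfreg->wc)
		pr_debug("bfreg mapped without WC; only regular doorbells\n");

	return 0;
}

/* Hypothetical teardown path. */
static void example_teardown_sq_doorbell(struct mlx5_core_dev *mdev,
					 struct mlx5_sq_bfreg *bfreg)
{
	mlx5_free_bfreg(mdev, bfreg);
}

Requesting write combining first and then checking bfreg->wc mirrors the fallback behaviour described above: the allocator downgrades to a non-WC mapping only when the WC mapping fails, and the caller can then decide whether to issue blue flame or regular 64-bit doorbells.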
Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 235 ++++++++++++++++++++++++++ include/linux/mlx5/device.h | 2 + include/linux/mlx5/driver.h | 37 ++++ include/linux/mlx5/mlx5_ifc.h | 7 +- 4 files changed, 279 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index ce7fcebb81a3..6a081a8787a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -231,3 +231,238 @@ void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) mlx5_cmd_free_uar(mdev, uar->index); } EXPORT_SYMBOL(mlx5_unmap_free_uar); + +static int uars_per_sys_page(struct mlx5_core_dev *mdev) +{ + if (MLX5_CAP_GEN(mdev, uar_4k)) + return MLX5_CAP_GEN(mdev, num_of_uars_per_page); + + return 1; +} + +static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index) +{ + u32 system_page_index; + + if (MLX5_CAP_GEN(mdev, uar_4k)) + system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT); + else + system_page_index = index; + + return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index; +} + +static void up_rel_func(struct kref *kref) +{ + struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count); + + list_del(&up->list); + if (mlx5_cmd_free_uar(up->mdev, up->index)) + mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); + kfree(up->reg_bitmap); + kfree(up->fp_bitmap); + kfree(up); +} + +static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev, + bool map_wc) +{ + struct mlx5_uars_page *up; + int err = -ENOMEM; + phys_addr_t pfn; + int bfregs; + int i; + + bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR; + up = kzalloc(sizeof(*up), GFP_KERNEL); + if (!up) + return ERR_PTR(err); + + up->mdev = mdev; + up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL); + if (!up->reg_bitmap) + goto error1; + + up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL); + if (!up->fp_bitmap) + goto error1; + + for (i = 0; i < bfregs; i++) + if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR) + set_bit(i, up->reg_bitmap); + else + set_bit(i, up->fp_bitmap); + + up->bfregs = bfregs; + up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR; + up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR; + + err = mlx5_cmd_alloc_uar(mdev, &up->index); + if (err) { + mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); + goto error1; + } + + pfn = uar2pfn(mdev, up->index); + if (map_wc) { + up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!up->map) { + err = -EAGAIN; + goto error2; + } + } else { + up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!up->map) { + err = -ENOMEM; + goto error2; + } + } + kref_init(&up->ref_count); + mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n", + up->index, up->bfregs); + return up; + +error2: + if (mlx5_cmd_free_uar(mdev, up->index)) + mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index); +error1: + kfree(up->fp_bitmap); + kfree(up->reg_bitmap); + kfree(up); + return ERR_PTR(err); +} + +static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi) +{ + /* return the offset in bytes from the start of the page to the + * blue flame area of the UAR + */ + return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE + + 
(dbi % MLX5_BFREGS_PER_UAR) * + (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET; +} + +static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path) +{ + struct mlx5_bfreg_data *bfregs; + struct mlx5_uars_page *up; + struct list_head *head; + unsigned long *bitmap; + unsigned int *avail; + struct mutex *lock; /* pointer to right mutex */ + int dbi; + + bfregs = &mdev->priv.bfregs; + if (map_wc) { + head = &bfregs->wc_head.list; + lock = &bfregs->wc_head.lock; + } else { + head = &bfregs->reg_head.list; + lock = &bfregs->reg_head.lock; + } + mutex_lock(lock); + if (list_empty(head)) { + up = alloc_uars_page(mdev, map_wc); + if (IS_ERR(up)) { + mutex_unlock(lock); + return PTR_ERR(up); + } + list_add(&up->list, head); + } else { + up = list_entry(head->next, struct mlx5_uars_page, list); + kref_get(&up->ref_count); + } + if (fast_path) { + bitmap = up->fp_bitmap; + avail = &up->fp_avail; + } else { + bitmap = up->reg_bitmap; + avail = &up->reg_avail; + } + dbi = find_first_bit(bitmap, up->bfregs); + clear_bit(dbi, bitmap); + (*avail)--; + if (!(*avail)) + list_del(&up->list); + + bfreg->map = up->map + map_offset(mdev, dbi); + bfreg->up = up; + bfreg->wc = map_wc; + bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR; + mutex_unlock(lock); + + return 0; +} + +int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path) +{ + int err; + + err = alloc_bfreg(mdev, bfreg, map_wc, fast_path); + if (!err) + return 0; + + if (err == -EAGAIN && map_wc) + return alloc_bfreg(mdev, bfreg, false, fast_path); + + return err; +} +EXPORT_SYMBOL(mlx5_alloc_bfreg); + +static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev, + struct mlx5_uars_page *up, + struct mlx5_sq_bfreg *bfreg) +{ + unsigned int uar_idx; + unsigned int bfreg_idx; + unsigned int bf_reg_size; + + bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size); + + uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT; + bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size; + + return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx; +} + +void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg) +{ + struct mlx5_bfreg_data *bfregs; + struct mlx5_uars_page *up; + struct mutex *lock; /* pointer to right mutex */ + unsigned int dbi; + bool fp; + unsigned int *avail; + unsigned long *bitmap; + struct list_head *head; + + bfregs = &mdev->priv.bfregs; + if (bfreg->wc) { + head = &bfregs->wc_head.list; + lock = &bfregs->wc_head.lock; + } else { + head = &bfregs->reg_head.list; + lock = &bfregs->reg_head.lock; + } + up = bfreg->up; + dbi = addr_to_dbi_in_syspage(mdev, up, bfreg); + fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR; + if (fp) { + avail = &up->fp_avail; + bitmap = up->fp_bitmap; + } else { + avail = &up->reg_avail; + bitmap = up->reg_bitmap; + } + mutex_lock(lock); + (*avail)++; + set_bit(dbi, bitmap); + if (*avail == 1) + list_add_tail(&up->list, head); + + kref_put(&up->ref_count, up_rel_func); + mutex_unlock(lock); +} +EXPORT_SYMBOL(mlx5_free_bfreg); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index aa851c51ab59..db1b9287012f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -215,6 +215,8 @@ enum { MLX5_BFREGS_PER_UAR = 4, MLX5_MAX_UARS = 1 << 8, MLX5_NON_FP_BFREGS_PER_UAR = 2, + MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR - + MLX5_NON_FP_BFREGS_PER_UAR, MLX5_MAX_BFREGS = MLX5_MAX_UARS * 
MLX5_NON_FP_BFREGS_PER_UAR, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3d07e25b3bf1..969aa1fe17e2 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -452,6 +452,39 @@ struct mlx5_eq_table { spinlock_t lock; }; +struct mlx5_uars_page { + void __iomem *map; + bool wc; + u32 index; + struct list_head list; + unsigned int bfregs; + unsigned long *reg_bitmap; /* for non fast path bf regs */ + unsigned long *fp_bitmap; + unsigned int reg_avail; + unsigned int fp_avail; + struct kref ref_count; + struct mlx5_core_dev *mdev; +}; + +struct mlx5_bfreg_head { + /* protect blue flame registers allocations */ + struct mutex lock; + struct list_head list; +}; + +struct mlx5_bfreg_data { + struct mlx5_bfreg_head reg_head; + struct mlx5_bfreg_head wc_head; +}; + +struct mlx5_sq_bfreg { + void __iomem *map; + struct mlx5_uars_page *up; + bool wc; + u32 index; + unsigned int offset; +}; + struct mlx5_uar { u32 index; struct list_head bf_list; @@ -645,6 +678,7 @@ struct mlx5_priv { void *pfault_ctx; struct srcu_struct pfault_srcu; #endif + struct mlx5_bfreg_data bfregs; }; enum mlx5_device_state { @@ -1022,6 +1056,9 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path); +void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); static inline int fw_initializing(struct mlx5_core_dev *dev) { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 15f896781966..1223feff0ea4 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -905,7 +905,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_at_240[0xa]; + u8 uar_4k[0x1]; + u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; u8 reserved_at_250[0x8]; u8 log_pg_sz[0x8]; @@ -997,7 +998,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; - u8 reserved_at_500[0x80]; + u8 reserved_at_500[0x20]; + u8 num_of_uars_per_page[0x20]; + u8 reserved_at_540[0x40]; u8 reserved_at_580[0x3f]; u8 cqe_compression[0x1]; -- cgit v1.2.3 From 50bfaee50c005b707e9eee2ffb0bbb69b8efbf53 Mon Sep 17 00:00:00 2001 From: Ngai-Mint Kwan Date: Wed, 2 Nov 2016 16:44:44 -0700 Subject: fm10k-shared: use mac-> instead of hw->mac. Since a pointer "mac" to fm10k_mac_info structure exists, use it to access the contents of its members. 
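For readers skimming the hunks below, a condensed illustration of the pattern: the local mac pointer already exists in fm10k_get_host_state_generic(), and the change simply uses it instead of re-dereferencing hw->mac. The wrapper example_request_lport_map() is hypothetical; the fields and ops shown are taken from the hunk itself.

/* Hypothetical condensed form of the cleanup in fm10k_get_host_state_generic(). */
static s32 example_request_lport_map(struct fm10k_hw *hw)
{
	struct fm10k_mac_info *mac = &hw->mac;	/* cached once, reused below */

	/* interface cannot receive traffic without logical ports */
	if (mac->dglort_map == FM10K_DGLORTMAP_NONE && mac->ops.request_lport_map)
		return mac->ops.request_lport_map(hw);

	return 0;
}

Caching &hw->mac in a local pointer keeps the state check and the callback invocation on one struct, which is exactly what the two hunks below switch to.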
Signed-off-by: Ngai-Mint Kwan Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index dd95ac4f4c64..62a6ad9b3eed 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -506,7 +506,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) goto out; /* if we somehow dropped the Tx enable we should reset */ - if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { + if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { ret_val = FM10K_ERR_RESET_REQUESTED; goto out; } @@ -523,8 +523,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) /* interface cannot receive traffic without logical ports */ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) { - if (hw->mac.ops.request_lport_map) - ret_val = hw->mac.ops.request_lport_map(hw); + if (mac->ops.request_lport_map) + ret_val = mac->ops.request_lport_map(hw); goto out; } -- cgit v1.2.3 From aee243334461f83f3cb2e8c19e9a6cdedd35ee4b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 2 Nov 2016 16:44:45 -0700 Subject: fm10k: remove extraneous variable definition in fm10k_ethtool.c We don't need to typecast a u8 * into a char *, so just remove the extra variable. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 5241e0873397..0c84fef750f4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -148,7 +148,7 @@ enum { static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { }; -static void fm10k_add_stat_strings(char **p, const char *prefix, +static void fm10k_add_stat_strings(u8 **p, const char *prefix, const struct fm10k_stats stats[], const unsigned int size) { @@ -164,32 +164,31 @@ static void fm10k_add_stat_strings(char **p, const char *prefix, static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); - char *p = (char *)data; unsigned int i; - fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats, FM10K_NETDEV_STATS_LEN); - fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats, FM10K_GLOBAL_STATS_LEN); - fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats, FM10K_MBX_STATS_LEN); if (interface->hw.mac.type != fm10k_mac_vf) - fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats, FM10K_PF_STATS_LEN); for (i = 0; i < interface->hw.mac.max_queues; i++) { char prefix[ETH_GSTRING_LEN]; snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i); - fm10k_add_stat_strings(&p, prefix, + fm10k_add_stat_strings(&data, prefix, fm10k_gstrings_queue_stats, FM10K_QUEUE_STATS_LEN); snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i); - fm10k_add_stat_strings(&p, prefix, + fm10k_add_stat_strings(&data, prefix, fm10k_gstrings_queue_stats, 
FM10K_QUEUE_STATS_LEN); } @@ -198,18 +197,16 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - char *p = (char *)data; - switch (stringset) { case ETH_SS_TEST: - memcpy(data, *fm10k_gstrings_test, + memcpy(data, fm10k_gstrings_test, FM10K_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: fm10k_get_stat_strings(dev, data); break; case ETH_SS_PRIV_FLAGS: - memcpy(p, fm10k_prv_flags, + memcpy(data, fm10k_prv_flags, FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN); break; } -- cgit v1.2.3 From 2f3fc1e6200309ccf87f61dea56e57e563c4f800 Mon Sep 17 00:00:00 2001 From: Ngai-Mint Kwan Date: Wed, 2 Nov 2016 16:44:46 -0700 Subject: fm10k: request reset when mbx->state changes Multiple IES API resets can cause a race condition where the mailbox interrupt request bits can be cleared before being handled. This can leave certain mailbox messages from the PF to be untreated and the PF will enter in some inactive state. If this situation occurs, the IES API will initiate a mailbox version reset which, then, trigger a mailbox state change. Once this mailbox transition occurs (from OPEN to CONNECT state), a request for reset will be returned. This ensures that PF will undergo a reset whenever IES API encounters an unknown global mailbox interrupt event or whenever the IES API terminates. Signed-off-by: Ngai-Mint Kwan Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 10 +++++++--- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 6 +++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index c9dfa6564fcf..334088a101c3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -2011,9 +2011,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, * function can also be used to respond to an error as the connection * resetting would also be a means of dealing with errors. 
**/ -static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, - struct fm10k_mbx_info *mbx) +static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) { + s32 err = 0; const enum fm10k_mbx_state state = mbx->state; switch (state) { @@ -2026,6 +2027,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, case FM10K_STATE_OPEN: /* flush any incomplete work */ fm10k_sm_mbx_connect_reset(mbx); + err = FM10K_ERR_RESET_REQUESTED; break; case FM10K_STATE_CONNECT: /* Update remote value to match local value */ @@ -2035,6 +2037,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, } fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); + + return err; } /** @@ -2115,7 +2119,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { case 0: - fm10k_sm_mbx_process_reset(hw, mbx); + err = fm10k_sm_mbx_process_reset(hw, mbx); break; case FM10K_SM_MBX_VERSION: err = fm10k_sm_mbx_process_version_1(hw, mbx); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index b1a2f8437d59..e372a5823480 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1144,6 +1144,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) struct fm10k_hw *hw = &interface->hw; struct fm10k_mbx_info *mbx = &hw->mbx; u32 eicr; + s32 err = 0; /* unmask any set bits related to this interrupt */ eicr = fm10k_read_reg(hw, FM10K_EICR); @@ -1159,12 +1160,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) /* service mailboxes */ if (fm10k_mbx_trylock(interface)) { - mbx->ops.process(hw, mbx); + err = mbx->ops.process(hw, mbx); /* handle VFLRE events */ fm10k_iov_event(interface); fm10k_mbx_unlock(interface); } + if (err == FM10K_ERR_RESET_REQUESTED) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + /* if switch toggled state we should reset GLORTs */ if (eicr & FM10K_EICR_SWITCHNOTREADY) { /* force link down for at least 4 seconds */ -- cgit v1.2.3 From f0524955177c3e5cea89ceec56ad9d538530fe4f Mon Sep 17 00:00:00 2001 From: Ngai-Mint Kwan Date: Wed, 2 Nov 2016 16:44:47 -0700 Subject: fm10k: do not clear global mailbox interrupt bits Partially revert commit 5e93cbadd3e9 ("fm10k: Reset mailbox global interrupts", 2016-06-07) The register bits related to this commit are now solely being handled by the IES API. Recent changes in the IES API will allow an automatic recovery from improper handling of these bits. 
Signed-off-by: Ngai-Mint Kwan Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 23fb319fd2a0..40ee0242a80a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -72,10 +72,6 @@ force_reset: fm10k_write_flush(hw); udelay(FM10K_RESET_TIMEOUT); - /* Reset mailbox global interrupts */ - reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT; - fm10k_write_reg(hw, FM10K_GMBX, reg); - /* Verify we made it out of reset */ reg = fm10k_read_reg(hw, FM10K_IP); if (!(reg & FM10K_IP_NOTINRESET)) -- cgit v1.2.3 From 6721f2dad51164fe9da7b3c2f8156529d716b888 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 3 Nov 2016 16:05:47 -0700 Subject: fm10k: bump version number Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5de937852436..509514d3ce6c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.21.2-k" +#define DRV_VERSION "0.21.7-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; -- cgit v1.2.3 From 881571c12fed1bacf100b13fb2ac528faf57390c Mon Sep 17 00:00:00 2001 From: Scott Peterson Date: Thu, 3 Nov 2016 16:09:41 -0700 Subject: fm10k: Limit dma sync of RX buffers to actual packet size On packet RX, we perform a dma sync for cpu before passing the packet up. Here we limit that sync to the actual length of the incoming packet, rather than always syncing the entire buffer. Signed-off-by: Scott Peterson Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 509514d3ce6c..8f90c6db53ff 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -251,6 +251,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, /** * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_buffer: buffer containing page to add + * @size: packet size from rx_desc * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * @@ -263,12 +264,12 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, * true if the buffer can be reused by the interface. 
**/ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, + unsigned int size, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->w.length); #if (PAGE_SIZE < 8192) unsigned int truesize = FM10K_RX_BUFSZ; #else @@ -314,6 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { + unsigned int size = le16_to_cpu(rx_desc->w.length); struct fm10k_rx_buffer *rx_buffer; struct page *page; @@ -350,11 +352,11 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - FM10K_RX_BUFSZ, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { + if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */ fm10k_reuse_rx_page(rx_ring, rx_buffer); } else { -- cgit v1.2.3 From b5db29f08afc063b49178b23c9e221434000e9ba Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 18 Nov 2016 09:45:38 -0800 Subject: fm10k: report the receive timestamp in FM10K_CB(skb)->tstamp This was accidentally removed when we defeatured the full 1588 Clock support. We need to report the Rx descriptor timestamp value so that applications built on top of the IES API can function properly. Additionally, remove the FM10K_FLAG_RX_TS_ENABLED, as it is not used now that 1588 functionality has been removed. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 5 ++--- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 4d19e46f7c55..75d2c8018815 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -260,9 +260,8 @@ struct fm10k_intfc { #define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0)) #define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1)) #define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2)) -#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3)) -#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4)) -#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5)) +#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3)) +#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(4)) int xcast_mode; /* Tx fast path data */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 8f90c6db53ff..5bb233a9614c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -475,6 +475,8 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, fm10k_rx_checksum(rx_ring, rx_desc, skb); + FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; + FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; skb_record_rx_queue(skb, rx_ring->queue_index); -- cgit v1.2.3 From 0035ddf4399ca9a86c593a25fcf1bcd09b042f10 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 18 Nov 2016 09:45:39 -0800 Subject: fm10k: remove FM10K_FLAG_DEBUG_STATS The debug statistics were removed due to complications with the ethtool statistics API which are not possible to resolve without a new statistics interface. The flag was left behind, but we no longer need it. 
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 75d2c8018815..52b979443cde 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -261,7 +261,6 @@ struct fm10k_intfc { #define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1)) #define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2)) #define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3)) -#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(4)) int xcast_mode; /* Tx fast path data */ -- cgit v1.2.3 From 60889869147e3dcd62032005a85eb7693bf828d8 Mon Sep 17 00:00:00 2001 From: Derek Chickles Date: Fri, 6 Jan 2017 17:16:12 -0800 Subject: liquidio: simplify octeon_flush_iq() Because every call to octeon_flush_iq() has a hardcoded 1 for the pending_thresh argument, simplify that function by removing that argument. This avoids one atomic read as well. Signed-off-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: Satanand Burla Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 2 +- .../net/ethernet/cavium/liquidio/request_manager.c | 49 +++++++++++----------- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 6443bc13af62..13f67a32cc4d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2441,7 +2441,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) iq = oct->instr_queue[iq_no]; if (iq) { /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, 1, budget); + tx_done = octeon_flush_iq(oct, iq, budget); /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. */ diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 70d96c10c673..55846f2d7e46 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1627,7 +1627,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) iq = oct->instr_queue[iq_no]; if (iq) { /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, 1, budget); + tx_done = octeon_flush_iq(oct, iq, budget); /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. 
*/ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index e04ca8f0b4a7..4608a5af35a3 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -369,5 +369,5 @@ int octeon_setup_iq(struct octeon_device *oct, int ifidx, void *app_ctx); int octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh, u32 napi_budget); + u32 napi_budget); #endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 3ce66759e80a..707bc15adec6 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -455,7 +455,7 @@ lio_process_iq_request_list(struct octeon_device *oct, /* Can only be called from process context */ int octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh, u32 napi_budget) + u32 napi_budget) { u32 inst_processed = 0; u32 tot_inst_processed = 0; @@ -468,33 +468,32 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); - if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { - do { - /* Process any outstanding IQ packets. */ - if (iq->flush_index == iq->octeon_read_index) - break; - - if (napi_budget) - inst_processed = lio_process_iq_request_list - (oct, iq, - napi_budget - tot_inst_processed); - else - inst_processed = - lio_process_iq_request_list(oct, iq, 0); + do { + /* Process any outstanding IQ packets. */ + if (iq->flush_index == iq->octeon_read_index) + break; - if (inst_processed) { - atomic_sub(inst_processed, &iq->instr_pending); - iq->stats.instr_processed += inst_processed; - } + if (napi_budget) + inst_processed = + lio_process_iq_request_list(oct, iq, + napi_budget - + tot_inst_processed); + else + inst_processed = + lio_process_iq_request_list(oct, iq, 0); + + if (inst_processed) { + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; + } - tot_inst_processed += inst_processed; - inst_processed = 0; + tot_inst_processed += inst_processed; + inst_processed = 0; - } while (tot_inst_processed < napi_budget); + } while (tot_inst_processed < napi_budget); - if (napi_budget && (tot_inst_processed >= napi_budget)) - tx_done = 0; - } + if (napi_budget && (tot_inst_processed >= napi_budget)) + tx_done = 0; iq->last_db_time = jiffies; @@ -530,7 +529,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no) iq->last_db_time = jiffies; /* Flush the instruction queue */ - octeon_flush_iq(oct, iq, 1, 0); + octeon_flush_iq(oct, iq, 0); lio_enable_irq(NULL, iq); } -- cgit v1.2.3 From 9f09eaeae28a0c67b8fc2b5acb411a5d4fd8cc80 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 6 Jan 2017 17:39:06 -0800 Subject: net: ipmr: Remove nowait arg to ipmr_get_route ipmr_get_route has 1 caller and the nowait arg is 0. Remove the arg and simplify ipmr_get_route accordingly. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/mroute.h | 2 +- net/ipv4/ipmr.c | 7 +------ net/ipv4/route.c | 2 +- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/include/linux/mroute.h b/include/linux/mroute.h index e5fb81376e92..f019b62f27b5 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -120,5 +120,5 @@ struct mfc_cache { struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait, u32 portid); + struct rtmsg *rtm, u32 portid); #endif diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b35dda57586b..824c4fdf21eb 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2136,7 +2136,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait, u32 portid) + struct rtmsg *rtm, u32 portid) { struct mfc_cache *cache; struct mr_table *mrt; @@ -2160,11 +2160,6 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, struct net_device *dev; int vif = -1; - if (nowait) { - rcu_read_unlock(); - return -EAGAIN; - } - dev = skb->dev; read_lock(&mrt_lock); if (dev) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 7b52ac20145b..0965f8c59e7e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2541,7 +2541,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { int err = ipmr_get_route(net, skb, fl4->saddr, fl4->daddr, - r, 0, portid); + r, portid); if (err <= 0) { if (err == 0) -- cgit v1.2.3 From dc33da59ff887985db559d0b116ca8b98f63651e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 6 Jan 2017 17:39:58 -0800 Subject: net: ipv4: Remove flow arg from ip_mkroute_input fl4 arg is not used; remove it. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/route.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0965f8c59e7e..f51823dc998b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1758,7 +1758,6 @@ standard_hash: static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, - const struct flowi4 *fl4, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { @@ -1883,7 +1882,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (res.type != RTN_UNICAST) goto martian_destination; - err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos); + err = ip_mkroute_input(skb, &res, in_dev, daddr, saddr, tos); out: return err; brd_input: -- cgit v1.2.3 From bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 6 Jan 2017 19:12:52 -0800 Subject: net: make ndo_get_stats64 a void function The network device operation for reading statistics is only called in one place, and it ignores the return value. Having a structure return value is potentially confusing because some future driver could incorrectly assume that the return value was used. Fix all drivers with ndo_get_stats64 to have a void function. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 10 ++++------ drivers/net/dummy.c | 5 ++--- drivers/net/ethernet/alacritech/slicoss.c | 6 ++---- drivers/net/ethernet/amazon/ena/ena_netdev.c | 10 ++++------ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 ++---- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4 +--- drivers/net/ethernet/atheros/alx/main.c | 6 ++---- drivers/net/ethernet/broadcom/b44.c | 5 ++--- drivers/net/ethernet/broadcom/bnx2.c | 5 ++--- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++---- drivers/net/ethernet/broadcom/tg3.c | 8 +++----- drivers/net/ethernet/brocade/bna/bnad.c | 6 ++---- drivers/net/ethernet/calxeda/xgmac.c | 5 ++--- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 5 ++--- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 7 +++---- drivers/net/ethernet/cisco/enic/enic_main.c | 8 +++----- drivers/net/ethernet/ec_bhf.c | 4 +--- drivers/net/ethernet/emulex/benet/be_main.c | 5 ++--- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6 ++---- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6 ++---- drivers/net/ethernet/ibm/ehea/ehea_main.c | 5 ++--- drivers/net/ethernet/intel/e1000e/e1000.h | 4 ++-- drivers/net/ethernet/intel/e1000e/netdev.c | 5 ++--- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 6 ++---- drivers/net/ethernet/intel/i40e/i40e.h | 5 ++--- drivers/net/ethernet/intel/i40e/i40e_main.c | 18 ++++++------------ drivers/net/ethernet/intel/igb/igb_main.c | 10 ++++------ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 ++++--- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 ++---- drivers/net/ethernet/marvell/mvneta.c | 4 +--- drivers/net/ethernet/marvell/mvpp2.c | 4 +--- drivers/net/ethernet/marvell/sky2.c | 6 ++---- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 6 ++---- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 +--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3 +-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +--- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 3 +-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 9 ++++----- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4 +--- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6 ++---- drivers/net/ethernet/nvidia/forcedeth.c | 4 +--- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 10 ++++------ drivers/net/ethernet/qlogic/qede/qede_main.c | 7 ++----- drivers/net/ethernet/qualcomm/emac/emac.c | 6 ++---- drivers/net/ethernet/realtek/8139too.c | 9 +++------ drivers/net/ethernet/realtek/r8169.c | 4 +--- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 8 ++------ drivers/net/ethernet/sfc/efx.c | 6 ++---- drivers/net/ethernet/sfc/falcon/efx.c | 6 ++---- drivers/net/ethernet/sun/niu.c | 6 ++---- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 4 +--- drivers/net/ethernet/tile/tilepro.c | 4 ++-- drivers/net/ethernet/via/via-rhine.c | 8 +++----- drivers/net/fjes/fjes_main.c | 7 ++----- drivers/net/hyperv/netvsc_drv.c | 6 ++---- drivers/net/ifb.c | 6 ++---- drivers/net/ipvlan/ipvlan_main.c | 5 ++--- drivers/net/loopback.c | 5 ++--- drivers/net/macsec.c | 8 +++----- drivers/net/macvlan.c | 5 ++--- drivers/net/nlmon.c | 4 +--- drivers/net/ppp/ppp_generic.c | 4 +--- drivers/net/slip/slip.c | 3 +-- drivers/net/team/team.c | 3 +-- drivers/net/tun.c | 3 +-- drivers/net/veth.c | 6 ++---- drivers/net/virtio_net.c | 6 ++---- drivers/net/vmxnet3/vmxnet3_ethtool.c | 4 +--- drivers/net/vmxnet3/vmxnet3_int.h | 4 ++-- drivers/net/vrf.c | 5 ++--- drivers/net/xen-netfront.c | 6 ++---- drivers/staging/netlogic/xlr_net.c | 10 
+--------- include/linux/netdevice.h | 8 ++++---- include/net/ip_tunnels.h | 4 ++-- net/8021q/vlan_dev.c | 5 ++--- net/bridge/br_device.c | 6 ++---- net/ipv4/ip_tunnel_core.c | 6 ++---- net/l2tp/l2tp_eth.c | 6 ++---- net/mac80211/iface.c | 4 +--- net/openvswitch/vport-internal_dev.c | 4 +--- net/sched/sch_teql.c | 5 ++--- 82 files changed, 166 insertions(+), 309 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8029dd4912b6..36919221b3f0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -211,8 +211,8 @@ static int lacp_fast; static int bond_init(struct net_device *bond_dev); static void bond_uninit(struct net_device *bond_dev); -static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, - struct rtnl_link_stats64 *stats); +static void bond_get_stats(struct net_device *bond_dev, + struct rtnl_link_stats64 *stats); static void bond_slave_arr_handler(struct work_struct *work); static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, int mod); @@ -3337,8 +3337,8 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } -static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, - struct rtnl_link_stats64 *stats) +static void bond_get_stats(struct net_device *bond_dev, + struct rtnl_link_stats64 *stats) { struct bonding *bond = netdev_priv(bond_dev); struct rtnl_link_stats64 temp; @@ -3362,8 +3362,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, memcpy(&bond->bond_stats, stats, sizeof(*stats)); spin_unlock(&bond->stats_lock); - - return stats; } static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 6421835f11b7..1f2de4e8207c 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -54,8 +54,8 @@ struct pcpu_dstats { struct u64_stats_sync syncp; }; -static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void dummy_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { int i; @@ -73,7 +73,6 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev, stats->tx_bytes += tbytes; stats->tx_packets += tpackets; } - return stats; } static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index b21d8aa8d653..15a8096c60df 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1471,8 +1471,8 @@ drop_skb: return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *lst) +static void slic_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *lst) { struct slic_device *sdev = netdev_priv(dev); struct slic_stats *stats = &sdev->stats; @@ -1489,8 +1489,6 @@ static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev, SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc); SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802); SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier); - - return lst; } static int slic_get_sset_count(struct net_device *dev, int sset) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index cc8b13ebfa75..aca95b397393 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ 
b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2165,19 +2165,19 @@ err: ena_com_delete_debug_area(adapter->ena_dev); } -static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void ena_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_admin_basic_stats ena_stats; int rc; if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) - return NULL; + return; rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats); if (rc) - return NULL; + return; stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) | ena_stats.tx_bytes_low; @@ -2204,8 +2204,6 @@ static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, stats->rx_errors = 0; stats->tx_errors = 0; - - return stats; } static const struct net_device_ops ena_netdev_ops = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 155190db682d..130de11fa553 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1759,8 +1759,8 @@ static void xgbe_tx_timeout(struct net_device *netdev) schedule_work(&pdata->restart_work); } -static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *s) +static void xgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *s) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; @@ -1786,8 +1786,6 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, s->tx_dropped = netdev->stats.tx_dropped; DBGPR("<--%s\n", __func__); - - return s; } static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 523b8eff6d7b..45f2204e6695 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1453,7 +1453,7 @@ err: return ret; } -static struct rtnl_link_stats64 *xgene_enet_get_stats64( +static void xgene_enet_get_stats64( struct net_device *ndev, struct rtnl_link_stats64 *storage) { @@ -1484,8 +1484,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64( } } memcpy(storage, stats, sizeof(struct rtnl_link_stats64)); - - return storage; } static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c8f525574d68..c66195d00ed4 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1643,8 +1643,8 @@ static void alx_poll_controller(struct net_device *netdev) } #endif -static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *net_stats) +static void alx_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *net_stats) { struct alx_priv *alx = netdev_priv(dev); struct alx_hw_stats *hw_stats = &alx->hw.stats; @@ -1688,8 +1688,6 @@ static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; spin_unlock(&alx->stats_lock); - - return net_stats; } static const struct net_device_ops alx_netdev_ops = { diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 48707ed76ffc..7aef70f7d8ef 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ 
b/drivers/net/ethernet/broadcom/b44.c @@ -1674,8 +1674,8 @@ static int b44_close(struct net_device *dev) return 0; } -static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *nstat) +static void b44_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *nstat) { struct b44 *bp = netdev_priv(dev); struct b44_hw_stats *hwstat = &bp->hw_stats; @@ -1718,7 +1718,6 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, #endif } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); - return nstat; } static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index d5d1026be4b7..de1d07c08495 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -6821,13 +6821,13 @@ bnx2_save_stats(struct bnx2 *bp) (unsigned long) (bp->stats_blk->ctr + \ bp->temp_stats_blk->ctr) -static struct rtnl_link_stats64 * +static void bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); if (bp->stats_blk == NULL) - return net_stats; + return; net_stats->rx_packets = GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + @@ -6891,7 +6891,6 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + GET_32BIT_NET_STATS(stat_FwRxDrop); - return net_stats; } /* All ethtool functions called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 98e948489700..e5f458396e1a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5879,7 +5879,7 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static struct rtnl_link_stats64 * +static void bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { u32 i; @@ -5888,7 +5888,7 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) memset(stats, 0, sizeof(struct rtnl_link_stats64)); if (!bp->bnapi) - return stats; + return; /* TODO check if we need to synchronize with bnxt_close path */ for (i = 0; i < bp->cp_nr_rings; i++) { @@ -5935,8 +5935,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); stats->tx_errors = le64_to_cpu(tx->tx_err); } - - return stats; } static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 185e9e047aa9..800328f562fa 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -14142,8 +14142,8 @@ static const struct ethtool_ops tg3_ethtool_ops = { .set_link_ksettings = tg3_set_link_ksettings, }; -static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tg3 *tp = netdev_priv(dev); @@ -14151,13 +14151,11 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, if (!tp->hw_stats) { *stats = tp->net_stats_prev; spin_unlock_bh(&tp->lock); - return stats; + return; } tg3_get_nstats(tp, stats); spin_unlock_bh(&tp->lock); - - return stats; } static void tg3_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c 
b/drivers/net/ethernet/brocade/bna/bnad.c index 112030828c4b..73a94113db1f 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3111,7 +3111,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) * Used spin_lock to synchronize reading of stats structures, which * is written by BNA under the same lock. */ -static struct rtnl_link_stats64 * +static void bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct bnad *bnad = netdev_priv(netdev); @@ -3123,8 +3123,6 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) bnad_netdev_hwstats_fill(bnad, stats); spin_unlock_irqrestore(&bnad->bna_lock, flags); - - return stats; } static void @@ -3427,7 +3425,7 @@ static const struct net_device_ops bnad_netdev_ops = { .ndo_open = bnad_open, .ndo_stop = bnad_stop, .ndo_start_xmit = bnad_start_xmit, - .ndo_get_stats64 = bnad_get_stats64, + .ndo_get_stats64 = bnad_get_stats64, .ndo_set_rx_mode = bnad_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnad_set_mac_address, diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index ce7de6f72512..b0540658afad 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1446,9 +1446,9 @@ static void xgmac_poll_controller(struct net_device *dev) } #endif -static struct rtnl_link_stats64 * +static void xgmac_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) + struct rtnl_link_stats64 *storage) { struct xgmac_priv *priv = netdev_priv(dev); void __iomem *base = priv->base; @@ -1476,7 +1476,6 @@ xgmac_get_stats64(struct net_device *dev, writel(0, base + XGMAC_MMC_CTRL); spin_unlock_bh(&priv->stats_lock); - return storage; } static int xgmac_set_mac_address(struct net_device *dev, void *p) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 2006f58b14b1..273eafdb1c57 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1461,8 +1461,8 @@ void nicvf_update_stats(struct nicvf *nic) nicvf_update_sq_stats(nic, qidx); } -static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void nicvf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct nicvf *nic = netdev_priv(netdev); struct nicvf_hw_stats *hw_stats = &nic->hw_stats; @@ -1478,7 +1478,6 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, stats->tx_packets = hw_stats->tx_frames; stats->tx_dropped = hw_stats->tx_drops; - return stats; } static void nicvf_tx_timeout(struct net_device *dev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 629b11879ceb..3349e1f376c3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2375,8 +2375,8 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, } EXPORT_SYMBOL(cxgb4_remove_server_filter); -static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *ns) +static void cxgb_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *ns) { struct port_stats stats; struct port_info *p = netdev_priv(dev); @@ -2389,7 +2389,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, 
spin_lock(&adapter->stats_lock); if (!netif_device_present(dev)) { spin_unlock(&adapter->stats_lock); - return ns; + return; } t4_get_port_stats_offset(adapter, p->tx_chan, &stats, &p->stats_base); @@ -2423,7 +2423,6 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, ns->tx_errors = stats.tx_error_frames; ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; - return ns; } static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index cdd7a1a59aa7..c5842c525eed 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -680,8 +680,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, } /* dev_base_lock rwlock held, nominally process context */ -static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *net_stats) +static void enic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; @@ -693,7 +693,7 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, * recorded stats. */ if (err == -ENOMEM) - return net_stats; + return; net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; @@ -707,8 +707,6 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, net_stats->rx_over_errors = enic->rq_truncated_pkts; net_stats->rx_crc_errors = enic->rq_bad_fcs; net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; - - return net_stats; } static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr) diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 7bf78a0d322c..278f139f2a22 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -457,7 +457,7 @@ static int ec_bhf_stop(struct net_device *net_dev) return 0; } -static struct rtnl_link_stats64 * +static void ec_bhf_get_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { @@ -472,8 +472,6 @@ ec_bhf_get_stats(struct net_device *net_dev, stats->tx_bytes = priv->stat_tx_bytes; stats->rx_bytes = priv->stat_rx_bytes; - - return stats; } static const struct net_device_ops ec_bhf_netdev_ops = { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 225e9a4877d7..0a679d2eaeee 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -639,8 +639,8 @@ void be_parse_stats(struct be_adapter *adapter) } } -static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void be_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct be_adapter *adapter = netdev_priv(netdev); struct be_drv_stats *drvs = &adapter->drv_stats; @@ -704,7 +704,6 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + drvs->rx_input_fifo_overflow_drop + drvs->rx_drops_no_pbuf; - return stats; } void be_link_status_update(struct be_adapter *adapter, u8 link_status) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index c9b7ad65e563..b7cbc26a0911 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ 
b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -313,8 +313,8 @@ static void dpaa_tx_timeout(struct net_device *net_dev) /* Calculates the statistics for the given device by adding the statistics * collected by each CPU. */ -static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev, - struct rtnl_link_stats64 *s) +static void dpaa_get_stats64(struct net_device *net_dev, + struct rtnl_link_stats64 *s) { int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); struct dpaa_priv *priv = netdev_priv(net_dev); @@ -332,8 +332,6 @@ static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev, for (j = 0; j < numstats; j++) netstats[j] += cpustats[j]; } - - return s; } static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 672b64606321..b7cb61385ad8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1625,8 +1625,8 @@ void hns_nic_set_rx_mode(struct net_device *ndev) netdev_err(ndev, "sync uc address fail\n"); } -struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) +static void hns_nic_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) { int idx = 0; u64 tx_bytes = 0; @@ -1668,8 +1668,6 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, stats->tx_window_errors = ndev->stats.tx_window_errors; stats->rx_compressed = ndev->stats.rx_compressed; stats->tx_compressed = ndev->stats.tx_compressed; - - return stats; } static u16 diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 702446a93697..1e53d7a82675 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -328,8 +328,8 @@ out: spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags); } -static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void ehea_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct ehea_port *port = netdev_priv(dev); u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0; @@ -352,7 +352,6 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev, stats->multicast = port->stats.multicast; stats->rx_errors = port->stats.rx_errors; - return stats; } static void ehea_update_stats(struct work_struct *work) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 879cca47b021..a29b12e80855 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e1000_ring *ring); int e1000e_setup_tx_resources(struct e1000_ring *ring); void e1000e_free_rx_resources(struct e1000_ring *ring); void e1000e_free_tx_resources(struct e1000_ring *ring); -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats); +void e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); void e1000e_get_hw_control(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index af3960853a32..723025b317cc 
100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5920,8 +5920,8 @@ static void e1000_reset_task(struct work_struct *work) * * Returns the address of the device statistics structure. **/ -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +void e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5958,7 +5958,6 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, /* Tx Dropped needs to be maintained elsewhere */ spin_unlock(&adapter->stats64_lock); - return stats; } /** diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index bc5ef6eb3dd6..01db688cf539 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1118,8 +1118,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface) * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This * function replaces fm10k_get_stats for kernels which support it. */ -static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void fm10k_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_ring *ring; @@ -1164,8 +1164,6 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, /* following stats updated by fm10k_service_task() */ stats->rx_missed_errors = netdev->stats.rx_missed_errors; - - return stats; } int fm10k_setup_tc(struct net_device *dev, u8 tc) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ba8d30984bee..342007df4663 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -834,9 +834,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); #ifdef I40E_FCOE -struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *storage); +void i40e_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *storage); int i40e_set_mac(struct net_device *netdev, void *p); void i40e_set_rx_mode(struct net_device *netdev); #endif diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ad4cf639430e..b2f76d24000d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -409,15 +409,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. 
**/ -#ifdef I40E_FCOE -struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *stats) -#else -static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *stats) +#ifndef I40E_FCOE +static #endif +void i40e_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_ring *tx_ring, *rx_ring; @@ -426,10 +422,10 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( int i; if (test_bit(__I40E_DOWN, &vsi->state)) - return stats; + return; if (!vsi->tx_rings) - return stats; + return; rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { @@ -469,8 +465,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; - - return stats; } /** diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 594604e09f8d..7546109d4980 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter, bool set); @@ -5404,8 +5404,8 @@ static void igb_reset_task(struct work_struct *work) * @netdev: network interface device structure * @stats: rtnl_link_stats64 pointer **/ -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -5413,8 +5413,6 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, igb_update_stats(adapter, &adapter->stats64); memcpy(stats, &adapter->stats64, sizeof(*stats)); spin_unlock(&adapter->stats64_lock); - - return stats; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0c6eca570791..ffe7d940d9ff 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8173,8 +8173,9 @@ static void ixgbe_netpoll(struct net_device *netdev) } #endif -static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + +static void ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; @@ -8212,13 +8213,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, } } rcu_read_unlock(); + /* following stats updated by ixgbe_watchdog_task() */ stats->multicast = netdev->stats.multicast; stats->rx_errors = netdev->stats.rx_errors; stats->rx_length_errors = netdev->stats.rx_length_errors; stats->rx_crc_errors = netdev->stats.rx_crc_errors; stats->rx_missed_errors = 
netdev->stats.rx_missed_errors; - return stats; } #ifdef CONFIG_IXGBE_DCB diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1a28349114f8..b06863560c7d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3896,8 +3896,8 @@ static void ixgbevf_shutdown(struct pci_dev *pdev) ixgbevf_suspend(pdev, PMSG_SUSPEND); } -static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void ixgbevf_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int start; @@ -3930,8 +3930,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, stats->tx_bytes += bytes; stats->tx_packets += packets; } - - return stats; } #define IXGBEVF_MAX_MAC_HDR_LEN 127 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e05e22705cf7..3607d8febbcf 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -652,7 +652,7 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp) } /* Get System Network Statistics */ -static struct rtnl_link_stats64 * +static void mvneta_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -686,8 +686,6 @@ mvneta_get_stats64(struct net_device *dev, stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } /* Rx descriptors helper methods */ diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 4fe430ceb194..69db40e1a4e1 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5739,7 +5739,7 @@ error: return err; } -static struct rtnl_link_stats64 * +static void mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mvpp2_port *port = netdev_priv(dev); @@ -5771,8 +5771,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_errors = dev->stats.rx_errors; stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index b60ad0e56a9f..18d6336fa162 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -3888,8 +3888,8 @@ static void sky2_set_multicast(struct net_device *dev) gma_write16(hw, port, GM_RX_CTRL, reg); } -static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void sky2_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -3929,8 +3929,6 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, stats->rx_dropped = dev->stats.rx_dropped; stats->rx_fifo_errors = dev->stats.rx_fifo_errors; stats->tx_fifo_errors = dev->stats.tx_fifo_errors; - - return stats; } /* Can have one global because blinking is controlled by diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 3dd87889e67e..25ae0c5bce3a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -462,8 +462,8 @@ static 
void mtk_stats_update(struct mtk_eth *eth) } } -static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) +static void mtk_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_hw_stats *hw_stats = mac->hw_stats; @@ -494,8 +494,6 @@ static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, storage->tx_errors = dev->stats.tx_errors; storage->rx_dropped = dev->stats.rx_dropped; storage->tx_dropped = dev->stats.tx_dropped; - - return storage; } static inline int mtk_max_frag_size(int mtu) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index edbe200ac2fa..06ef23f040a4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1321,7 +1321,7 @@ static void mlx4_en_tx_timeout(struct net_device *dev) } -static struct rtnl_link_stats64 * +static void mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1330,8 +1330,6 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx4_en_fold_software_stats(dev); netdev_stats_to_stats64(stats, &dev->stats); spin_unlock_bh(&priv->stats_lock); - - return stats; } static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 88dd731bb8cb..60e5670452a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2686,7 +2686,7 @@ mqprio: return mlx5e_setup_tc(dev, tc->tc); } -static struct rtnl_link_stats64 * +static void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -2729,7 +2729,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); - return stats; } static void mlx5e_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 850378893b25..2c864574a9d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -374,13 +374,12 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, return -EINVAL; } -static struct rtnl_link_stats64 * +static void mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); - return stats; } static const struct switchdev_ops mlx5e_rep_switchdev_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d768c7b6c6d6..46c53a042e6b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -947,15 +947,13 @@ out: /* Return the stats from a cache that is updated periodically, * as this function might get called in an atomic context. 
*/ -static struct rtnl_link_stats64 * +static void mlxsw_sp_port_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats)); - - return stats; } int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 150ccf5192a9..696d40612d28 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -381,7 +381,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu) return 0; } -static struct rtnl_link_stats64 * +static void mlxsw_sx_port_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -410,7 +410,6 @@ mlxsw_sx_port_get_stats64(struct net_device *dev, tx_dropped += p->tx_dropped; } stats->tx_dropped = tx_dropped; - return stats; } static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index e506ca876d0d..db297cfce6f4 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -378,8 +378,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p) __raw_writel((__force __u32) val, (__force void __iomem *)p); } -static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats); static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated) { @@ -3119,8 +3119,8 @@ drop: return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { const struct myri10ge_priv *mgp = netdev_priv(dev); const struct myri10ge_slice_netstats *slice_stats; @@ -3135,7 +3135,6 @@ static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, stats->rx_dropped += slice_stats->rx_dropped; stats->tx_dropped += slice_stats->tx_dropped; } - return stats; } static void myri10ge_set_multicast_list(struct net_device *dev) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index e07b936f64ec..f364502229db 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3111,7 +3111,7 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu) * @stats: pointer to struct rtnl_link_stats64 * */ -static struct rtnl_link_stats64 * +static void vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct vxgedev *vdev = netdev_priv(dev); @@ -3150,8 +3150,6 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) net_stats->tx_bytes += bytes; net_stats->tx_errors += txstats->tx_errors; } - - return net_stats; } static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e8d448109e03..67afd95ffb93 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2638,8 +2638,8 @@ static int 
nfp_net_change_mtu(struct net_device *netdev, int new_mtu) return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL); } -static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void nfp_net_stat64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct nfp_net *nn = netdev_priv(netdev); int r; @@ -2669,8 +2669,6 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } - - return stats; } static bool nfp_net_ebpf_capable(struct nfp_net *nn) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 3913f07279d2..dfc2c8149d22 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1733,7 +1733,7 @@ static void nv_update_stats(struct net_device *dev) * Called with read_lock(&dev_base_lock) held for read - * only synchronized against unregister_netdevice. */ -static struct rtnl_link_stats64* +static void nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) __acquires(&netdev_priv(dev)->hwstats_lock) __releases(&netdev_priv(dev)->hwstats_lock) @@ -1793,8 +1793,6 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) spin_unlock_bh(&np->hwstats_lock); } - - return storage; } /* diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 561fb94c7267..86fb9d3df700 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -90,8 +90,8 @@ static irqreturn_t netxen_msix_intr(int irq, void *data); static void netxen_free_ip_list(struct netxen_adapter *, bool); static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void netxen_nic_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int netxen_nic_set_mac(struct net_device *netdev, void *p); /* PCI Device ID Table */ @@ -2302,8 +2302,8 @@ request_reset: clear_bit(__NX_RESETTING, &adapter->state); } -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void netxen_nic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct netxen_adapter *adapter = netdev_priv(netdev); @@ -2313,8 +2313,6 @@ static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, stats->tx_bytes = adapter->stats.txbytes; stats->rx_dropped = adapter->stats.rxdropped; stats->tx_dropped = adapter->stats.txdropped; - - return stats; } static irqreturn_t netxen_intr(int irq, void *data) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index b58509feecd5..40a76a1d5973 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -398,9 +398,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; } -static -struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void qede_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); @@ -430,8 +429,6 @@ struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, 
stats->collisions = edev->stats.tx_total_collisions; stats->rx_crc_errors = edev->stats.rx_crc_errors; stats->rx_frame_errors = edev->stats.rx_align_errors; - - return stats; } #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 422289c232bc..40ebe010b06f 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -312,8 +312,8 @@ static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } /* Provide network statistics info for the interface */ -static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *net_stats) +static void emac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) { struct emac_adapter *adpt = netdev_priv(netdev); unsigned int addr = REG_MAC_RX_STATUS_BIN; @@ -377,8 +377,6 @@ static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, net_stats->tx_window_errors = stats->tx_late_col; spin_unlock(&stats->lock); - - return net_stats; } static const struct net_device_ops emac_netdev_ops = { diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 9bc047ac883b..5ad59c6d29a4 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -653,9 +653,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget); static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 - *stats); +static void rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static void rtl8139_set_rx_mode (struct net_device *dev); static void __set_rx_mode (struct net_device *dev); static void rtl8139_hw_start (struct net_device *dev); @@ -2516,7 +2515,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } -static struct rtnl_link_stats64 * +static void rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8139_private *tp = netdev_priv(dev); @@ -2544,8 +2543,6 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); - - return stats; } /* Set or clear the multicast filter for this adaptor. 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 44389c90056a..858f4554de11 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7755,7 +7755,7 @@ err_pm_runtime_put: goto out; } -static struct rtnl_link_stats64 * +static void rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); @@ -7809,8 +7809,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) le16_to_cpu(tp->tc_offset.tx_aborted); pm_runtime_put_noidle(&pdev->dev); - - return stats; } static void rtl8169_net_suspend(struct net_device *dev) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index cddcff5a00a7..07074d9bc45d 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1706,11 +1706,9 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi) * This function is a driver entry point whenever ifconfig command gets * executed to see device statistics. Statistics are number of * bytes sent or received, errors occurred etc. - * Return value: - * This function returns various statistical information of device. */ -static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void sxgbe_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct sxgbe_priv_data *priv = netdev_priv(dev); void __iomem *ioaddr = priv->ioaddr; @@ -1761,8 +1759,6 @@ static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, SXGBE_MMC_TXUFLWHI_GBCNT_REG); writel(0, ioaddr + SXGBE_MMC_CTL_REG); spin_unlock(&priv->stats_lock); - - return stats; } /* sxgbe_set_features - entry point to set offload features of the device. diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index bbbed2e84de8..ebeecb8fed45 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2219,16 +2219,14 @@ int efx_net_stop(struct net_device *net_dev) } /* Context: process, dev_base_lock or RTNL held, non-blocking. */ -static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) +static void efx_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); - - return stats; } /* Context: netif_tx_lock held, BHs disabled. */ diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index ec3ac0e45cc9..8cfbe01e1ddf 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2158,16 +2158,14 @@ int ef4_net_stop(struct net_device *net_dev) } /* Context: process, dev_base_lock or RTNL held, non-blocking. */ -static struct rtnl_link_stats64 *ef4_net_stats(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) +static void ef4_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) { struct ef4_nic *efx = netdev_priv(net_dev); spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); - - return stats; } /* Context: netif_tx_lock held, BHs disabled. 
*/ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f90d1af6d390..e557a3290a25 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6294,8 +6294,8 @@ no_rings: stats->tx_errors = errors; } -static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void niu_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct niu *np = netdev_priv(dev); @@ -6303,8 +6303,6 @@ static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, niu_get_rx_stats(np, stats); niu_get_tx_stats(np, stats); } - - return stats; } static void niu_load_hash_xmac(struct niu *np, u16 *hash) diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 09f5a67da35e..467dcc53f5e1 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -2490,7 +2490,7 @@ static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); } -static struct rtnl_link_stats64* +static void dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) { unsigned long flags; @@ -2522,8 +2522,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) else s->tx_errors = hwstats->txunderflowerror + hwstats->txcarriererror; - - return s; } static void diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 0a3b7dafa3ba..30cfea62a356 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -2047,8 +2047,8 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) * * Returns the address of the device statistics structure. 
*/ -static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void tile_net_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tile_net_priv *priv = netdev_priv(dev); u64 rx_packets = 0, tx_packets = 0; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 0a6c4e804eed..453a1fad560c 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -513,8 +513,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance); static void rhine_tx(struct net_device *dev); static int rhine_rx(struct net_device *dev, int limit); static void rhine_set_rx_mode(struct net_device *dev); -static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void rhine_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); @@ -2221,7 +2221,7 @@ out_unlock: mutex_unlock(&rp->task_lock); } -static struct rtnl_link_stats64 * +static void rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rhine_private *rp = netdev_priv(dev); @@ -2244,8 +2244,6 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = rp->tx_stats.packets; stats->tx_bytes = rp->tx_stats.bytes; } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); - - return stats; } static void rhine_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b77e4ecf3cf2..5028001429c7 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -57,8 +57,7 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *); static void fjes_tx_stall_task(struct work_struct *); static void fjes_force_close_task(struct work_struct *); static irqreturn_t fjes_intr(int, void*); -static struct rtnl_link_stats64 * -fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); +static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); static int fjes_change_mtu(struct net_device *, int); static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16); static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16); @@ -782,14 +781,12 @@ static void fjes_tx_retry(struct net_device *netdev) netif_tx_wake_queue(queue); } -static struct rtnl_link_stats64 * +static void fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct fjes_adapter *adapter = netdev_priv(netdev); memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64)); - - return stats; } static int fjes_change_mtu(struct net_device *netdev, int new_mtu) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c9414c054852..05374fce7da4 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -908,8 +908,8 @@ out: return ret; } -static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, - struct rtnl_link_stats64 *t) +static void netvsc_get_stats64(struct net_device *net, + struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); int cpu; @@ -947,8 +947,6 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, t->rx_dropped = net->stats.rx_dropped; 
t->rx_errors = net->stats.rx_errors; - - return t; } static int netvsc_set_mac_addr(struct net_device *ndev, void *p) diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 66c0eeafcb5d..082534e187fc 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -129,8 +129,8 @@ resched: } -static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void ifb_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp = dp->tx_private; @@ -157,8 +157,6 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, } stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } static int ifb_dev_init(struct net_device *dev) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index ce7ca6a5aa8a..1cdb8c5ec403 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -303,8 +303,8 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev) dev_mc_sync(ipvlan->phy_dev, dev); } -static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *s) +static void ipvlan_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) { struct ipvl_dev *ipvlan = netdev_priv(dev); @@ -341,7 +341,6 @@ static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev, s->rx_dropped = rx_errs; s->tx_dropped = tx_drps; } - return s; } static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 1e05b7c2d157..30a493936e63 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -97,8 +97,8 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void loopback_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { u64 bytes = 0; u64 packets = 0; @@ -122,7 +122,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, stats->tx_packets = packets; stats->rx_bytes = bytes; stats->tx_bytes = bytes; - return stats; } static u32 always_on(struct net_device *dev) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index f83cf6696820..778a77303c49 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2888,13 +2888,13 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *s) +static void macsec_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) { int cpu; if (!dev->tstats) - return s; + return; for_each_possible_cpu(cpu) { struct pcpu_sw_netstats *stats; @@ -2918,8 +2918,6 @@ static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, s->rx_dropped = dev->stats.rx_dropped; s->tx_dropped = dev->stats.tx_dropped; - - return s; } static int macsec_get_iflink(const struct net_device *dev) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 20b3fdf282c5..440ab3d8adf7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -855,8 +855,8 @@ static void macvlan_uninit(struct net_device *dev) macvlan_port_destroy(port->dev); } -static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static 
void macvlan_dev_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -893,7 +893,6 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, stats->rx_dropped = rx_errors; stats->tx_dropped = tx_dropped; } - return stats; } static int macvlan_vlan_rx_add_vid(struct net_device *dev, diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index 2de7faee9b19..b91603835d26 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -58,7 +58,7 @@ static int nlmon_close(struct net_device *dev) return netlink_remove_tap(&nlmon->nt); } -static struct rtnl_link_stats64 * +static void nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { int i; @@ -86,8 +86,6 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_bytes = bytes; stats->tx_bytes = 0; - - return stats; } static u32 always_on(struct net_device *dev) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 3d3b1f4339ef..a411b43a69eb 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1297,7 +1297,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } -static struct rtnl_link_stats64* +static void ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) { struct ppp *ppp = netdev_priv(dev); @@ -1317,8 +1317,6 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) stats64->rx_dropped = dev->stats.rx_dropped; stats64->tx_dropped = dev->stats.tx_dropped; stats64->rx_length_errors = dev->stats.rx_length_errors; - - return stats64; } static int ppp_dev_init(struct net_device *dev) diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 9841f3dc0682..08db4d687533 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -566,7 +566,7 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu) /* Netdevice get statistics request */ -static struct rtnl_link_stats64 * +static void sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct net_device_stats *devstats = &dev->stats; @@ -597,7 +597,6 @@ sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->collisions += comp->sls_o_misses; } #endif - return stats; } /* Netdevice register callback */ diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index bdc58567d10e..a3711769544b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1798,7 +1798,7 @@ unwind: return err; } -static struct rtnl_link_stats64 * +static void team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct team *team = netdev_priv(dev); @@ -1835,7 +1835,6 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_dropped = rx_dropped; stats->tx_dropped = tx_dropped; stats->rx_nohandler = rx_nohandler; - return stats; } static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cd8e02c94be0..8c1d3bd6b4d0 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -953,7 +953,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr) tun->align = new_hr; } -static struct rtnl_link_stats64 * +static void tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; @@ -987,7 +987,6 @@ tun_net_get_stats64(struct net_device *dev, struct 
rtnl_link_stats64 *stats) stats->rx_dropped = rx_dropped; stats->rx_frame_errors = rx_frame_errors; stats->tx_dropped = tx_dropped; - return stats; } static const struct net_device_ops tun_netdev_ops = { diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 0520952aa096..8c39d6d690e5 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -158,8 +158,8 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) return atomic64_read(&priv->dropped); } -static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static void veth_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; @@ -177,8 +177,6 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, tot->rx_packets = one.packets; } rcu_read_unlock(); - - return tot; } /* fake multicast ability */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2cea022e6e6e..37db91d1a0a3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1272,8 +1272,8 @@ out: return ret; } -static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static void virtnet_stats(struct net_device *dev, + struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; @@ -1306,8 +1306,6 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; - - return tot; } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index aabc6ef366b4..f88ffafebfbf 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -113,7 +113,7 @@ vmxnet3_global_stats[] = { }; -struct rtnl_link_stats64 * +void vmxnet3_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -160,8 +160,6 @@ vmxnet3_get_stats64(struct net_device *netdev, stats->rx_dropped += drvRxStats->drop_total; stats->multicast += devRxStats->mcastPktsRxOK; } - - return stats; } static int diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 59e077be8829..ba1c9f93592b 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -465,8 +465,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, void vmxnet3_set_ethtool_ops(struct net_device *netdev); -struct rtnl_link_stats64 * -vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); +void vmxnet3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); extern char vmxnet3_driver_name[]; #endif diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 23dfb0eac098..895e3e258543 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -77,8 +77,8 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) kfree_skb(skb); } -static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void vrf_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { int i; @@ -102,7 +102,6 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, stats->rx_bytes += rbytes; stats->rx_packets += rpkts; } - return stats; } /* Local traffic destined to local address. 
Reinsert the packet to rx diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a479cd99911d..40f26b69beb1 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1073,8 +1073,8 @@ static int xennet_change_mtu(struct net_device *dev, int mtu) return 0; } -static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static void xennet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; @@ -1105,8 +1105,6 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; - - return tot; } static void xennet_release_tx_bufs(struct netfront_queue *queue) diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index fb0928a4fb97..f84069ffa8c6 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -397,14 +397,6 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) TX_DROP_FRAME_COUNTER); } -static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats - ) -{ - xlr_stats(ndev, stats); - return stats; -} - static const struct net_device_ops xlr_netdev_ops = { .ndo_open = xlr_net_open, .ndo_stop = xlr_net_stop, @@ -412,7 +404,7 @@ static const struct net_device_ops xlr_netdev_ops = { .ndo_select_queue = xlr_net_select_queue, .ndo_set_mac_address = xlr_net_set_mac_addr, .ndo_set_rx_mode = xlr_set_rx_mode, - .ndo_get_stats64 = xlr_get_stats64, + .ndo_get_stats64 = xlr_stats, }; /* diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ecd78b3c9aba..b14ad9c139d7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -913,8 +913,8 @@ struct netdev_xdp { * Callback used when the transmitter has not made any progress * for dev->watchdog ticks. * - * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, - * struct rtnl_link_stats64 *storage); + * void (*ndo_get_stats64)(struct net_device *dev, + * struct rtnl_link_stats64 *storage); * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); * Called when a user wants to get the network device usage * statistics. 
Drivers must do one of the following: @@ -1165,8 +1165,8 @@ struct net_device_ops { struct neigh_parms *); void (*ndo_tx_timeout) (struct net_device *dev); - struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, - struct rtnl_link_stats64 *storage); + void (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index e893fe43dd13..3d4ca4df1209 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -261,8 +261,8 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); -struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot); +void ip_tunnel_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot); struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, int link, __be16 flags, __be32 remote, __be32 local, diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 10da6c588bf8..116455ac3db5 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -671,7 +671,8 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev, return 0; } -static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +static void vlan_dev_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct vlan_pcpu_stats *p; u32 rx_errors = 0, tx_dropped = 0; @@ -702,8 +703,6 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st } stats->rx_errors = rx_errors; stats->tx_dropped = tx_dropped; - - return stats; } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ed3b3192fb00..6c46d1b4cdbb 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -153,8 +153,8 @@ static int br_dev_stop(struct net_device *dev) return 0; } -static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void br_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct net_bridge *br = netdev_priv(dev); struct pcpu_sw_netstats tmp, sum = { 0 }; @@ -178,8 +178,6 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev, stats->tx_packets = sum.tx_packets; stats->rx_bytes = sum.rx_bytes; stats->rx_packets = sum.rx_packets; - - return stats; } static int br_change_mtu(struct net_device *dev, int new_mtu) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index fed3d29f9eb3..5476110598f7 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -188,8 +188,8 @@ int iptunnel_handle_offloads(struct sk_buff *skb, EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); /* Often modified stats are per cpu, other are shared (netdev->stats) */ -struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot) +void ip_tunnel_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { int i; @@ -214,8 +214,6 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; } - - return tot; } EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); diff --git a/net/l2tp/l2tp_eth.c 
b/net/l2tp/l2tp_eth.c index e2c6ae024565..8bf18a5f66e0 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -106,8 +106,8 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void l2tp_eth_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct l2tp_eth *priv = netdev_priv(dev); @@ -117,10 +117,8 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev, stats->rx_bytes = atomic_long_read(&priv->rx_bytes); stats->rx_packets = atomic_long_read(&priv->rx_packets); stats->rx_errors = atomic_long_read(&priv->rx_errors); - return stats; } - static const struct net_device_ops l2tp_eth_netdev_ops = { .ndo_init = l2tp_eth_dev_init, .ndo_uninit = l2tp_eth_dev_uninit, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 41497b670e2b..77e8a42225f9 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1122,7 +1122,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev, return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); } -static struct rtnl_link_stats64 * +static void ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { int i; @@ -1147,8 +1147,6 @@ ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_bytes += rx_bytes; stats->tx_bytes += tx_bytes; } - - return stats; } static const struct net_device_ops ieee80211_dataif_ops = { diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index d5d6caecd072..09141a18ee2d 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -97,7 +97,7 @@ static void internal_dev_destructor(struct net_device *dev) free_netdev(dev); } -static struct rtnl_link_stats64 * +static void internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { int i; @@ -125,8 +125,6 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_bytes += local_stats.tx_bytes; stats->tx_packets += local_stats.tx_packets; } - - return stats; } static void internal_set_rx_headroom(struct net_device *dev, int new_hr) diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index b0196366d58d..9fe6b427afed 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -401,8 +401,8 @@ static int teql_master_close(struct net_device *dev) return 0; } -static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void teql_master_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct teql_master *m = netdev_priv(dev); @@ -410,7 +410,6 @@ static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev, stats->tx_bytes = m->tx_bytes; stats->tx_errors = m->tx_errors; stats->tx_dropped = m->tx_dropped; - return stats; } static int teql_master_mtu(struct net_device *dev, int new_mtu) -- cgit v1.2.3 From 5944701df90d9577658e2354cc27c4ceaeca30fe Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 6 Jan 2017 19:12:53 -0800 Subject: net: remove useless memset's in drivers get_stats64 In dev_get_stats() the statistic structure storage has already been zeroed. Therefore network drivers do not need to call memset() again. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 1 - drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 -- drivers/net/ethernet/intel/e1000e/netdev.c | 1 - 3 files changed, 4 deletions(-) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 45f2204e6695..de59db6c5841 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1462,7 +1462,6 @@ static void xgene_enet_get_stats64( struct xgene_enet_desc_ring *ring; int i; - memset(stats, 0, sizeof(struct rtnl_link_stats64)); for (i = 0; i < pdata->txq_cnt; i++) { ring = pdata->tx_ring[i]; if (ring) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e5f458396e1a..7bd2a85694dd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5885,8 +5885,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) u32 i; struct bnxt *bp = netdev_priv(dev); - memset(stats, 0, sizeof(struct rtnl_link_stats64)); - if (!bp->bnapi) return; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 723025b317cc..79651eb608ff 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5925,7 +5925,6 @@ void e1000e_get_stats64(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); - memset(stats, 0, sizeof(struct rtnl_link_stats64)); spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); /* Fill out the OS statistics structure */ -- cgit v1.2.3 From 29b84f20e2c59317db133d5dab96bbf500714227 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 6 Jan 2017 22:27:59 -0800 Subject: mdio: Demote print from info to debug in mdio_device_register While it is useful to know which MDIO device is being registered, demote the dev_info() to a dev_dbg(). Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index 43c8fd46504b..fc3aaaa36b1d 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -67,7 +67,7 @@ int mdio_device_register(struct mdio_device *mdiodev) { int err; - dev_info(&mdiodev->dev, "mdio_device_register\n"); + dev_dbg(&mdiodev->dev, "mdio_device_register\n"); err = mdiobus_register_device(mdiodev); if (err) -- cgit v1.2.3 From aec745e2c520bf2d046684a284dac11c25d8e717 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 7 Jan 2017 17:06:33 -0500 Subject: net-tc: remove unused tc_verd fields Remove the last reference to tc_verd's munge and redirect ttl bits. These fields are no longer used. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_cls.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index cb4bcdc58543..c769f71972f5 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -17,10 +17,6 @@ /* verdict bit breakdown * -bit 0: when set -> this packet has been munged already - -bit 1: when set -> It is ok to munge this packet - bit 2,3,4,5: Reclassify counter - sort of reverse TTL - if exceeded assume loop @@ -31,8 +27,6 @@ bit 6,7: Where this packet was last seen bit 8: when set --> Request not to classify on ingress. -bits 9,10,11: redirect counter - redirect TTL. Loop avoidance - * * */ @@ -56,7 +50,6 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance #define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT)) #define MAX_REC_LOOP 4 -#define MAX_RED_LOOP 4 #endif /* Action attributes */ -- cgit v1.2.3 From d6264071ce7d100a2b7c1f295167796ab5178caf Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 7 Jan 2017 17:06:34 -0500 Subject: net-tc: make MAX_RECLASSIFY_LOOP local This field is no longer kept in tc_verd. Remove it from the global definition of that struct. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 5 ----- net/sched/sch_api.c | 3 ++- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index c769f71972f5..bba23dbb3ab6 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -17,9 +17,6 @@ /* verdict bit breakdown * -bit 2,3,4,5: Reclassify counter - sort of reverse TTL - if exceeded -assume loop - bit 6,7: Where this packet was last seen 0: Above the transmit example at the socket level 1: on the Ingress @@ -48,8 +45,6 @@ bit 8: when set --> Request not to classify on ingress. #define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT) #define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT) #define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT)) - -#define MAX_REC_LOOP 4 #endif /* Action attributes */ diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index d7b93429f0cc..ef53ede11590 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1861,6 +1861,7 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, { __be16 protocol = tc_skb_protocol(skb); #ifdef CONFIG_NET_CLS_ACT + const int max_reclassify_loop = 4; const struct tcf_proto *old_tp = tp; int limit = 0; @@ -1885,7 +1886,7 @@ reclassify: return TC_ACT_UNSPEC; /* signal: continue lookup */ #ifdef CONFIG_NET_CLS_ACT reset: - if (unlikely(limit++ >= MAX_REC_LOOP)) { + if (unlikely(limit++ >= max_reclassify_loop)) { net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n", tp->q->ops->id, tp->prio & 0xffff, ntohs(tp->protocol)); -- cgit v1.2.3 From e7246e122aaa99ebbb8ad7da80f35a20577bd8af Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 7 Jan 2017 17:06:35 -0500 Subject: net-tc: extract skip classify bit from tc_verd Packets sent by the IFB device skip subsequent tc classification. A single bit governs this state. Move it out of tc_verd in anticipation of removing that __u16 completely. The new bitfield tc_skip_classify temporarily uses one bit of a hole, until tc_verd is removed completely in a follow-up patch. Remove the bit hole comment. It could be 2, 3, 4 or 5 bits long. With that many options, little value in documenting it. Introduce a helper function to deduplicate the logic in the two sites that check this bit. 
The field tc_skip_classify is set only in IFB on skbs cloned in act_mirred, so original packet sources do not have to clear the bit when reusing packets (notably, pktgen and octeon). Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- drivers/net/ifb.c | 2 +- include/linux/skbuff.h | 5 ++++- include/net/sch_generic.h | 11 +++++++++++ include/uapi/linux/pkt_cls.h | 6 ------ net/core/dev.c | 10 +++------- net/sched/act_api.c | 11 ++++------- 6 files changed, 23 insertions(+), 22 deletions(-) diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 082534e187fc..442c4c4a9606 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -81,7 +81,7 @@ static void ifb_ri_tasklet(unsigned long _txp) u32 from = G_TC_FROM(skb->tc_verd); skb->tc_verd = 0; - skb->tc_verd = SET_TC_NCLS(skb->tc_verd); + skb->tc_skip_classify = 1; u64_stats_update_begin(&txp->tsync); txp->tx_packets++; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b53c0cfd417e..570f60ec6cb4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -589,6 +589,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @pkt_type: Packet class * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs + * @tc_skip_classify: do not classify packet. set by IFB device * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag @@ -749,7 +750,9 @@ struct sk_buff { #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; #endif - /* 2, 4 or 5 bit hole */ +#ifdef CONFIG_NET_CLS_ACT + __u8 tc_skip_classify:1; +#endif #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 498f81b229a4..857356f2d74b 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -418,6 +418,17 @@ static inline bool skb_at_tc_ingress(const struct sk_buff *skb) #endif } +static inline bool skb_skip_tc_classify(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + if (skb->tc_skip_classify) { + skb->tc_skip_classify = 0; + return true; + } +#endif + return false; +} + /* Reset all TX qdiscs greater then index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) { diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index bba23dbb3ab6..1eed5d7509bc 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -22,8 +22,6 @@ bit 6,7: Where this packet was last seen 1: on the Ingress 2: on the Egress -bit 8: when set --> Request not to classify on ingress. - * * */ @@ -36,10 +34,6 @@ bit 8: when set --> Request not to classify on ingress. 
#define AT_INGRESS 0x1 #define AT_EGRESS 0x2 -#define TC_NCLS _TC_MAKEMASK1(8) -#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS)) -#define CLR_TC_NCLS(v) ( v & ~TC_NCLS) - #define S_TC_AT _TC_MAKE32(12) #define M_TC_AT _TC_MAKEMASK(2,S_TC_AT) #define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT) diff --git a/net/core/dev.c b/net/core/dev.c index 56818f7eab2b..e39e35d2e082 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4093,12 +4093,8 @@ another_round: goto out; } -#ifdef CONFIG_NET_CLS_ACT - if (skb->tc_verd & TC_NCLS) { - skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); - goto ncls; - } -#endif + if (skb_skip_tc_classify(skb)) + goto skip_classify; if (pfmemalloc) goto skip_taps; @@ -4128,8 +4124,8 @@ skip_taps: #endif #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = 0; -ncls: #endif +skip_classify: if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) goto drop; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 2095c83ce773..f04715a57300 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -426,11 +426,9 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, { int ret = -1, i; - if (skb->tc_verd & TC_NCLS) { - skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); - ret = TC_ACT_OK; - goto exec_done; - } + if (skb_skip_tc_classify(skb)) + return TC_ACT_OK; + for (i = 0; i < nr_actions; i++) { const struct tc_action *a = actions[i]; @@ -439,9 +437,8 @@ repeat: if (ret == TC_ACT_REPEAT) goto repeat; /* we need a ttl - JHS */ if (ret != TC_ACT_PIPE) - goto exec_done; + break; } -exec_done: return ret; } EXPORT_SYMBOL(tcf_action_exec); -- cgit v1.2.3 From a5135bcfba7345031df45e02cd150a45add47cf8 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 7 Jan 2017 17:06:36 -0500 Subject: net-tc: convert tc_verd to integer bitfields Extract the remaining two fields from tc_verd and remove the __u16 completely. TC_AT and TC_FROM are converted to equivalent two-bit integer fields tc_at and tc_from. Where possible, use existing helper skb_at_tc_ingress when reading tc_at. Introduce helper skb_reset_tc to clear fields. Not documenting tc_from and tc_at, because they will be replaced with single bit fields in follow-on patches. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- drivers/net/ifb.c | 7 +++---- drivers/staging/octeon/ethernet-tx.c | 5 ++--- include/linux/skbuff.h | 6 ++---- include/net/sch_generic.h | 10 +++++++++- include/uapi/linux/pkt_cls.h | 31 ------------------------------- net/core/dev.c | 10 ++++------ net/core/pktgen.c | 4 +--- net/core/skbuff.c | 3 --- net/sched/act_ife.c | 7 +++---- net/sched/act_mirred.c | 9 ++++----- net/sched/sch_netem.c | 2 +- 11 files changed, 29 insertions(+), 65 deletions(-) diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 442c4c4a9606..b73b6b6c066b 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -78,9 +78,9 @@ static void ifb_ri_tasklet(unsigned long _txp) } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { - u32 from = G_TC_FROM(skb->tc_verd); + u32 from = skb->tc_from; - skb->tc_verd = 0; + skb_reset_tc(skb); skb->tc_skip_classify = 1; u64_stats_update_begin(&txp->tsync); @@ -239,7 +239,6 @@ static void ifb_setup(struct net_device *dev) static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ifb_dev_private *dp = netdev_priv(dev); - u32 from = G_TC_FROM(skb->tc_verd); struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb); u64_stats_update_begin(&txp->rsync); @@ -247,7 +246,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) txp->rx_bytes += skb->len; u64_stats_update_end(&txp->rsync); - if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) { + if (skb->tc_from == AT_STACK || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 6b4c20872323..0b8053205091 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -23,6 +23,7 @@ #endif /* CONFIG_XFRM */ #include +#include #include @@ -369,9 +370,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef CONFIG_NET_SCHED skb->tc_index = 0; -#ifdef CONFIG_NET_CLS_ACT - skb->tc_verd = 0; -#endif /* CONFIG_NET_CLS_ACT */ + skb_reset_tc(skb); #endif /* CONFIG_NET_SCHED */ #endif /* REUSE_SKBUFFS_WITHOUT_FREE */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 570f60ec6cb4..f738d09947b2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -599,7 +599,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @skb_iif: ifindex of device we arrived on * @tc_index: Traffic control index - * @tc_verd: traffic control verdict * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue @@ -752,13 +751,12 @@ struct sk_buff { #endif #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; + __u8 tc_at:2; + __u8 tc_from:2; #endif #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ -#ifdef CONFIG_NET_CLS_ACT - __u16 tc_verd; /* traffic control verdict */ -#endif #endif union { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 857356f2d74b..f80dba516964 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -409,10 +409,18 @@ bool tcf_destroy(struct tcf_proto *tp, bool force); void tcf_destroy_chain(struct tcf_proto __rcu **fl); int skb_do_redirect(struct sk_buff *); +static inline void skb_reset_tc(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + skb->tc_at = 0; + skb->tc_from = 0; +#endif +} + static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { #ifdef 
CONFIG_NET_CLS_ACT - return G_TC_AT(skb->tc_verd) & AT_INGRESS; + return skb->tc_at & AT_INGRESS; #else return false; #endif diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 1eed5d7509bc..cee753a7a40c 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -5,40 +5,9 @@ #include #ifdef __KERNEL__ -/* I think i could have done better macros ; for now this is stolen from - * some arch/mips code - jhs -*/ -#define _TC_MAKE32(x) ((x)) - -#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n)) -#define _TC_MAKEMASK(v,n) (_TC_MAKE32((_TC_MAKE32(1)<<(v))-1) << _TC_MAKE32(n)) -#define _TC_MAKEVALUE(v,n) (_TC_MAKE32(v) << _TC_MAKE32(n)) -#define _TC_GETVALUE(v,n,m) ((_TC_MAKE32(v) & _TC_MAKE32(m)) >> _TC_MAKE32(n)) - -/* verdict bit breakdown - * -bit 6,7: Where this packet was last seen -0: Above the transmit example at the socket level -1: on the Ingress -2: on the Egress - - * - * */ - -#define S_TC_FROM _TC_MAKE32(6) -#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM) -#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM) -#define V_TC_FROM(x) _TC_MAKEVALUE(x,S_TC_FROM) -#define SET_TC_FROM(v,n) ((V_TC_FROM(n)) | (v & ~M_TC_FROM)) #define AT_STACK 0x0 #define AT_INGRESS 0x1 #define AT_EGRESS 0x2 - -#define S_TC_AT _TC_MAKE32(12) -#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT) -#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT) -#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT) -#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT)) #endif /* Action attributes */ diff --git a/net/core/dev.c b/net/core/dev.c index e39e35d2e082..8b5d6d033473 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3153,7 +3153,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) if (!cl) return skb; - /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set + /* skb->tc_at and qdisc_skb_cb(skb)->pkt_len were already set * earlier by the caller. 
*/ qdisc_bstats_cpu_update(cl->q, skb); @@ -3320,7 +3320,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) qdisc_pkt_len_init(skb); #ifdef CONFIG_NET_CLS_ACT - skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); + skb->tc_at = AT_EGRESS; # ifdef CONFIG_NET_EGRESS if (static_key_false(&egress_needed)) { skb = sch_handle_egress(skb, &rc, dev); @@ -3920,7 +3920,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, } qdisc_skb_cb(skb)->pkt_len = skb->len; - skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); + skb->tc_at = AT_INGRESS; qdisc_bstats_cpu_update(cl->q, skb); switch (tc_classify(skb, cl, &cl_res, false)) { @@ -4122,9 +4122,7 @@ skip_taps: goto out; } #endif -#ifdef CONFIG_NET_CLS_ACT - skb->tc_verd = 0; -#endif + skb_reset_tc(skb); skip_classify: if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) goto drop; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8e69ce472236..96947f5d41e4 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3439,9 +3439,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) /* skb was 'freed' by stack, so clean few * bits and reuse it */ -#ifdef CONFIG_NET_CLS_ACT - skb->tc_verd = 0; /* reset reclass/redir ttl */ -#endif + skb_reset_tc(skb); } while (--burst > 0); goto out; /* Skips xmit_mode M_START_XMIT */ } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5a03730fbc1a..adec4bf807d8 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -878,9 +878,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) #endif #ifdef CONFIG_NET_SCHED CHECK_SKB_FIELD(tc_index); -#ifdef CONFIG_NET_CLS_ACT - CHECK_SKB_FIELD(tc_verd); -#endif #endif } diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 80b848d3f096..921fb20eaa7c 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -736,12 +736,11 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, u16 metalen = ife_get_sz(skb, ife); int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN; unsigned int skboff = skb->dev->hard_header_len; - u32 at = G_TC_AT(skb->tc_verd); int new_len = skb->len + hdrm; bool exceed_mtu = false; int err; - if (at & AT_EGRESS) { + if (!skb_at_tc_ingress(skb)) { if (new_len > skb->dev->mtu) exceed_mtu = true; } @@ -773,7 +772,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, return TC_ACT_SHOT; } - if (!(at & AT_EGRESS)) + if (skb_at_tc_ingress(skb)) skb_push(skb, skb->dev->hard_header_len); iethh = (struct ethhdr *)skb->data; @@ -816,7 +815,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, ether_addr_copy(oethh->h_dest, iethh->h_dest); oethh->h_proto = htons(ife->eth_type); - if (!(at & AT_EGRESS)) + if (skb_at_tc_ingress(skb)) skb_pull(skb, skb->dev->hard_header_len); spin_unlock(&ife->tcf_lock); diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 2d9fa6e0a1b4..8543279bba49 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -170,7 +170,6 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, int retval, err = 0; int m_eaction; int mac_len; - u32 at; tcf_lastuse_update(&m->tcf_tm); bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb); @@ -191,7 +190,6 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, goto out; } - at = G_TC_AT(skb->tc_verd); skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) goto out; @@ -200,8 +198,9 @@ static int tcf_mirred(struct sk_buff *skb, 
const struct tc_action *a, * and devices expect a mac header on xmit, then mac push/pull is * needed. */ - if (at != tcf_mirred_act_direction(m_eaction) && m_mac_header_xmit) { - if (at & AT_EGRESS) { + if (skb->tc_at != tcf_mirred_act_direction(m_eaction) && + m_mac_header_xmit) { + if (!skb_at_tc_ingress(skb)) { /* caught at egress, act ingress: pull mac */ mac_len = skb_network_header(skb) - skb_mac_header(skb); skb_pull_rcsum(skb2, mac_len); @@ -213,7 +212,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, /* mirror is always swallowed */ if (tcf_mirred_is_act_redirect(m_eaction)) - skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); + skb2->tc_from = skb2->tc_at; skb2->skb_iif = skb->dev->ifindex; skb2->dev = dev; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index bcfadfdea8e0..bb5c638b6852 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -626,7 +626,7 @@ deliver: * If it's at ingress let's pretend the delay is * from the network (tstamp will be updated). */ - if (G_TC_FROM(skb->tc_verd) & AT_INGRESS) + if (skb->tc_from & AT_INGRESS) skb->tstamp = 0; #endif -- cgit v1.2.3 From 8dc07fdbf2054f157e8333f940a1ad728916c786 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 7 Jan 2017 17:06:37 -0500 Subject: net-tc: convert tc_at to tc_at_ingress Field tc_at is used only within tc actions to distinguish ingress from egress processing. A single bit is sufficient for this purpose. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/skbuff.h | 3 ++- include/net/sch_generic.h | 3 +-- net/core/dev.c | 8 +++----- net/sched/act_mirred.c | 12 ++++++------ 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f738d09947b2..fab3f87e9bd1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -590,6 +590,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs * @tc_skip_classify: do not classify packet. set by IFB device + * @tc_at_ingress: used within tc_classify to distinguish in/egress * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag @@ -751,7 +752,7 @@ struct sk_buff { #endif #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; - __u8 tc_at:2; + __u8 tc_at_ingress:1; __u8 tc_from:2; #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index f80dba516964..4bd6d5387209 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -412,7 +412,6 @@ int skb_do_redirect(struct sk_buff *); static inline void skb_reset_tc(struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT - skb->tc_at = 0; skb->tc_from = 0; #endif } @@ -420,7 +419,7 @@ static inline void skb_reset_tc(struct sk_buff *skb) static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT - return skb->tc_at & AT_INGRESS; + return skb->tc_at_ingress; #else return false; #endif diff --git a/net/core/dev.c b/net/core/dev.c index 8b5d6d033473..c143f1391117 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3153,9 +3153,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) if (!cl) return skb; - /* skb->tc_at and qdisc_skb_cb(skb)->pkt_len were already set - * earlier by the caller. - */ + /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. 
*/ qdisc_bstats_cpu_update(cl->q, skb); switch (tc_classify(skb, cl, &cl_res, false)) { @@ -3320,7 +3318,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) qdisc_pkt_len_init(skb); #ifdef CONFIG_NET_CLS_ACT - skb->tc_at = AT_EGRESS; + skb->tc_at_ingress = 0; # ifdef CONFIG_NET_EGRESS if (static_key_false(&egress_needed)) { skb = sch_handle_egress(skb, &rc, dev); @@ -3920,7 +3918,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, } qdisc_skb_cb(skb)->pkt_len = skb->len; - skb->tc_at = AT_INGRESS; + skb->tc_at_ingress = 1; qdisc_bstats_cpu_update(cl->q, skb); switch (tc_classify(skb, cl, &cl_res, false)) { diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 8543279bba49..e832c62fd705 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -39,15 +39,15 @@ static bool tcf_mirred_is_act_redirect(int action) return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR; } -static u32 tcf_mirred_act_direction(int action) +static bool tcf_mirred_act_wants_ingress(int action) { switch (action) { case TCA_EGRESS_REDIR: case TCA_EGRESS_MIRROR: - return AT_EGRESS; + return false; case TCA_INGRESS_REDIR: case TCA_INGRESS_MIRROR: - return AT_INGRESS; + return true; default: BUG(); } @@ -198,7 +198,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, * and devices expect a mac header on xmit, then mac push/pull is * needed. */ - if (skb->tc_at != tcf_mirred_act_direction(m_eaction) && + if (skb_at_tc_ingress(skb) != tcf_mirred_act_wants_ingress(m_eaction) && m_mac_header_xmit) { if (!skb_at_tc_ingress(skb)) { /* caught at egress, act ingress: pull mac */ @@ -212,11 +212,11 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, /* mirror is always swallowed */ if (tcf_mirred_is_act_redirect(m_eaction)) - skb2->tc_from = skb2->tc_at; + skb2->tc_from = skb_at_tc_ingress(skb) ? AT_INGRESS : AT_EGRESS; skb2->skb_iif = skb->dev->ifindex; skb2->dev = dev; - if (tcf_mirred_act_direction(m_eaction) & AT_EGRESS) + if (!tcf_mirred_act_wants_ingress(m_eaction)) err = dev_queue_xmit(skb2); else err = netif_receive_skb(skb2); -- cgit v1.2.3 From bc31c905e946b5c55df5d2938335e78ffb3157ca Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 7 Jan 2017 17:06:38 -0500 Subject: net-tc: convert tc_from to tc_from_ingress and tc_redirected The tc_from field fulfills two roles. It encodes whether a packet was redirected by an act_mirred device and, if so, whether act_mirred was called on ingress or egress. Split it into separate fields. The information is needed by the special IFB loop, where packets are taken out of the normal path by act_mirred, forwarded to IFB, then reinjected at their original location (ingress or egress) by IFB. The IFB device cannot use skb->tc_at_ingress, because that may have been overwritten as the packet travels from act_mirred to ifb_xmit, when it passes through tc_classify on the IFB egress path. Cache this value in skb->tc_from_ingress. That field is valid only if a packet arriving at ifb_xmit came from act_mirred. Other packets can be crafted to reach ifb_xmit. These must be dropped. Set tc_redirected on redirection and drop all packets that do not have this bit set. Both fields are set only on cloned skbs in tc actions, so original packet sources do not have to clear the bit when reusing packets (notably, pktgen and octeon). Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- drivers/net/ifb.c | 13 +++++-------- include/linux/skbuff.h | 5 ++++- include/net/sch_generic.h | 2 +- include/uapi/linux/pkt_cls.h | 6 ------ net/sched/act_mirred.c | 6 ++++-- net/sched/sch_netem.c | 2 +- 6 files changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index b73b6b6c066b..312fce7302d3 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -78,9 +78,7 @@ static void ifb_ri_tasklet(unsigned long _txp) } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { - u32 from = skb->tc_from; - - skb_reset_tc(skb); + skb->tc_redirected = 0; skb->tc_skip_classify = 1; u64_stats_update_begin(&txp->tsync); @@ -101,13 +99,12 @@ static void ifb_ri_tasklet(unsigned long _txp) rcu_read_unlock(); skb->skb_iif = txp->dev->ifindex; - if (from & AT_EGRESS) { + if (!skb->tc_from_ingress) { dev_queue_xmit(skb); - } else if (from & AT_INGRESS) { + } else { skb_pull(skb, skb->mac_len); netif_receive_skb(skb); - } else - BUG(); + } } if (__netif_tx_trylock(txq)) { @@ -246,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) txp->rx_bytes += skb->len; u64_stats_update_end(&txp->rsync); - if (skb->tc_from == AT_STACK || !skb->skb_iif) { + if (!skb->tc_redirected || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fab3f87e9bd1..3149a88de548 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -591,6 +591,8 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @ipvs_property: skbuff is owned by ipvs * @tc_skip_classify: do not classify packet. set by IFB device * @tc_at_ingress: used within tc_classify to distinguish in/egress + * @tc_redirected: packet was redirected by a tc action + * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag @@ -753,7 +755,8 @@ struct sk_buff { #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; - __u8 tc_from:2; + __u8 tc_redirected:1; + __u8 tc_from_ingress:1; #endif #ifdef CONFIG_NET_SCHED diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 4bd6d5387209..e2f426f6d62f 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -412,7 +412,7 @@ int skb_do_redirect(struct sk_buff *); static inline void skb_reset_tc(struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT - skb->tc_from = 0; + skb->tc_redirected = 0; #endif } diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index cee753a7a40c..a081efbd61a2 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -4,12 +4,6 @@ #include #include -#ifdef __KERNEL__ -#define AT_STACK 0x0 -#define AT_INGRESS 0x1 -#define AT_EGRESS 0x2 -#endif - /* Action attributes */ enum { TCA_ACT_UNSPEC, diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index e832c62fd705..84682f02b611 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -211,8 +211,10 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, } /* mirror is always swallowed */ - if (tcf_mirred_is_act_redirect(m_eaction)) - skb2->tc_from = skb_at_tc_ingress(skb) ? 
AT_INGRESS : AT_EGRESS; + if (tcf_mirred_is_act_redirect(m_eaction)) { + skb2->tc_redirected = 1; + skb2->tc_from_ingress = skb2->tc_at_ingress; + } skb2->skb_iif = skb->dev->ifindex; skb2->dev = dev; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index bb5c638b6852..c8bb62a1e744 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -626,7 +626,7 @@ deliver: * If it's at ingress let's pretend the delay is * from the network (tstamp will be updated). */ - if (skb->tc_from & AT_INGRESS) + if (skb->tc_redirected && skb->tc_from_ingress) skb->tstamp = 0; #endif -- cgit v1.2.3 From 8e8d7f13b6d5a93b3d2cf9a4ceaaf923809fd5ac Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:34 +0000 Subject: afs: Add some tracepoints Add three tracepoints to the AFS filesystem: (1) The afs_recv_data tracepoint logs data segments that are extracted from the data received from the peer through afs_extract_data(). (2) The afs_notify_call tracepoint logs notification from AF_RXRPC of data coming in to an asynchronous call. (3) The afs_cb_call tracepoint logs incoming calls that have had their operation ID extracted and mapped into a supported cache manager service call. To make (3) work, the name strings in the afs_call_type struct objects have to be annotated with __tracepoint_string. This is done with the CM_NAME() macro. Further, the AFS call state enum needs a name so that it can be used to declare parameter types. Signed-off-by: David Howells --- fs/afs/cmservice.c | 22 ++++++--- fs/afs/internal.h | 21 +++++---- fs/afs/main.c | 1 + fs/afs/rxrpc.c | 6 +++ include/trace/events/afs.h | 109 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 144 insertions(+), 15 deletions(-) create mode 100644 include/trace/events/afs.h diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index d764236072b1..a2e1e02005f6 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -25,11 +25,16 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *); static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *); static void afs_cm_destructor(struct afs_call *); +#define CM_NAME(name) \ + const char afs_SRXCB##name##_name[] __tracepoint_string = \ + "CB." 
#name + /* * CB.CallBack operation type */ +static CM_NAME(CallBack); static const struct afs_call_type afs_SRXCBCallBack = { - .name = "CB.CallBack", + .name = afs_SRXCBCallBack_name, .deliver = afs_deliver_cb_callback, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, @@ -38,8 +43,9 @@ static const struct afs_call_type afs_SRXCBCallBack = { /* * CB.InitCallBackState operation type */ +static CM_NAME(InitCallBackState); static const struct afs_call_type afs_SRXCBInitCallBackState = { - .name = "CB.InitCallBackState", + .name = afs_SRXCBInitCallBackState_name, .deliver = afs_deliver_cb_init_call_back_state, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, @@ -48,8 +54,9 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = { /* * CB.InitCallBackState3 operation type */ +static CM_NAME(InitCallBackState3); static const struct afs_call_type afs_SRXCBInitCallBackState3 = { - .name = "CB.InitCallBackState3", + .name = afs_SRXCBInitCallBackState3_name, .deliver = afs_deliver_cb_init_call_back_state3, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, @@ -58,8 +65,9 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = { /* * CB.Probe operation type */ +static CM_NAME(Probe); static const struct afs_call_type afs_SRXCBProbe = { - .name = "CB.Probe", + .name = afs_SRXCBProbe_name, .deliver = afs_deliver_cb_probe, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, @@ -68,8 +76,9 @@ static const struct afs_call_type afs_SRXCBProbe = { /* * CB.ProbeUuid operation type */ +static CM_NAME(ProbeUuid); static const struct afs_call_type afs_SRXCBProbeUuid = { - .name = "CB.ProbeUuid", + .name = afs_SRXCBProbeUuid_name, .deliver = afs_deliver_cb_probe_uuid, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, @@ -78,8 +87,9 @@ static const struct afs_call_type afs_SRXCBProbeUuid = { /* * CB.TellMeAboutYourself operation type */ +static CM_NAME(TellMeAboutYourself); static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { - .name = "CB.TellMeAboutYourself", + .name = afs_SRXCBTellMeAboutYourself_name, .deliver = afs_deliver_cb_tell_me_about_yourself, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 6f7a9638ba1a..f71e58fcc2f2 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -68,6 +68,15 @@ struct afs_wait_mode { extern const struct afs_wait_mode afs_sync_call; extern const struct afs_wait_mode afs_async_call; +enum afs_call_state { + AFS_CALL_REQUESTING, /* request is being sent for outgoing call */ + AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */ + AFS_CALL_AWAIT_OP_ID, /* awaiting op ID on incoming call */ + AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */ + AFS_CALL_REPLYING, /* replying to incoming call */ + AFS_CALL_AWAIT_ACK, /* awaiting final ACK of incoming call */ + AFS_CALL_COMPLETE, /* Completed or failed */ +}; /* * a record of an in-progress RxRPC call */ @@ -91,15 +100,7 @@ struct afs_call { pgoff_t first; /* first page in mapping to deal with */ pgoff_t last; /* last page in mapping to deal with */ size_t offset; /* offset into received data store */ - enum { /* call state */ - AFS_CALL_REQUESTING, /* request is being sent for outgoing call */ - AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */ - AFS_CALL_AWAIT_OP_ID, /* awaiting op ID on incoming call */ - AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */ - 
AFS_CALL_REPLYING, /* replying to incoming call */ - AFS_CALL_AWAIT_ACK, /* awaiting final ACK of incoming call */ - AFS_CALL_COMPLETE, /* Completed or failed */ - } state; + enum afs_call_state state; int error; /* error code */ u32 abort_code; /* Remote abort ID or 0 */ unsigned request_size; /* size of request data */ @@ -773,6 +774,8 @@ extern int afs_fsync(struct file *, loff_t, loff_t, int); /* * debug tracing */ +#include + extern unsigned afs_debug; #define dbgprintk(FMT,...) \ diff --git a/fs/afs/main.c b/fs/afs/main.c index 0b187ef3b5b7..f8188feb03ad 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -15,6 +15,7 @@ #include #include #include +#define CREATE_TRACE_POINTS #include "internal.h" MODULE_DESCRIPTION("AFS Client File System"); diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 25f05a8d21b1..f26344a8c029 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -416,6 +416,8 @@ static void afs_deliver_to_call(struct afs_call *call) ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall, NULL, 0, &offset, false, &call->abort_code); + trace_afs_recv_data(call, 0, offset, false, ret); + if (ret == -EINPROGRESS || ret == -EAGAIN) return; if (ret == 1 || ret < 0) { @@ -541,6 +543,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, { struct afs_call *call = (struct afs_call *)call_user_ID; + trace_afs_notify_call(rxcall, call); call->need_attention = true; queue_work(afs_async_calls, &call->async_work); } @@ -689,6 +692,8 @@ static int afs_deliver_cm_op_id(struct afs_call *call) if (!afs_cm_incoming_call(call)) return -ENOTSUPP; + trace_afs_cb_call(call); + /* pass responsibility for the remainer of this message off to the * cache manager op */ return call->type->deliver(call); @@ -780,6 +785,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count, ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall, buf, count, &call->offset, want_more, &call->abort_code); + trace_afs_recv_data(call, count, call->offset, want_more, ret); if (ret == 0 || ret == -EAGAIN) return ret; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h new file mode 100644 index 000000000000..845907b04ff4 --- /dev/null +++ b/include/trace/events/afs.h @@ -0,0 +1,109 @@ +/* AFS tracepoints + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM afs + +#if !defined(_TRACE_AFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_AFS_H + +#include + +TRACE_EVENT(afs_recv_data, + TP_PROTO(struct afs_call *call, unsigned count, unsigned offset, + bool want_more, int ret), + + TP_ARGS(call, count, offset, want_more, ret), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, rxcall ) + __field(struct afs_call *, call ) + __field(enum afs_call_state, state ) + __field(unsigned int, count ) + __field(unsigned int, offset ) + __field(unsigned short, unmarshall ) + __field(bool, want_more ) + __field(int, ret ) + ), + + TP_fast_assign( + __entry->rxcall = call->rxcall; + __entry->call = call; + __entry->state = call->state; + __entry->unmarshall = call->unmarshall; + __entry->count = count; + __entry->offset = offset; + __entry->want_more = want_more; + __entry->ret = ret; + ), + + TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d", + __entry->rxcall, + __entry->call, + __entry->state, __entry->unmarshall, + __entry->offset, __entry->count, + __entry->want_more, __entry->ret) + ); + +TRACE_EVENT(afs_notify_call, + TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call), + + TP_ARGS(rxcall, call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, rxcall ) + __field(struct afs_call *, call ) + __field(enum afs_call_state, state ) + __field(unsigned short, unmarshall ) + ), + + TP_fast_assign( + __entry->rxcall = rxcall; + __entry->call = call; + __entry->state = call->state; + __entry->unmarshall = call->unmarshall; + ), + + TP_printk("c=%p ac=%p s=%u u=%u", + __entry->rxcall, + __entry->call, + __entry->state, __entry->unmarshall) + ); + +TRACE_EVENT(afs_cb_call, + TP_PROTO(struct afs_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, rxcall ) + __field(struct afs_call *, call ) + __field(const char *, name ) + __field(u32, op ) + ), + + TP_fast_assign( + __entry->rxcall = call->rxcall; + __entry->call = call; + __entry->name = call->type->name; + __entry->op = call->operation_ID; + ), + + TP_printk("c=%p ac=%p %s o=%u", + __entry->rxcall, + __entry->call, + __entry->name, + __entry->op) + ); + +#endif /* _TRACE_AFS_H */ + +/* This part must be outside protection */ +#include -- cgit v1.2.3 From 9b8e34e211b15af429b72388a8f2b3b1823d172e Mon Sep 17 00:00:00 2001 From: Michał Kępień Date: Fri, 6 Jan 2017 07:07:47 +0100 Subject: rfkill: Add rfkill-any LED trigger MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new "global" (i.e. not per-rfkill device) LED trigger, rfkill-any, which may be useful on laptops with a single "radio LED" and multiple radio transmitters. The trigger is meant to turn a LED on whenever there is at least one radio transmitter active and turn it off otherwise. 
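For context, a minimal sketch of how a driver-side LED could be tied to the new trigger. All names here are hypothetical; only the trigger string "rfkill-any" comes from the patch below, and this is an illustration of the LED-trigger pattern rather than code from the patch.

#include <linux/leds.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: an LED that follows overall radio state by default.
 * The device name and callback body are illustrative only.
 */
static void demo_radio_led_set(struct led_classdev *cdev,
			       enum led_brightness value)
{
	/* drive the real LED hardware with `value` here */
}

static struct led_classdev demo_radio_led = {
	.name		 = "demo::radio",
	.default_trigger = "rfkill-any",
	.brightness_set	 = demo_radio_led_set,
};

static int demo_led_probe(struct platform_device *pdev)
{
	return devm_led_classdev_register(&pdev->dev, &demo_radio_led);
}

The same association can also be made at runtime from userspace by writing the trigger name into /sys/class/leds/<led>/trigger.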
Signed-off-by: Michał Kępień Signed-off-by: Johannes Berg --- net/rfkill/core.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index afa4f71b4c7b..2064c3a35ef8 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -176,6 +176,50 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill) { led_trigger_unregister(&rfkill->led_trigger); } + +static struct led_trigger rfkill_any_led_trigger; +static struct work_struct rfkill_any_work; + +static void rfkill_any_led_trigger_worker(struct work_struct *work) +{ + enum led_brightness brightness = LED_OFF; + struct rfkill *rfkill; + + mutex_lock(&rfkill_global_mutex); + list_for_each_entry(rfkill, &rfkill_list, node) { + if (!(rfkill->state & RFKILL_BLOCK_ANY)) { + brightness = LED_FULL; + break; + } + } + mutex_unlock(&rfkill_global_mutex); + + led_trigger_event(&rfkill_any_led_trigger, brightness); +} + +static void rfkill_any_led_trigger_event(void) +{ + schedule_work(&rfkill_any_work); +} + +static void rfkill_any_led_trigger_activate(struct led_classdev *led_cdev) +{ + rfkill_any_led_trigger_event(); +} + +static int rfkill_any_led_trigger_register(void) +{ + INIT_WORK(&rfkill_any_work, rfkill_any_led_trigger_worker); + rfkill_any_led_trigger.name = "rfkill-any"; + rfkill_any_led_trigger.activate = rfkill_any_led_trigger_activate; + return led_trigger_register(&rfkill_any_led_trigger); +} + +static void rfkill_any_led_trigger_unregister(void) +{ + led_trigger_unregister(&rfkill_any_led_trigger); + cancel_work_sync(&rfkill_any_work); +} #else static void rfkill_led_trigger_event(struct rfkill *rfkill) { @@ -189,6 +233,19 @@ static inline int rfkill_led_trigger_register(struct rfkill *rfkill) static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) { } + +static void rfkill_any_led_trigger_event(void) +{ +} + +static int rfkill_any_led_trigger_register(void) +{ + return 0; +} + +static void rfkill_any_led_trigger_unregister(void) +{ +} #endif /* CONFIG_RFKILL_LEDS */ static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, @@ -297,6 +354,7 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked) spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); + rfkill_any_led_trigger_event(); if (prev != curr) rfkill_event(rfkill); @@ -477,6 +535,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); + rfkill_any_led_trigger_event(); if (rfkill->registered && prev != blocked) schedule_work(&rfkill->uevent_work); @@ -520,6 +579,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); + rfkill_any_led_trigger_event(); return blocked; } @@ -569,6 +629,7 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); + rfkill_any_led_trigger_event(); } } EXPORT_SYMBOL(rfkill_set_states); @@ -985,6 +1046,7 @@ int __must_check rfkill_register(struct rfkill *rfkill) #endif } + rfkill_any_led_trigger_event(); rfkill_send_events(rfkill, RFKILL_OP_ADD); mutex_unlock(&rfkill_global_mutex); @@ -1017,6 +1079,7 @@ void rfkill_unregister(struct rfkill *rfkill) mutex_lock(&rfkill_global_mutex); rfkill_send_events(rfkill, RFKILL_OP_DEL); list_del_init(&rfkill->node); + rfkill_any_led_trigger_event(); mutex_unlock(&rfkill_global_mutex); 
rfkill_led_trigger_unregister(rfkill); @@ -1269,6 +1332,10 @@ static int __init rfkill_init(void) if (error) goto error_misc; + error = rfkill_any_led_trigger_register(); + if (error) + goto error_led_trigger; + #ifdef CONFIG_RFKILL_INPUT error = rfkill_handler_init(); if (error) @@ -1279,8 +1346,10 @@ static int __init rfkill_init(void) #ifdef CONFIG_RFKILL_INPUT error_input: - misc_deregister(&rfkill_miscdev); + rfkill_any_led_trigger_unregister(); #endif +error_led_trigger: + misc_deregister(&rfkill_miscdev); error_misc: class_unregister(&rfkill_class); error_class: @@ -1293,6 +1362,7 @@ static void __exit rfkill_exit(void) #ifdef CONFIG_RFKILL_INPUT rfkill_handler_exit(); #endif + rfkill_any_led_trigger_unregister(); misc_deregister(&rfkill_miscdev); class_unregister(&rfkill_class); } -- cgit v1.2.3 From 56ff9c837778ba2cf76f29c966856a9341e5939d Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:36 +0000 Subject: afs: Kill afs_wait_mode The afs_wait_mode struct isn't really necessary. Client calls only use one of a choice of two (synchronous or the asynchronous) and incoming calls don't use the wait at all. Replace with a boolean parameter. Signed-off-by: David Howells --- fs/afs/callback.c | 2 +- fs/afs/fsclient.c | 80 +++++++++++++++++++++++++++--------------------------- fs/afs/internal.h | 68 ++++++++++++---------------------------------- fs/afs/rxrpc.c | 51 ++++++++-------------------------- fs/afs/vlclient.c | 8 +++--- fs/afs/vlocation.c | 4 +-- fs/afs/vnode.c | 26 +++++++++--------- 7 files changed, 90 insertions(+), 149 deletions(-) diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 1e9d2f84e5b5..b29447e03ede 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -343,7 +343,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work) * had callbacks entirely, and the server will call us later to break * them */ - afs_fs_give_up_callbacks(server, &afs_async_call); + afs_fs_give_up_callbacks(server, true); } /* diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 7dc1f6fb3661..ac8e766978dc 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -275,7 +275,7 @@ int afs_fs_fetch_file_status(struct afs_server *server, struct key *key, struct afs_vnode *vnode, struct afs_volsync *volsync, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -300,7 +300,7 @@ int afs_fs_fetch_file_status(struct afs_server *server, bp[2] = htonl(vnode->fid.vnode); bp[3] = htonl(vnode->fid.unique); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -464,7 +464,7 @@ static int afs_fs_fetch_data64(struct afs_server *server, struct key *key, struct afs_vnode *vnode, struct afs_read *req, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -495,7 +495,7 @@ static int afs_fs_fetch_data64(struct afs_server *server, bp[7] = htonl(lower_32_bits(req->len)); atomic_inc(&req->usage); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -505,7 +505,7 @@ int afs_fs_fetch_data(struct afs_server *server, struct key *key, struct afs_vnode *vnode, struct afs_read *req, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -513,7 +513,7 @@ int afs_fs_fetch_data(struct afs_server *server, if (upper_32_bits(req->pos) || upper_32_bits(req->len) || upper_32_bits(req->pos + req->len)) - return 
afs_fs_fetch_data64(server, key, vnode, req, wait_mode); + return afs_fs_fetch_data64(server, key, vnode, req, async); _enter(""); @@ -539,7 +539,7 @@ int afs_fs_fetch_data(struct afs_server *server, bp[5] = htonl(lower_32_bits(req->len)); atomic_inc(&req->usage); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -568,7 +568,7 @@ static const struct afs_call_type afs_RXFSGiveUpCallBacks = { * - the callbacks are held in the server->cb_break ring */ int afs_fs_give_up_callbacks(struct afs_server *server, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; size_t ncallbacks; @@ -622,7 +622,7 @@ int afs_fs_give_up_callbacks(struct afs_server *server, ASSERT(ncallbacks > 0); wake_up_nr(&server->cb_break_waitq, ncallbacks); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -673,7 +673,7 @@ int afs_fs_create(struct afs_server *server, struct afs_fid *newfid, struct afs_file_status *newstatus, struct afs_callback *newcb, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; size_t namesz, reqsz, padsz; @@ -718,7 +718,7 @@ int afs_fs_create(struct afs_server *server, *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ *bp++ = 0; /* segment size */ - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -763,7 +763,7 @@ int afs_fs_remove(struct afs_server *server, struct afs_vnode *vnode, const char *name, bool isdir, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; size_t namesz, reqsz, padsz; @@ -798,7 +798,7 @@ int afs_fs_remove(struct afs_server *server, bp = (void *) bp + padsz; } - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -844,7 +844,7 @@ int afs_fs_link(struct afs_server *server, struct afs_vnode *dvnode, struct afs_vnode *vnode, const char *name, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; size_t namesz, reqsz, padsz; @@ -883,7 +883,7 @@ int afs_fs_link(struct afs_server *server, *bp++ = htonl(vnode->fid.vnode); *bp++ = htonl(vnode->fid.unique); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -932,7 +932,7 @@ int afs_fs_symlink(struct afs_server *server, const char *contents, struct afs_fid *newfid, struct afs_file_status *newstatus, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; size_t namesz, reqsz, padsz, c_namesz, c_padsz; @@ -987,7 +987,7 @@ int afs_fs_symlink(struct afs_server *server, *bp++ = htonl(S_IRWXUGO); /* unix mode */ *bp++ = 0; /* segment size */ - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1036,7 +1036,7 @@ int afs_fs_rename(struct afs_server *server, const char *orig_name, struct afs_vnode *new_dvnode, const char *new_name, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz; @@ -1090,7 +1090,7 @@ int afs_fs_rename(struct afs_server *server, bp = (void *) bp + n_padsz; } - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1145,7 +1145,7 @@ static 
int afs_fs_store_data64(struct afs_server *server, pgoff_t first, pgoff_t last, unsigned offset, unsigned to, loff_t size, loff_t pos, loff_t i_size, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_vnode *vnode = wb->vnode; struct afs_call *call; @@ -1194,7 +1194,7 @@ static int afs_fs_store_data64(struct afs_server *server, *bp++ = htonl(i_size >> 32); *bp++ = htonl((u32) i_size); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1203,7 +1203,7 @@ static int afs_fs_store_data64(struct afs_server *server, int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb, pgoff_t first, pgoff_t last, unsigned offset, unsigned to, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_vnode *vnode = wb->vnode; struct afs_call *call; @@ -1229,7 +1229,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb, if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32) return afs_fs_store_data64(server, wb, first, last, offset, to, - size, pos, i_size, wait_mode); + size, pos, i_size, async); call = afs_alloc_flat_call(&afs_RXFSStoreData, (4 + 6 + 3) * 4, @@ -1268,7 +1268,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb, *bp++ = htonl(size); *bp++ = htonl(i_size); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1330,7 +1330,7 @@ static const struct afs_call_type afs_RXFSStoreData64_as_Status = { */ static int afs_fs_setattr_size64(struct afs_server *server, struct key *key, struct afs_vnode *vnode, struct iattr *attr, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -1369,7 +1369,7 @@ static int afs_fs_setattr_size64(struct afs_server *server, struct key *key, *bp++ = htonl(attr->ia_size >> 32); /* new file length */ *bp++ = htonl((u32) attr->ia_size); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1378,7 +1378,7 @@ static int afs_fs_setattr_size64(struct afs_server *server, struct key *key, */ static int afs_fs_setattr_size(struct afs_server *server, struct key *key, struct afs_vnode *vnode, struct iattr *attr, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -1389,7 +1389,7 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key, ASSERT(attr->ia_valid & ATTR_SIZE); if (attr->ia_size >> 32) return afs_fs_setattr_size64(server, key, vnode, attr, - wait_mode); + async); call = afs_alloc_flat_call(&afs_RXFSStoreData_as_Status, (4 + 6 + 3) * 4, @@ -1417,7 +1417,7 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key, *bp++ = 0; /* size of write */ *bp++ = htonl(attr->ia_size); /* new file length */ - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1426,14 +1426,14 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key, */ int afs_fs_setattr(struct afs_server *server, struct key *key, struct afs_vnode *vnode, struct iattr *attr, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; if (attr->ia_valid & ATTR_SIZE) return afs_fs_setattr_size(server, key, vnode, attr, - wait_mode); + async); _enter(",%x,{%x:%u},,", key_serial(key), vnode->fid.vid, vnode->fid.vnode); 
@@ -1459,7 +1459,7 @@ int afs_fs_setattr(struct afs_server *server, struct key *key, xdr_encode_AFS_StoreStatus(&bp, attr); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1661,7 +1661,7 @@ int afs_fs_get_volume_status(struct afs_server *server, struct key *key, struct afs_vnode *vnode, struct afs_volume_status *vs, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -1691,7 +1691,7 @@ int afs_fs_get_volume_status(struct afs_server *server, bp[0] = htonl(FSGETVOLUMESTATUS); bp[1] = htonl(vnode->fid.vid); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1753,7 +1753,7 @@ int afs_fs_set_lock(struct afs_server *server, struct key *key, struct afs_vnode *vnode, afs_lock_type_t type, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -1777,7 +1777,7 @@ int afs_fs_set_lock(struct afs_server *server, *bp++ = htonl(vnode->fid.unique); *bp++ = htonl(type); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1786,7 +1786,7 @@ int afs_fs_set_lock(struct afs_server *server, int afs_fs_extend_lock(struct afs_server *server, struct key *key, struct afs_vnode *vnode, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -1809,7 +1809,7 @@ int afs_fs_extend_lock(struct afs_server *server, *bp++ = htonl(vnode->fid.vnode); *bp++ = htonl(vnode->fid.unique); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } /* @@ -1818,7 +1818,7 @@ int afs_fs_extend_lock(struct afs_server *server, int afs_fs_release_lock(struct afs_server *server, struct key *key, struct afs_vnode *vnode, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -1841,5 +1841,5 @@ int afs_fs_release_lock(struct afs_server *server, *bp++ = htonl(vnode->fid.vnode); *bp++ = htonl(vnode->fid.unique); - return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); + return afs_make_call(&server->addr, call, GFP_NOFS, async); } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index f71e58fcc2f2..b411670d5f67 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -51,23 +51,6 @@ struct afs_mount_params { struct key *key; /* key to use for secure mounting */ }; -/* - * definition of how to wait for the completion of an operation - */ -struct afs_wait_mode { - /* RxRPC received message notification */ - rxrpc_notify_rx_t notify_rx; - - /* synchronous call waiter and call dispatched notification */ - int (*wait)(struct afs_call *call); - - /* asynchronous call completion */ - void (*async_complete)(void *reply, int error); -}; - -extern const struct afs_wait_mode afs_sync_call; -extern const struct afs_wait_mode afs_async_call; - enum afs_call_state { AFS_CALL_REQUESTING, /* request is being sent for outgoing call */ AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */ @@ -82,7 +65,6 @@ enum afs_call_state { */ struct afs_call { const struct afs_call_type *type; /* type of call */ - const struct afs_wait_mode *wait_mode; /* completion wait mode */ wait_queue_head_t waitq; /* processes awaiting completion */ struct work_struct async_work; /* asynchronous work processor */ struct work_struct work; /* actual work processor */ @@ -111,6 +93,7 @@ 
struct afs_call { bool incoming; /* T if incoming call */ bool send_pages; /* T if data from mapping should be sent */ bool need_attention; /* T if RxRPC poked us */ + bool async; /* T if asynchronous */ u16 service_id; /* RxRPC service ID to call */ __be16 port; /* target UDP port */ u32 operation_ID; /* operation ID for an incoming call */ @@ -527,50 +510,37 @@ extern int afs_flock(struct file *, int, struct file_lock *); */ extern int afs_fs_fetch_file_status(struct afs_server *, struct key *, struct afs_vnode *, struct afs_volsync *, - const struct afs_wait_mode *); -extern int afs_fs_give_up_callbacks(struct afs_server *, - const struct afs_wait_mode *); + bool); +extern int afs_fs_give_up_callbacks(struct afs_server *, bool); extern int afs_fs_fetch_data(struct afs_server *, struct key *, - struct afs_vnode *, struct afs_read *, - const struct afs_wait_mode *); + struct afs_vnode *, struct afs_read *, bool); extern int afs_fs_create(struct afs_server *, struct key *, struct afs_vnode *, const char *, umode_t, struct afs_fid *, struct afs_file_status *, - struct afs_callback *, - const struct afs_wait_mode *); + struct afs_callback *, bool); extern int afs_fs_remove(struct afs_server *, struct key *, - struct afs_vnode *, const char *, bool, - const struct afs_wait_mode *); + struct afs_vnode *, const char *, bool, bool); extern int afs_fs_link(struct afs_server *, struct key *, struct afs_vnode *, - struct afs_vnode *, const char *, - const struct afs_wait_mode *); + struct afs_vnode *, const char *, bool); extern int afs_fs_symlink(struct afs_server *, struct key *, struct afs_vnode *, const char *, const char *, - struct afs_fid *, struct afs_file_status *, - const struct afs_wait_mode *); + struct afs_fid *, struct afs_file_status *, bool); extern int afs_fs_rename(struct afs_server *, struct key *, struct afs_vnode *, const char *, - struct afs_vnode *, const char *, - const struct afs_wait_mode *); + struct afs_vnode *, const char *, bool); extern int afs_fs_store_data(struct afs_server *, struct afs_writeback *, - pgoff_t, pgoff_t, unsigned, unsigned, - const struct afs_wait_mode *); + pgoff_t, pgoff_t, unsigned, unsigned, bool); extern int afs_fs_setattr(struct afs_server *, struct key *, - struct afs_vnode *, struct iattr *, - const struct afs_wait_mode *); + struct afs_vnode *, struct iattr *, bool); extern int afs_fs_get_volume_status(struct afs_server *, struct key *, struct afs_vnode *, - struct afs_volume_status *, - const struct afs_wait_mode *); + struct afs_volume_status *, bool); extern int afs_fs_set_lock(struct afs_server *, struct key *, - struct afs_vnode *, afs_lock_type_t, - const struct afs_wait_mode *); + struct afs_vnode *, afs_lock_type_t, bool); extern int afs_fs_extend_lock(struct afs_server *, struct key *, - struct afs_vnode *, - const struct afs_wait_mode *); + struct afs_vnode *, bool); extern int afs_fs_release_lock(struct afs_server *, struct key *, - struct afs_vnode *, - const struct afs_wait_mode *); + struct afs_vnode *, bool); /* * inode.c @@ -624,8 +594,7 @@ extern struct socket *afs_socket; extern int afs_open_socket(void); extern void afs_close_socket(void); -extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, - const struct afs_wait_mode *); +extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, bool); extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, size_t, size_t); extern void afs_flat_call_destructor(struct afs_call *); @@ -681,11 +650,10 @@ extern int afs_get_MAC_address(u8 *, 
size_t); */ extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *, const char *, struct afs_cache_vlocation *, - const struct afs_wait_mode *); + bool); extern int afs_vl_get_entry_by_id(struct in_addr *, struct key *, afs_volid_t, afs_voltype_t, - struct afs_cache_vlocation *, - const struct afs_wait_mode *); + struct afs_cache_vlocation *, bool); /* * vlocation.c diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index f26344a8c029..ec1e41f929d1 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -25,29 +25,11 @@ static void afs_free_call(struct afs_call *); static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); static int afs_wait_for_call_to_complete(struct afs_call *); static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); -static int afs_dont_wait_for_call_to_complete(struct afs_call *); static void afs_process_async_call(struct work_struct *); static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); static int afs_deliver_cm_op_id(struct afs_call *); -/* synchronous call management */ -const struct afs_wait_mode afs_sync_call = { - .notify_rx = afs_wake_up_call_waiter, - .wait = afs_wait_for_call_to_complete, -}; - -/* asynchronous call management */ -const struct afs_wait_mode afs_async_call = { - .notify_rx = afs_wake_up_async_call, - .wait = afs_dont_wait_for_call_to_complete, -}; - -/* asynchronous incoming call management */ -static const struct afs_wait_mode afs_async_incoming_call = { - .notify_rx = afs_wake_up_async_call, -}; - /* asynchronous incoming call initial processing */ static const struct afs_call_type afs_RXCMxxxx = { .name = "CB.xxxx", @@ -315,7 +297,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, * initiate a call */ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, - const struct afs_wait_mode *wait_mode) + bool async) { struct sockaddr_rxrpc srx; struct rxrpc_call *rxcall; @@ -332,7 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, call, call->type->name, key_serial(call->key), atomic_read(&afs_outstanding_calls)); - call->wait_mode = wait_mode; + call->async = async; INIT_WORK(&call->async_work, afs_process_async_call); memset(&srx, 0, sizeof(srx)); @@ -347,7 +329,9 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, /* create a call */ rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key, (unsigned long) call, gfp, - wait_mode->notify_rx); + (async ? 
+ afs_wake_up_async_call : + afs_wake_up_call_waiter)); call->key = NULL; if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); @@ -386,7 +370,10 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, /* at this point, an async call may no longer exist as it may have * already completed */ - return wait_mode->wait(call); + if (call->async) + return -EINPROGRESS; + + return afs_wait_for_call_to_complete(call); error_do_abort: rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD"); @@ -548,17 +535,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, queue_work(afs_async_calls, &call->async_work); } -/* - * put a call into asynchronous mode - * - mustn't touch the call descriptor as the call my have completed by the - * time we get here - */ -static int afs_dont_wait_for_call_to_complete(struct afs_call *call) -{ - _enter(""); - return -EINPROGRESS; -} - /* * delete an asynchronous call */ @@ -587,10 +563,7 @@ static void afs_process_async_call(struct work_struct *work) afs_deliver_to_call(call); } - if (call->state == AFS_CALL_COMPLETE && call->wait_mode) { - if (call->wait_mode->async_complete) - call->wait_mode->async_complete(call->reply, - call->error); + if (call->state == AFS_CALL_COMPLETE) { call->reply = NULL; /* kill the call */ @@ -626,10 +599,10 @@ static void afs_charge_preallocation(struct work_struct *work) break; INIT_WORK(&call->async_work, afs_process_async_call); - call->wait_mode = &afs_async_incoming_call; call->type = &afs_RXCMxxxx; - init_waitqueue_head(&call->waitq); + call->async = true; call->state = AFS_CALL_AWAIT_OP_ID; + init_waitqueue_head(&call->waitq); } if (rxrpc_kernel_charge_accept(afs_socket, diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 94bcd97d22b8..a5e4cc561b6c 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -147,7 +147,7 @@ int afs_vl_get_entry_by_name(struct in_addr *addr, struct key *key, const char *volname, struct afs_cache_vlocation *entry, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; size_t volnamesz, reqsz, padsz; @@ -177,7 +177,7 @@ int afs_vl_get_entry_by_name(struct in_addr *addr, memset((void *) bp + volnamesz, 0, padsz); /* initiate the call */ - return afs_make_call(addr, call, GFP_KERNEL, wait_mode); + return afs_make_call(addr, call, GFP_KERNEL, async); } /* @@ -188,7 +188,7 @@ int afs_vl_get_entry_by_id(struct in_addr *addr, afs_volid_t volid, afs_voltype_t voltype, struct afs_cache_vlocation *entry, - const struct afs_wait_mode *wait_mode) + bool async) { struct afs_call *call; __be32 *bp; @@ -211,5 +211,5 @@ int afs_vl_get_entry_by_id(struct in_addr *addr, *bp = htonl(voltype); /* initiate the call */ - return afs_make_call(addr, call, GFP_KERNEL, wait_mode); + return afs_make_call(addr, call, GFP_KERNEL, async); } diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c index 45a86396fd2d..d7d8dd8c0b31 100644 --- a/fs/afs/vlocation.c +++ b/fs/afs/vlocation.c @@ -53,7 +53,7 @@ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl, /* attempt to access the VL server */ ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb, - &afs_sync_call); + false); switch (ret) { case 0: goto out; @@ -111,7 +111,7 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl, /* attempt to access the VL server */ ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb, - &afs_sync_call); + false); switch (ret) { case 0: goto out; diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c index 
45aa874f5d32..dcb956143c86 100644 --- a/fs/afs/vnode.c +++ b/fs/afs/vnode.c @@ -358,7 +358,7 @@ get_anyway: server, ntohl(server->addr.s_addr)); ret = afs_fs_fetch_file_status(server, key, vnode, NULL, - &afs_sync_call); + false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -421,7 +421,7 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); ret = afs_fs_fetch_data(server, key, vnode, desc, - &afs_sync_call); + false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -477,7 +477,7 @@ int afs_vnode_create(struct afs_vnode *vnode, struct key *key, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); ret = afs_fs_create(server, key, vnode, name, mode, newfid, - newstatus, newcb, &afs_sync_call); + newstatus, newcb, false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -533,7 +533,7 @@ int afs_vnode_remove(struct afs_vnode *vnode, struct key *key, const char *name, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); ret = afs_fs_remove(server, key, vnode, name, isdir, - &afs_sync_call); + false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -595,7 +595,7 @@ int afs_vnode_link(struct afs_vnode *dvnode, struct afs_vnode *vnode, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); ret = afs_fs_link(server, key, dvnode, vnode, name, - &afs_sync_call); + false); } while (!afs_volume_release_fileserver(dvnode, server, ret)); @@ -659,7 +659,7 @@ int afs_vnode_symlink(struct afs_vnode *vnode, struct key *key, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); ret = afs_fs_symlink(server, key, vnode, name, content, - newfid, newstatus, &afs_sync_call); + newfid, newstatus, false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -729,7 +729,7 @@ int afs_vnode_rename(struct afs_vnode *orig_dvnode, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); ret = afs_fs_rename(server, key, orig_dvnode, orig_name, - new_dvnode, new_name, &afs_sync_call); + new_dvnode, new_name, false); } while (!afs_volume_release_fileserver(orig_dvnode, server, ret)); @@ -795,7 +795,7 @@ int afs_vnode_store_data(struct afs_writeback *wb, pgoff_t first, pgoff_t last, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); ret = afs_fs_store_data(server, wb, first, last, offset, to, - &afs_sync_call); + false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -847,7 +847,7 @@ int afs_vnode_setattr(struct afs_vnode *vnode, struct key *key, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); - ret = afs_fs_setattr(server, key, vnode, attr, &afs_sync_call); + ret = afs_fs_setattr(server, key, vnode, attr, false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -894,7 +894,7 @@ int afs_vnode_get_volume_status(struct afs_vnode *vnode, struct key *key, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); - ret = afs_fs_get_volume_status(server, key, vnode, vs, &afs_sync_call); + ret = afs_fs_get_volume_status(server, key, vnode, vs, false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -933,7 +933,7 @@ int afs_vnode_set_lock(struct afs_vnode *vnode, struct key *key, _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); - ret = afs_fs_set_lock(server, key, vnode, type, &afs_sync_call); + ret = afs_fs_set_lock(server, key, vnode, type, false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -971,7 +971,7 @@ int afs_vnode_extend_lock(struct 
afs_vnode *vnode, struct key *key) _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); - ret = afs_fs_extend_lock(server, key, vnode, &afs_sync_call); + ret = afs_fs_extend_lock(server, key, vnode, false); } while (!afs_volume_release_fileserver(vnode, server, ret)); @@ -1009,7 +1009,7 @@ int afs_vnode_release_lock(struct afs_vnode *vnode, struct key *key) _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); - ret = afs_fs_release_lock(server, key, vnode, &afs_sync_call); + ret = afs_fs_release_lock(server, key, vnode, false); } while (!afs_volume_release_fileserver(vnode, server, ret)); -- cgit v1.2.3 From 210f035316f545e6f507e7d61e191495ba983e27 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:36 +0000 Subject: rxrpc: Allow listen(sock, 0) to be used to disable listening Allow listen() with a backlog of 0 to be used to disable listening on an AF_RXRPC socket. This also releases any preallocation, thereby making it easier for a kernel service to account for all allocated call structures when shutting down the service. The socket cannot thereafter have listening reenabled, but must rather be closed and reopened. Signed-off-by: David Howells --- net/rxrpc/af_rxrpc.c | 8 ++++++++ net/rxrpc/ar-internal.h | 1 + net/rxrpc/call_accept.c | 3 ++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 5f63f6dcaabb..199b46e93e64 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -224,6 +224,14 @@ static int rxrpc_listen(struct socket *sock, int backlog) else sk->sk_max_ack_backlog = old; break; + case RXRPC_SERVER_LISTENING: + if (backlog == 0) { + rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED; + sk->sk_max_ack_backlog = 0; + rxrpc_discard_prealloc(rx); + ret = 0; + break; + } default: ret = -EBUSY; break; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 84927c7b5fdf..12be432be9b2 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -60,6 +60,7 @@ enum { RXRPC_CLIENT_BOUND, /* client local address bound */ RXRPC_SERVER_BOUND, /* server local address bound */ RXRPC_SERVER_LISTENING, /* server listening for connections */ + RXRPC_SERVER_LISTEN_DISABLED, /* server listening disabled */ RXRPC_CLOSE, /* socket is being closed */ }; diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 832d854c2d5c..7c4c64ab8da2 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -349,7 +349,8 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, found_service: spin_lock(&rx->incoming_lock); - if (rx->sk.sk_state == RXRPC_CLOSE) { + if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED || + rx->sk.sk_state == RXRPC_CLOSE) { trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT; -- cgit v1.2.3 From 341f741f04beceebcb30daa12ae2e5e52e64e532 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 5 Jan 2017 10:38:36 +0000 Subject: afs: Refcount the afs_call struct A static checker warning occurs in the AFS filesystem: fs/afs/cmservice.c:155 SRXAFSCB_CallBack() error: dereferencing freed memory 'call' due to the reply being sent before we access the server it points to. The act of sending the reply causes the call to be freed if an error occurs (but not if it doesn't). On top of this, the lifetime handling of afs_call structs is fragile because they get passed around through workqueues without any sort of refcounting. 
Deal with the issues by: (1) Fix the maybe/maybe not nature of the reply sending functions with regards to whether they release the call struct. (2) Refcount the afs_call struct and sort out places that need to get/put references. (3) Pass a ref through the work queue and release (or pass on) that ref in the work function. Care has to be taken because a work queue may already own a ref to the call. (4) Do the cleaning up in the put function only. (5) Simplify module cleanup by always incrementing afs_outstanding_calls whenever a call is allocated. (6) Set the backlog to 0 with kernel_listen() at the beginning of the process of closing the socket to prevent new incoming calls from occurring and to remove the contribution of preallocated calls from afs_outstanding_calls before we wait on it. A tracepoint is also added to monitor the afs_call refcount and lifetime. Reported-by: Dan Carpenter Signed-off-by: David Howells Fixes: 08e0e7c82eea: "[AF_RXRPC]: Make the in-kernel AFS filesystem use AF_RXRPC." --- fs/afs/cmservice.c | 41 ++++++------ fs/afs/internal.h | 9 ++- fs/afs/rxrpc.c | 153 ++++++++++++++++++++++++++++----------------- include/trace/events/afs.h | 75 ++++++++++++++++++++++ 4 files changed, 199 insertions(+), 79 deletions(-) diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index a2e1e02005f6..e349a3316303 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -24,6 +24,11 @@ static int afs_deliver_cb_callback(struct afs_call *); static int afs_deliver_cb_probe_uuid(struct afs_call *); static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *); static void afs_cm_destructor(struct afs_call *); +static void SRXAFSCB_CallBack(struct work_struct *); +static void SRXAFSCB_InitCallBackState(struct work_struct *); +static void SRXAFSCB_Probe(struct work_struct *); +static void SRXAFSCB_ProbeUuid(struct work_struct *); +static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); #define CM_NAME(name) \ const char afs_SRXCB##name##_name[] __tracepoint_string = \ @@ -38,6 +43,7 @@ static const struct afs_call_type afs_SRXCBCallBack = { .deliver = afs_deliver_cb_callback, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, + .work = SRXAFSCB_CallBack, }; /* @@ -49,6 +55,7 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = { .deliver = afs_deliver_cb_init_call_back_state, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, + .work = SRXAFSCB_InitCallBackState, }; /* @@ -60,6 +67,7 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = { .deliver = afs_deliver_cb_init_call_back_state3, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, + .work = SRXAFSCB_InitCallBackState, }; /* @@ -71,6 +79,7 @@ static const struct afs_call_type afs_SRXCBProbe = { .deliver = afs_deliver_cb_probe, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, + .work = SRXAFSCB_Probe, }; /* @@ -82,6 +91,7 @@ static const struct afs_call_type afs_SRXCBProbeUuid = { .deliver = afs_deliver_cb_probe_uuid, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, + .work = SRXAFSCB_ProbeUuid, }; /* @@ -93,6 +103,7 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { .deliver = afs_deliver_cb_tell_me_about_yourself, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, + .work = SRXAFSCB_TellMeAboutYourself, }; /* @@ -163,6 +174,7 @@ static void SRXAFSCB_CallBack(struct work_struct *work) afs_send_empty_reply(call); afs_break_callbacks(call->server, 
call->count, call->request); + afs_put_call(call); _leave(""); } @@ -284,9 +296,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) return -ENOTCONN; call->server = server; - INIT_WORK(&call->work, SRXAFSCB_CallBack); - queue_work(afs_wq, &call->work); - return 0; + return afs_queue_call_work(call); } /* @@ -300,6 +310,7 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work) afs_init_callback_state(call->server); afs_send_empty_reply(call); + afs_put_call(call); _leave(""); } @@ -330,9 +341,7 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call) return -ENOTCONN; call->server = server; - INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); - queue_work(afs_wq, &call->work); - return 0; + return afs_queue_call_work(call); } /* @@ -404,9 +413,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) return -ENOTCONN; call->server = server; - INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); - queue_work(afs_wq, &call->work); - return 0; + return afs_queue_call_work(call); } /* @@ -418,6 +425,7 @@ static void SRXAFSCB_Probe(struct work_struct *work) _enter(""); afs_send_empty_reply(call); + afs_put_call(call); _leave(""); } @@ -437,9 +445,7 @@ static int afs_deliver_cb_probe(struct afs_call *call) /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; - INIT_WORK(&call->work, SRXAFSCB_Probe); - queue_work(afs_wq, &call->work); - return 0; + return afs_queue_call_work(call); } /* @@ -462,6 +468,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work) reply.match = htonl(1); afs_send_simple_reply(call, &reply, sizeof(reply)); + afs_put_call(call); _leave(""); } @@ -520,9 +527,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) call->state = AFS_CALL_REPLYING; - INIT_WORK(&call->work, SRXAFSCB_ProbeUuid); - queue_work(afs_wq, &call->work); - return 0; + return afs_queue_call_work(call); } /* @@ -584,7 +589,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) reply.cap.capcount = htonl(1); reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION); afs_send_simple_reply(call, &reply, sizeof(reply)); - + afs_put_call(call); _leave(""); } @@ -604,7 +609,5 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call) /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; - INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself); - queue_work(afs_wq, &call->work); - return 0; + return afs_queue_call_work(call); } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index b411670d5f67..65504e218d35 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -66,7 +66,7 @@ enum afs_call_state { struct afs_call { const struct afs_call_type *type; /* type of call */ wait_queue_head_t waitq; /* processes awaiting completion */ - struct work_struct async_work; /* asynchronous work processor */ + struct work_struct async_work; /* async I/O processor */ struct work_struct work; /* actual work processor */ struct rxrpc_call *rxcall; /* RxRPC call handle */ struct key *key; /* security for this call */ @@ -82,6 +82,7 @@ struct afs_call { pgoff_t first; /* first page in mapping to deal with */ pgoff_t last; /* last page in mapping to deal with */ size_t offset; /* offset into received data store */ + atomic_t usage; enum afs_call_state state; int error; /* error code */ u32 abort_code; /* Remote abort ID or 0 */ @@ -115,6 +116,9 @@ struct afs_call_type { /* clean up a call */ void (*destructor)(struct afs_call *call); + + /* Work function */ + void (*work)(struct work_struct *work); }; /* @@ 
-591,9 +595,12 @@ extern void afs_proc_cell_remove(struct afs_cell *); * rxrpc.c */ extern struct socket *afs_socket; +extern atomic_t afs_outstanding_calls; extern int afs_open_socket(void); extern void afs_close_socket(void); +extern void afs_put_call(struct afs_call *); +extern int afs_queue_call_work(struct afs_call *); extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, bool); extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, size_t, size_t); diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index ec1e41f929d1..95f42872b787 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -19,9 +19,8 @@ struct socket *afs_socket; /* my RxRPC socket */ static struct workqueue_struct *afs_async_calls; static struct afs_call *afs_spare_incoming_call; -static atomic_t afs_outstanding_calls; +atomic_t afs_outstanding_calls; -static void afs_free_call(struct afs_call *); static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); static int afs_wait_for_call_to_complete(struct afs_call *); static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); @@ -112,9 +111,11 @@ void afs_close_socket(void) { _enter(""); + kernel_listen(afs_socket, 0); + flush_workqueue(afs_async_calls); + if (afs_spare_incoming_call) { - atomic_inc(&afs_outstanding_calls); - afs_free_call(afs_spare_incoming_call); + afs_put_call(afs_spare_incoming_call); afs_spare_incoming_call = NULL; } @@ -123,7 +124,6 @@ void afs_close_socket(void) TASK_UNINTERRUPTIBLE); _debug("no outstanding calls"); - flush_workqueue(afs_async_calls); kernel_sock_shutdown(afs_socket, SHUT_RDWR); flush_workqueue(afs_async_calls); sock_release(afs_socket); @@ -134,44 +134,79 @@ void afs_close_socket(void) } /* - * free a call + * Allocate a call. */ -static void afs_free_call(struct afs_call *call) +static struct afs_call *afs_alloc_call(const struct afs_call_type *type, + gfp_t gfp) { - _debug("DONE %p{%s} [%d]", - call, call->type->name, atomic_read(&afs_outstanding_calls)); + struct afs_call *call; + int o; - ASSERTCMP(call->rxcall, ==, NULL); - ASSERT(!work_pending(&call->async_work)); - ASSERT(call->type->name != NULL); + call = kzalloc(sizeof(*call), gfp); + if (!call) + return NULL; - kfree(call->request); - kfree(call); + call->type = type; + atomic_set(&call->usage, 1); + INIT_WORK(&call->async_work, afs_process_async_call); + init_waitqueue_head(&call->waitq); - if (atomic_dec_and_test(&afs_outstanding_calls)) - wake_up_atomic_t(&afs_outstanding_calls); + o = atomic_inc_return(&afs_outstanding_calls); + trace_afs_call(call, afs_call_trace_alloc, 1, o, + __builtin_return_address(0)); + return call; } /* - * End a call but do not free it + * Dispose of a reference on a call. 
*/ -static void afs_end_call_nofree(struct afs_call *call) +void afs_put_call(struct afs_call *call) { - if (call->rxcall) { - rxrpc_kernel_end_call(afs_socket, call->rxcall); - call->rxcall = NULL; + int n = atomic_dec_return(&call->usage); + int o = atomic_read(&afs_outstanding_calls); + + trace_afs_call(call, afs_call_trace_put, n + 1, o, + __builtin_return_address(0)); + + ASSERTCMP(n, >=, 0); + if (n == 0) { + ASSERT(!work_pending(&call->async_work)); + ASSERT(call->type->name != NULL); + + if (call->rxcall) { + rxrpc_kernel_end_call(afs_socket, call->rxcall); + call->rxcall = NULL; + } + if (call->type->destructor) + call->type->destructor(call); + + kfree(call->request); + kfree(call); + + o = atomic_dec_return(&afs_outstanding_calls); + trace_afs_call(call, afs_call_trace_free, 0, o, + __builtin_return_address(0)); + if (o == 0) + wake_up_atomic_t(&afs_outstanding_calls); } - if (call->type->destructor) - call->type->destructor(call); } /* - * End a call and free it + * Queue the call for actual work. Returns 0 unconditionally for convenience. */ -static void afs_end_call(struct afs_call *call) +int afs_queue_call_work(struct afs_call *call) { - afs_end_call_nofree(call); - afs_free_call(call); + int u = atomic_inc_return(&call->usage); + + trace_afs_call(call, afs_call_trace_work, u, + atomic_read(&afs_outstanding_calls), + __builtin_return_address(0)); + + INIT_WORK(&call->work, call->type->work); + + if (!queue_work(afs_wq, &call->work)) + afs_put_call(call); + return 0; } /* @@ -182,25 +217,19 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, { struct afs_call *call; - call = kzalloc(sizeof(*call), GFP_NOFS); + call = afs_alloc_call(type, GFP_NOFS); if (!call) goto nomem_call; - _debug("CALL %p{%s} [%d]", - call, type->name, atomic_read(&afs_outstanding_calls)); - atomic_inc(&afs_outstanding_calls); - - call->type = type; - call->request_size = request_size; - call->reply_max = reply_max; - if (request_size) { + call->request_size = request_size; call->request = kmalloc(request_size, GFP_NOFS); if (!call->request) goto nomem_free; } if (reply_max) { + call->reply_max = reply_max; call->buffer = kmalloc(reply_max, GFP_NOFS); if (!call->buffer) goto nomem_free; @@ -210,7 +239,7 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, return call; nomem_free: - afs_free_call(call); + afs_put_call(call); nomem_call: return NULL; } @@ -315,7 +344,6 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, atomic_read(&afs_outstanding_calls)); call->async = async; - INIT_WORK(&call->async_work, afs_process_async_call); memset(&srx, 0, sizeof(srx)); srx.srx_family = AF_RXRPC; @@ -378,7 +406,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, error_do_abort: rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD"); error_kill_call: - afs_end_call(call); + afs_put_call(call); _leave(" = %d", ret); return ret; } @@ -448,7 +476,7 @@ static void afs_deliver_to_call(struct afs_call *call) done: if (call->state == AFS_CALL_COMPLETE && call->incoming) - afs_end_call(call); + afs_put_call(call); out: _leave(""); return; @@ -505,7 +533,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call) } _debug("call complete"); - afs_end_call(call); + afs_put_call(call); _leave(" = %d", ret); return ret; } @@ -529,14 +557,25 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, unsigned long call_user_ID) { struct afs_call *call = (struct afs_call *)call_user_ID; + 
int u; trace_afs_notify_call(rxcall, call); call->need_attention = true; - queue_work(afs_async_calls, &call->async_work); + + u = __atomic_add_unless(&call->usage, 1, 0); + if (u != 0) { + trace_afs_call(call, afs_call_trace_wake, u, + atomic_read(&afs_outstanding_calls), + __builtin_return_address(0)); + + if (!queue_work(afs_async_calls, &call->async_work)) + afs_put_call(call); + } } /* - * delete an asynchronous call + * Delete an asynchronous call. The work item carries a ref to the call struct + * that we need to release. */ static void afs_delete_async_call(struct work_struct *work) { @@ -544,13 +583,14 @@ static void afs_delete_async_call(struct work_struct *work) _enter(""); - afs_free_call(call); + afs_put_call(call); _leave(""); } /* - * perform processing on an asynchronous call + * Perform I/O processing on an asynchronous call. The work item carries a ref + * to the call struct that we either need to release or to pass on. */ static void afs_process_async_call(struct work_struct *work) { @@ -566,15 +606,16 @@ static void afs_process_async_call(struct work_struct *work) if (call->state == AFS_CALL_COMPLETE) { call->reply = NULL; - /* kill the call */ - afs_end_call_nofree(call); - - /* we can't just delete the call because the work item may be - * queued */ + /* We have two refs to release - one from the alloc and one + * queued with the work item - and we can't just deallocate the + * call because the work item may be queued again. + */ call->async_work.func = afs_delete_async_call; - queue_work(afs_async_calls, &call->async_work); + if (!queue_work(afs_async_calls, &call->async_work)) + afs_put_call(call); } + afs_put_call(call); _leave(""); } @@ -594,12 +635,10 @@ static void afs_charge_preallocation(struct work_struct *work) for (;;) { if (!call) { - call = kzalloc(sizeof(struct afs_call), GFP_KERNEL); + call = afs_alloc_call(&afs_RXCMxxxx, GFP_KERNEL); if (!call) break; - INIT_WORK(&call->async_work, afs_process_async_call); - call->type = &afs_RXCMxxxx; call->async = true; call->state = AFS_CALL_AWAIT_OP_ID; init_waitqueue_head(&call->waitq); @@ -624,9 +663,8 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall, { struct afs_call *call = (struct afs_call *)user_call_ID; - atomic_inc(&afs_outstanding_calls); call->rxcall = NULL; - afs_free_call(call); + afs_put_call(call); } /* @@ -635,7 +673,6 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall, static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall, unsigned long user_call_ID) { - atomic_inc(&afs_outstanding_calls); queue_work(afs_wq, &afs_charge_preallocation_work); } @@ -699,7 +736,6 @@ void afs_send_empty_reply(struct afs_call *call) rxrpc_kernel_abort_call(afs_socket, call->rxcall, RX_USER_ABORT, ENOMEM, "KOO"); default: - afs_end_call(call); _leave(" [error]"); return; } @@ -738,7 +774,6 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) rxrpc_kernel_abort_call(afs_socket, call->rxcall, RX_USER_ABORT, ENOMEM, "KOO"); } - afs_end_call(call); _leave(" [error]"); } diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 845907b04ff4..8b95c16b7045 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -16,6 +16,51 @@ #include +/* + * Define enums for tracing information. 
+ */ +#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum afs_call_trace { + afs_call_trace_alloc, + afs_call_trace_free, + afs_call_trace_put, + afs_call_trace_wake, + afs_call_trace_work, +}; + +#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */ + +/* + * Declare tracing information enums and their string mappings for display. + */ +#define afs_call_traces \ + EM(afs_call_trace_alloc, "ALLOC") \ + EM(afs_call_trace_free, "FREE ") \ + EM(afs_call_trace_put, "PUT ") \ + EM(afs_call_trace_wake, "WAKE ") \ + E_(afs_call_trace_work, "WORK ") + +/* + * Export enum symbols via userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +afs_call_traces; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. + */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + TRACE_EVENT(afs_recv_data, TP_PROTO(struct afs_call *call, unsigned count, unsigned offset, bool want_more, int ret), @@ -103,6 +148,36 @@ TRACE_EVENT(afs_cb_call, __entry->op) ); +TRACE_EVENT(afs_call, + TP_PROTO(struct afs_call *call, enum afs_call_trace op, + int usage, int outstanding, const void *where), + + TP_ARGS(call, op, usage, outstanding, where), + + TP_STRUCT__entry( + __field(struct afs_call *, call ) + __field(int, op ) + __field(int, usage ) + __field(int, outstanding ) + __field(const void *, where ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->op = op; + __entry->usage = usage; + __entry->outstanding = outstanding; + __entry->where = where; + ), + + TP_printk("c=%p %s u=%d o=%d sp=%pSR", + __entry->call, + __print_symbolic(__entry->op, afs_call_traces), + __entry->usage, + __entry->outstanding, + __entry->where) + ); + #endif /* _TRACE_AFS_H */ /* This part must be outside protection */ -- cgit v1.2.3 From 4ef8c1c93f848e360754f10eb2e7134c872b6597 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 9 Jan 2017 11:10:42 +0100 Subject: cfg80211: size various nl80211 messages correctly Ilan reported that sometimes nl80211 messages weren't working if the frames being transported got very large, which was really a problem for userspace-to-kernel messages, but prompted me to look at the code. Upon review, I found various places where variable-length data is transported in an nl80211 message but the message isn't allocated taking that into account. This shouldn't cause any problems since the frames aren't really that long, apart in one place where two (possibly very long frames) might not fit. Fix all the places (that I found) that get variable length data from the driver and put it into a message to take the length of the variable data into account. The 100 there is just a safe constant for the remaining message overhead (it's usually around 50 for most messages.) 
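As a minimal sketch of the sizing rule described above (hypothetical helper, not part of the patch): NLMSG_DEFAULT_SIZE is roughly a page at most, so an event carrying a long frame plus its fixed attributes may not fit; sizing the allocation as fixed overhead plus payload avoids that.

/* Hypothetical helper illustrating the allocation pattern used in the patch:
 * reserve ~100 bytes for the command header and fixed attributes, plus the
 * variable-length payload that will later be attached with nla_put().
 */
static struct sk_buff *example_event_msg(size_t frame_len, gfp_t gfp)
{
	return nlmsg_new(100 + frame_len, gfp);
}
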
Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 23692658fe98..fed33ec20a71 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -13018,7 +13018,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + msg = nlmsg_new(100 + len, gfp); if (!msg) return; @@ -13170,7 +13170,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp); if (!msg) return; @@ -13212,7 +13212,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp); if (!msg) return; @@ -13249,7 +13249,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(100 + ie_len, GFP_KERNEL); if (!msg) return; @@ -13325,7 +13325,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr, trace_cfg80211_notify_new_peer_candidate(dev, addr); - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + msg = nlmsg_new(100 + ie_len, gfp); if (!msg) return; @@ -13696,7 +13696,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + msg = nlmsg_new(100 + len, gfp); if (!msg) return -ENOMEM; @@ -13740,7 +13740,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, trace_cfg80211_mgmt_tx_status(wdev, cookie, ack); - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + msg = nlmsg_new(100 + len, gfp); if (!msg) return; @@ -14551,7 +14551,7 @@ void cfg80211_ft_event(struct net_device *netdev, if (!ft_event->target_ap) return; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL); if (!msg) return; -- cgit v1.2.3 From bd2522b168847106c1885f0319a2833bdf88bf9a Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Fri, 6 Jan 2017 16:33:43 -0500 Subject: cfg80211: NL80211_ATTR_SOCKET_OWNER support for CMD_CONNECT Disconnect or deauthenticate when the owning socket is closed if this flag is supplied to CMD_CONNECT or CMD_ASSOCIATE. This may be used to ensure userspace daemon doesn't leave an unmanaged connection behind. In some situations it would be possible to account for that, to some degree, in the deamon restart code or in the up/down scripts without the use of this attribute. But there will be systems where the daemon can go away for varying periods without a warning due to local resource management. 
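For context, a hypothetical userspace sketch (libnl-3; the interface index and SSID are placeholders and error reporting is trimmed) of a daemon issuing CMD_CONNECT with the new semantics: setting NL80211_ATTR_SOCKET_OWNER ties the connection to the daemon's netlink socket, so the kernel deauthenticates if that socket goes away.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include <string.h>

static struct nl_sock *example_connect(int ifindex, const char *ssid)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk)
		return NULL;
	if (genl_connect(sk) < 0)
		goto fail;
	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0)
		goto fail;

	msg = nlmsg_alloc();
	if (!msg)
		goto fail;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_CONNECT, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
	/* Tie the connection to this socket: if the socket is closed (say,
	 * the daemon crashes), cfg80211 deauthenticates/disconnects. */
	nla_put_flag(msg, NL80211_ATTR_SOCKET_OWNER);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	/* The caller must keep sk open for as long as the connection should
	 * survive; freeing it triggers the automatic disconnect. */
	return sk;

fail:
	nl_socket_free(sk);
	return NULL;
}

Keeping the socket open for the lifetime of the connection is therefore part of the contract; a daemon that wants its connections to outlive a restart simply omits the flag.
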
Signed-off-by: Andrew Zaborowski Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 7 +++++++ include/uapi/linux/nl80211.h | 2 ++ net/wireless/core.c | 3 +++ net/wireless/core.h | 1 + net/wireless/mlme.c | 5 +++++ net/wireless/nl80211.c | 26 +++++++++++++++++++++++++- net/wireless/sme.c | 33 +++++++++++++++++++++++++++++++++ 7 files changed, 76 insertions(+), 1 deletion(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 41a9ecd82ca0..cb13789ebaef 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3865,6 +3865,9 @@ struct cfg80211_cached_keys; * @conn: (private) cfg80211 software SME connection state machine data * @connect_keys: (private) keys to set after connection is established * @conn_bss_type: connecting/connected BSS type + * @conn_owner_nlportid: (private) connection owner socket port ID + * @disconnect_wk: (private) auto-disconnect work + * @disconnect_bssid: (private) the BSSID to use for auto-disconnect * @ibss_fixed: (private) IBSS is using fixed BSSID * @ibss_dfs_possible: (private) IBSS may change to a DFS channel * @event_list: (private) list for internal event processing @@ -3896,6 +3899,10 @@ struct wireless_dev { struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; enum ieee80211_bss_type conn_bss_type; + u32 conn_owner_nlportid; + + struct work_struct disconnect_wk; + u8 disconnect_bssid[ETH_ALEN]; struct list_head event_list; spinlock_t event_lock; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d74e10b1246a..174f4b30e804 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1820,6 +1820,8 @@ enum nl80211_commands { * and remove functions. NAN notifications will be sent in unicast to that * socket. Without this attribute, any socket can add functions and the * notifications will be sent to the %NL80211_MCGRP_NAN multicast group. + * If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the + * station will deauthenticate when the socket is closed. * * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. 
diff --git a/net/wireless/core.c b/net/wireless/core.c index 158c59ecf90a..903fc419217a 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1142,6 +1142,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) dev->priv_flags |= IFF_DONT_BRIDGE; + INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); + nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); break; case NETDEV_GOING_DOWN: @@ -1230,6 +1232,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, #ifdef CONFIG_CFG80211_WEXT kzfree(wdev->wext.keys); #endif + flush_work(&wdev->disconnect_wk); } /* * synchronise (so that we won't find this netdev diff --git a/net/wireless/core.h b/net/wireless/core.h index bc8ba6e57519..ba42055a036d 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -400,6 +400,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *resp_ie, size_t resp_ie_len); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); +void cfg80211_autodisconnect_wk(struct work_struct *work); /* SME implementation */ void cfg80211_conn_work(struct work_struct *work); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 4646cf5695b9..1c63a77aea34 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -345,6 +345,11 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return 0; + if (ether_addr_equal(wdev->disconnect_bssid, bssid) || + (wdev->current_bss && + ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) + wdev->conn_owner_nlportid = 0; + return rdev_deauth(rdev, dev, &req); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index fed33ec20a71..b378d0a04003 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -8050,8 +8050,17 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) err = nl80211_crypto_settings(rdev, info, &req.crypto, 1); if (!err) { wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, ssid, ssid_len, &req); + + if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { + dev->ieee80211_ptr->conn_owner_nlportid = + info->snd_portid; + memcpy(dev->ieee80211_ptr->disconnect_bssid, + bssid, ETH_ALEN); + } + wdev_unlock(dev->ieee80211_ptr); } @@ -8770,11 +8779,24 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) } wdev_lock(dev->ieee80211_ptr); + err = cfg80211_connect(rdev, dev, &connect, connkeys, connect.prev_bssid); - wdev_unlock(dev->ieee80211_ptr); if (err) kzfree(connkeys); + + if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { + dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; + if (connect.bssid) + memcpy(dev->ieee80211_ptr->disconnect_bssid, + connect.bssid, ETH_ALEN); + else + memset(dev->ieee80211_ptr->disconnect_bssid, + 0, ETH_ALEN); + } + + wdev_unlock(dev->ieee80211_ptr); + return err; } @@ -14491,6 +14513,8 @@ static int nl80211_netlink_notify(struct notifier_block * nb, if (wdev->owner_nlportid == notify->portid) schedule_destroy_work = true; + else if (wdev->conn_owner_nlportid == notify->portid) + schedule_work(&wdev->disconnect_wk); } spin_lock_bh(&rdev->beacon_registrations_lock); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 5e0d19380302..46693913fcea 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -727,6 +727,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 
kzfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; + wdev->conn_owner_nlportid = 0; if (bss) { cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wdev->wiphy, bss); @@ -955,6 +956,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, wdev->current_bss = NULL; wdev->ssid_len = 0; + wdev->conn_owner_nlportid = 0; nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); @@ -1098,6 +1100,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, kzfree(wdev->connect_keys); wdev->connect_keys = NULL; + wdev->conn_owner_nlportid = 0; + if (wdev->conn) err = cfg80211_sme_disconnect(wdev, reason); else if (!rdev->ops->disconnect) @@ -1107,3 +1111,32 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, return err; } + +/* + * Used to clean up after the connection / connection attempt owner socket + * disconnects + */ +void cfg80211_autodisconnect_wk(struct work_struct *work) +{ + struct wireless_dev *wdev = + container_of(work, struct wireless_dev, disconnect_wk); + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + + wdev_lock(wdev); + + if (wdev->conn_owner_nlportid) { + /* + * Use disconnect_bssid if still connecting and ops->disconnect + * not implemented. Otherwise we can use cfg80211_disconnect. + */ + if (rdev->ops->disconnect || wdev->current_bss) + cfg80211_disconnect(rdev, wdev->netdev, + WLAN_REASON_DEAUTH_LEAVING, true); + else + cfg80211_mlme_deauth(rdev, wdev->netdev, + wdev->disconnect_bssid, NULL, 0, + WLAN_REASON_DEAUTH_LEAVING, false); + } + + wdev_unlock(wdev); +} -- cgit v1.2.3 From f32815d21d4d8287336fb9cef4d2d9e0866214c2 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 2 Jan 2017 17:19:40 -0500 Subject: xtables: add xt_match, xt_target and data copy_to_user functions xt_entry_target, xt_entry_match and their private data may contain kernel data. Introduce helper functions xt_match_to_user, xt_target_to_user and xt_data_to_user that copy only the expected fields. These replace existing logic that calls copy_to_user on entire structs, then overwrites select fields. Private data is defined in xt_match and xt_target. All matches and targets that maintain kernel data store this at the tail of their private structure. Extend xt_match and xt_target with .usersize to limit how many bytes of data are copied. The remainder is cleared. If compatsize is specified, usersize can only safely be used if all fields up to usersize use platform-independent types. Otherwise, the compat_to_user callback must be defined. This patch does not yet enable the support logic. 
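A hypothetical example of how a match author uses the new field once the support logic is enabled (the struct and names below are illustrative, mirroring the conversions later in this series): the kernel-only tail starts at the first internal member, so .usersize stops the copy there and xt_data_to_user() clears the remainder.

/* Illustrative private data layout: only the leading fields come from, and
 * should be visible to, userspace; 'priv' is kernel-internal state.
 */
struct example_mtinfo {
	__u32 flags;		/* configured by the iptables extension */
	__u32 threshold;	/* configured by the iptables extension */
	void *priv;		/* kernel-only, must not leak to userspace */
};

static struct xt_match example_mt_reg __read_mostly = {
	.name      = "example",
	.revision  = 0,
	.family    = NFPROTO_UNSPEC,
	/* .match, .checkentry and .destroy elided in this sketch */
	.matchsize = sizeof(struct example_mtinfo),
	/* copy only up to the kernel-only tail; the rest is zeroed */
	.usersize  = offsetof(struct example_mtinfo, priv),
	.me        = THIS_MODULE,
};

As the commit message notes, when compatsize is also defined, .usersize is only safe if every field before it is platform-independent; otherwise the module keeps its compat_to_user callback.
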
Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 9 +++++++ net/netfilter/x_tables.c | 54 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5117e4d2ddfa..be378cf47fcc 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -167,6 +167,7 @@ struct xt_match { const char *table; unsigned int matchsize; + unsigned int usersize; #ifdef CONFIG_COMPAT unsigned int compatsize; #endif @@ -207,6 +208,7 @@ struct xt_target { const char *table; unsigned int targetsize; + unsigned int usersize; #ifdef CONFIG_COMPAT unsigned int compatsize; #endif @@ -287,6 +289,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); +int xt_match_to_user(const struct xt_entry_match *m, + struct xt_entry_match __user *u); +int xt_target_to_user(const struct xt_entry_target *t, + struct xt_entry_target __user *u); +int xt_data_to_user(void __user *dst, const void *src, + int usersize, int size); + void *xt_copy_counters_from_user(const void __user *user, unsigned int len, struct xt_counters_info *info, bool compat); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 2ff499680cc6..feccf527abdd 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -262,6 +262,60 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) } EXPORT_SYMBOL_GPL(xt_request_find_target); + +static int xt_obj_to_user(u16 __user *psize, u16 size, + void __user *pname, const char *name, + u8 __user *prev, u8 rev) +{ + if (put_user(size, psize)) + return -EFAULT; + if (copy_to_user(pname, name, strlen(name) + 1)) + return -EFAULT; + if (put_user(rev, prev)) + return -EFAULT; + + return 0; +} + +#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ + xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ + U->u.user.name, K->u.kernel.TYPE->name, \ + &U->u.user.revision, K->u.kernel.TYPE->revision) + +int xt_data_to_user(void __user *dst, const void *src, + int usersize, int size) +{ + usersize = usersize ? : size; + if (copy_to_user(dst, src, usersize)) + return -EFAULT; + if (usersize != size && clear_user(dst + usersize, size - usersize)) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(xt_data_to_user); + +#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ + xt_data_to_user(U->data, K->data, \ + K->u.kernel.TYPE->usersize, \ + C_SIZE ? : K->u.kernel.TYPE->TYPE##size) + +int xt_match_to_user(const struct xt_entry_match *m, + struct xt_entry_match __user *u) +{ + return XT_OBJ_TO_USER(u, m, match, 0) || + XT_DATA_TO_USER(u, m, match, 0); +} +EXPORT_SYMBOL_GPL(xt_match_to_user); + +int xt_target_to_user(const struct xt_entry_target *t, + struct xt_entry_target __user *u) +{ + return XT_OBJ_TO_USER(u, t, target, 0) || + XT_DATA_TO_USER(u, t, target, 0); +} +EXPORT_SYMBOL_GPL(xt_target_to_user); + static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) { const struct xt_match *m; -- cgit v1.2.3 From f77bc5b23fb1af51fc0faa8a479dea8969eb5079 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 2 Jan 2017 17:19:41 -0500 Subject: iptables: use match, target and data copy_to_user helpers Convert iptables to copying entries, matches and targets one by one, using the xt_match_to_user and xt_target_to_user helper functions. 
Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/ip_tables.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 91656a1d8fbd..384b85713e06 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -826,10 +826,6 @@ copy_entries_to_user(unsigned int total_size, return PTR_ERR(counters); loc_cpu_entry = private->entries; - if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { - ret = -EFAULT; - goto free_counters; - } /* FIXME: use iterator macros --RR */ /* ... then go back and fix counters and names */ @@ -839,6 +835,10 @@ copy_entries_to_user(unsigned int total_size, const struct xt_entry_target *t; e = (struct ipt_entry *)(loc_cpu_entry + off); + if (copy_to_user(userptr + off, e, sizeof(*e))) { + ret = -EFAULT; + goto free_counters; + } if (copy_to_user(userptr + off + offsetof(struct ipt_entry, counters), &counters[num], @@ -852,23 +852,14 @@ copy_entries_to_user(unsigned int total_size, i += m->u.match_size) { m = (void *)e + i; - if (copy_to_user(userptr + off + i - + offsetof(struct xt_entry_match, - u.user.name), - m->u.kernel.match->name, - strlen(m->u.kernel.match->name)+1) - != 0) { + if (xt_match_to_user(m, userptr + off + i)) { ret = -EFAULT; goto free_counters; } } t = ipt_get_target_c(e); - if (copy_to_user(userptr + off + e->target_offset - + offsetof(struct xt_entry_target, - u.user.name), - t->u.kernel.target->name, - strlen(t->u.kernel.target->name)+1) != 0) { + if (xt_target_to_user(t, userptr + off + e->target_offset)) { ret = -EFAULT; goto free_counters; } -- cgit v1.2.3 From e47ddb2c4691fd2bd8d25745ecb6848408899757 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 2 Jan 2017 17:19:42 -0500 Subject: ip6tables: use match, target and data copy_to_user helpers Convert ip6tables to copying entries, matches and targets one by one, using the xt_match_to_user and xt_target_to_user helper functions. Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/ip6_tables.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 25a022d41a70..1e15c54fd5e2 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -855,10 +855,6 @@ copy_entries_to_user(unsigned int total_size, return PTR_ERR(counters); loc_cpu_entry = private->entries; - if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { - ret = -EFAULT; - goto free_counters; - } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ @@ -868,6 +864,10 @@ copy_entries_to_user(unsigned int total_size, const struct xt_entry_target *t; e = (struct ip6t_entry *)(loc_cpu_entry + off); + if (copy_to_user(userptr + off, e, sizeof(*e))) { + ret = -EFAULT; + goto free_counters; + } if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], @@ -881,23 +881,14 @@ copy_entries_to_user(unsigned int total_size, i += m->u.match_size) { m = (void *)e + i; - if (copy_to_user(userptr + off + i - + offsetof(struct xt_entry_match, - u.user.name), - m->u.kernel.match->name, - strlen(m->u.kernel.match->name)+1) - != 0) { + if (xt_match_to_user(m, userptr + off + i)) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target_c(e); - if (copy_to_user(userptr + off + e->target_offset - + offsetof(struct xt_entry_target, - u.user.name), - t->u.kernel.target->name, - strlen(t->u.kernel.target->name)+1) != 0) { + if (xt_target_to_user(t, userptr + off + e->target_offset)) { ret = -EFAULT; goto free_counters; } -- cgit v1.2.3 From 244b531bee2bdcfcd35ca2fff9cc7d073b3aa060 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 2 Jan 2017 17:19:43 -0500 Subject: arptables: use match, target and data copy_to_user helpers Convert arptables to copying entries, matches and targets one by one, using the xt_match_to_user and xt_target_to_user helper functions. Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index a467e1236c43..6241a81fd7f5 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -677,11 +677,6 @@ static int copy_entries_to_user(unsigned int total_size, return PTR_ERR(counters); loc_cpu_entry = private->entries; - /* ... then copy entire thing ... */ - if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { - ret = -EFAULT; - goto free_counters; - } /* FIXME: use iterator macros --RR */ /* ... then go back and fix counters and names */ @@ -689,6 +684,10 @@ static int copy_entries_to_user(unsigned int total_size, const struct xt_entry_target *t; e = (struct arpt_entry *)(loc_cpu_entry + off); + if (copy_to_user(userptr + off, e, sizeof(*e))) { + ret = -EFAULT; + goto free_counters; + } if (copy_to_user(userptr + off + offsetof(struct arpt_entry, counters), &counters[num], @@ -698,11 +697,7 @@ static int copy_entries_to_user(unsigned int total_size, } t = arpt_get_target_c(e); - if (copy_to_user(userptr + off + e->target_offset - + offsetof(struct xt_entry_target, - u.user.name), - t->u.kernel.target->name, - strlen(t->u.kernel.target->name)+1) != 0) { + if (xt_target_to_user(t, userptr + off + e->target_offset)) { ret = -EFAULT; goto free_counters; } -- cgit v1.2.3 From b5040f6c33a51e6e1fddab75baf0f45d4943cde4 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 2 Jan 2017 17:19:44 -0500 Subject: ebtables: use match, target and data copy_to_user helpers Convert ebtables to copying entries, matches and targets one by one. The solution is analogous to that of generic xt_(match|target)_to_user helpers, but is applied to different structs. Convert existing helpers ebt_make_XXXname helpers that overwrite fields of an already copy_to_user'd struct with ebt_XXX_to_user helpers that copy all relevant fields of the struct from scratch. 
Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/ebtables.c | 78 +++++++++++++++++++++++++---------------- 1 file changed, 47 insertions(+), 31 deletions(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 537e3d506fc2..79b69917f521 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1346,56 +1346,72 @@ static int update_counters(struct net *net, const void __user *user, hlp.num_counters, user, len); } -static inline int ebt_make_matchname(const struct ebt_entry_match *m, - const char *base, char __user *ubase) +static inline int ebt_obj_to_user(char __user *um, const char *_name, + const char *data, int entrysize, + int usersize, int datasize) { - char __user *hlp = ubase + ((char *)m - base); - char name[EBT_FUNCTION_MAXNAMELEN] = {}; + char name[EBT_FUNCTION_MAXNAMELEN] = {0}; /* ebtables expects 32 bytes long names but xt_match names are 29 bytes * long. Copy 29 bytes and fill remaining bytes with zeroes. */ - strlcpy(name, m->u.match->name, sizeof(name)); - if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN)) + strlcpy(name, _name, sizeof(name)); + if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || + put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || + xt_data_to_user(um + entrysize, data, usersize, datasize)) return -EFAULT; + return 0; } -static inline int ebt_make_watchername(const struct ebt_entry_watcher *w, - const char *base, char __user *ubase) +static inline int ebt_match_to_user(const struct ebt_entry_match *m, + const char *base, char __user *ubase) { - char __user *hlp = ubase + ((char *)w - base); - char name[EBT_FUNCTION_MAXNAMELEN] = {}; + return ebt_obj_to_user(ubase + ((char *)m - base), + m->u.match->name, m->data, sizeof(*m), + m->u.match->usersize, m->match_size); +} - strlcpy(name, w->u.watcher->name, sizeof(name)); - if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN)) - return -EFAULT; - return 0; +static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w, + const char *base, char __user *ubase) +{ + return ebt_obj_to_user(ubase + ((char *)w - base), + w->u.watcher->name, w->data, sizeof(*w), + w->u.watcher->usersize, w->watcher_size); } -static inline int ebt_make_names(struct ebt_entry *e, const char *base, - char __user *ubase) +static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base, + char __user *ubase) { int ret; char __user *hlp; const struct ebt_entry_target *t; - char name[EBT_FUNCTION_MAXNAMELEN] = {}; - if (e->bitmask == 0) + if (e->bitmask == 0) { + /* special case !EBT_ENTRY_OR_ENTRIES */ + if (copy_to_user(ubase + ((char *)e - base), e, + sizeof(struct ebt_entries))) + return -EFAULT; return 0; + } + + if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e))) + return -EFAULT; hlp = ubase + (((char *)e + e->target_offset) - base); t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); - ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase); + ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase); if (ret != 0) return ret; - ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase); + ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase); if (ret != 0) return ret; - strlcpy(name, t->u.target->name, sizeof(name)); - if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN)) - return -EFAULT; + ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t), + t->u.target->usersize, t->target_size); + if (ret != 0) + return ret; + return 0; } @@ 
-1475,13 +1491,9 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user, if (ret) return ret; - if (copy_to_user(tmp.entries, entries, entries_size)) { - BUGPRINT("Couldn't copy entries to userspace\n"); - return -EFAULT; - } /* set the match/watcher/target names right */ return EBT_ENTRY_ITERATE(entries, entries_size, - ebt_make_names, entries, tmp.entries); + ebt_entry_to_user, entries, tmp.entries); } static int do_ebt_set_ctl(struct sock *sk, @@ -1630,8 +1642,10 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, if (match->compat_to_user) { if (match->compat_to_user(cm->data, m->data)) return -EFAULT; - } else if (copy_to_user(cm->data, m->data, msize)) + } else { + if (xt_data_to_user(cm->data, m->data, match->usersize, msize)) return -EFAULT; + } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; @@ -1657,8 +1671,10 @@ static int compat_target_to_user(struct ebt_entry_target *t, if (target->compat_to_user) { if (target->compat_to_user(cm->data, t->data)) return -EFAULT; - } else if (copy_to_user(cm->data, t->data, tsize)) - return -EFAULT; + } else { + if (xt_data_to_user(cm->data, t->data, target->usersize, tsize)) + return -EFAULT; + } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; -- cgit v1.2.3 From 4915f7bbc402071539ec0cb8c75aad878c982402 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 2 Jan 2017 17:19:45 -0500 Subject: xtables: use match, target and data copy_to_user helpers in compat Convert compat to copying entries, matches and targets one by one, using the xt_match_to_user and xt_target_to_user helper functions. Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- net/netfilter/x_tables.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index feccf527abdd..016db6be94b9 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -619,17 +619,14 @@ int xt_compat_match_to_user(const struct xt_entry_match *m, int off = xt_compat_match_offset(match); u_int16_t msize = m->u.user.match_size - off; - if (copy_to_user(cm, m, sizeof(*cm)) || - put_user(msize, &cm->u.user.match_size) || - copy_to_user(cm->u.user.name, m->u.kernel.match->name, - strlen(m->u.kernel.match->name) + 1)) + if (XT_OBJ_TO_USER(cm, m, match, msize)) return -EFAULT; if (match->compat_to_user) { if (match->compat_to_user((void __user *)cm->data, m->data)) return -EFAULT; } else { - if (copy_to_user(cm->data, m->data, msize - sizeof(*cm))) + if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) return -EFAULT; } @@ -977,17 +974,14 @@ int xt_compat_target_to_user(const struct xt_entry_target *t, int off = xt_compat_target_offset(target); u_int16_t tsize = t->u.user.target_size - off; - if (copy_to_user(ct, t, sizeof(*ct)) || - put_user(tsize, &ct->u.user.target_size) || - copy_to_user(ct->u.user.name, t->u.kernel.target->name, - strlen(t->u.kernel.target->name) + 1)) + if (XT_OBJ_TO_USER(ct, t, target, tsize)) return -EFAULT; if (target->compat_to_user) { if (target->compat_to_user((void __user *)ct->data, t->data)) return -EFAULT; } else { - if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct))) + if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) return -EFAULT; } -- cgit v1.2.3 From ec23189049651b16dc2ffab35a4371dc1f491aca Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 2 Jan 2017 17:19:46 -0500 Subject: xtables: extend matches and targets with .usersize In matches and targets that 
define a kernel-only tail to their xt_match and xt_target data structs, add a field .usersize that specifies up to where data is to be shared with userspace. Performed a search for comment "Used internally by the kernel" to find relevant matches and targets. Manually inspected the structs to derive a valid offsetof. Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/ebt_limit.c | 1 + net/ipv4/netfilter/ipt_CLUSTERIP.c | 1 + net/ipv6/netfilter/ip6t_NPT.c | 2 ++ net/netfilter/xt_CT.c | 3 +++ net/netfilter/xt_RATEEST.c | 1 + net/netfilter/xt_TEE.c | 2 ++ net/netfilter/xt_bpf.c | 2 ++ net/netfilter/xt_cgroup.c | 1 + net/netfilter/xt_connlimit.c | 1 + net/netfilter/xt_hashlimit.c | 4 ++++ net/netfilter/xt_limit.c | 2 ++ net/netfilter/xt_quota.c | 1 + net/netfilter/xt_rateest.c | 1 + net/netfilter/xt_string.c | 1 + 14 files changed, 23 insertions(+) diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c index 517e78befcb2..61a9f1be1263 100644 --- a/net/bridge/netfilter/ebt_limit.c +++ b/net/bridge/netfilter/ebt_limit.c @@ -105,6 +105,7 @@ static struct xt_match ebt_limit_mt_reg __read_mostly = { .match = ebt_limit_mt, .checkentry = ebt_limit_mt_check, .matchsize = sizeof(struct ebt_limit_info), + .usersize = offsetof(struct ebt_limit_info, prev), #ifdef CONFIG_COMPAT .compatsize = sizeof(struct ebt_compat_limit_info), #endif diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 21db00d0362b..8a3d20ebb815 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -468,6 +468,7 @@ static struct xt_target clusterip_tg_reg __read_mostly = { .checkentry = clusterip_tg_check, .destroy = clusterip_tg_destroy, .targetsize = sizeof(struct ipt_clusterip_tgt_info), + .usersize = offsetof(struct ipt_clusterip_tgt_info, config), #ifdef CONFIG_COMPAT .compatsize = sizeof(struct compat_ipt_clusterip_tgt_info), #endif /* CONFIG_COMPAT */ diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 590f767db5d4..a379d2f79b19 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c @@ -112,6 +112,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { .table = "mangle", .target = ip6t_snpt_tg, .targetsize = sizeof(struct ip6t_npt_tginfo), + .usersize = offsetof(struct ip6t_npt_tginfo, adjustment), .checkentry = ip6t_npt_checkentry, .family = NFPROTO_IPV6, .hooks = (1 << NF_INET_LOCAL_IN) | @@ -123,6 +124,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { .table = "mangle", .target = ip6t_dnpt_tg, .targetsize = sizeof(struct ip6t_npt_tginfo), + .usersize = offsetof(struct ip6t_npt_tginfo, adjustment), .checkentry = ip6t_npt_checkentry, .family = NFPROTO_IPV6, .hooks = (1 << NF_INET_PRE_ROUTING) | diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 95c750358747..26b0bccfa0c5 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -373,6 +373,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { .name = "CT", .family = NFPROTO_UNSPEC, .targetsize = sizeof(struct xt_ct_target_info), + .usersize = offsetof(struct xt_ct_target_info, ct), .checkentry = xt_ct_tg_check_v0, .destroy = xt_ct_tg_destroy_v0, .target = xt_ct_target_v0, @@ -384,6 +385,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { .family = NFPROTO_UNSPEC, .revision = 1, .targetsize = sizeof(struct xt_ct_target_info_v1), + .usersize = offsetof(struct xt_ct_target_info, ct), .checkentry = xt_ct_tg_check_v1, 
.destroy = xt_ct_tg_destroy_v1, .target = xt_ct_target_v1, @@ -395,6 +397,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { .family = NFPROTO_UNSPEC, .revision = 2, .targetsize = sizeof(struct xt_ct_target_info_v1), + .usersize = offsetof(struct xt_ct_target_info, ct), .checkentry = xt_ct_tg_check_v2, .destroy = xt_ct_tg_destroy_v1, .target = xt_ct_target_v1, diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 91a373a3f534..498b54fd04d7 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -162,6 +162,7 @@ static struct xt_target xt_rateest_tg_reg __read_mostly = { .checkentry = xt_rateest_tg_checkentry, .destroy = xt_rateest_tg_destroy, .targetsize = sizeof(struct xt_rateest_target_info), + .usersize = offsetof(struct xt_rateest_target_info, est), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index 1c57ace75ae6..86b0580b2216 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c @@ -133,6 +133,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = { .family = NFPROTO_IPV4, .target = tee_tg4, .targetsize = sizeof(struct xt_tee_tginfo), + .usersize = offsetof(struct xt_tee_tginfo, priv), .checkentry = tee_tg_check, .destroy = tee_tg_destroy, .me = THIS_MODULE, @@ -144,6 +145,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = { .family = NFPROTO_IPV6, .target = tee_tg6, .targetsize = sizeof(struct xt_tee_tginfo), + .usersize = offsetof(struct xt_tee_tginfo, priv), .checkentry = tee_tg_check, .destroy = tee_tg_destroy, .me = THIS_MODULE, diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 2dedaa23ab0a..38986a95216c 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c @@ -110,6 +110,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = { .match = bpf_mt, .destroy = bpf_mt_destroy, .matchsize = sizeof(struct xt_bpf_info), + .usersize = offsetof(struct xt_bpf_info, filter), .me = THIS_MODULE, }, { @@ -120,6 +121,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = { .match = bpf_mt_v1, .destroy = bpf_mt_destroy_v1, .matchsize = sizeof(struct xt_bpf_info_v1), + .usersize = offsetof(struct xt_bpf_info_v1, filter), .me = THIS_MODULE, }, }; diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index a086a914865f..1db1ce59079f 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c @@ -122,6 +122,7 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = { .checkentry = cgroup_mt_check_v1, .match = cgroup_mt_v1, .matchsize = sizeof(struct xt_cgroup_info_v1), + .usersize = offsetof(struct xt_cgroup_info_v1, priv), .destroy = cgroup_mt_destroy_v1, .me = THIS_MODULE, .hooks = (1 << NF_INET_LOCAL_OUT) | diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 660b61dbd776..b8fd4ab762ed 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -431,6 +431,7 @@ static struct xt_match connlimit_mt_reg __read_mostly = { .checkentry = connlimit_mt_check, .match = connlimit_mt, .matchsize = sizeof(struct xt_connlimit_info), + .usersize = offsetof(struct xt_connlimit_info, data), .destroy = connlimit_mt_destroy, .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 10063408141d..26ef70c50e3b 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -838,6 +838,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { .family = NFPROTO_IPV4, .match = hashlimit_mt_v1, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), + 
.usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), .checkentry = hashlimit_mt_check_v1, .destroy = hashlimit_mt_destroy_v1, .me = THIS_MODULE, @@ -848,6 +849,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { .family = NFPROTO_IPV4, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo2), + .usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, @@ -859,6 +861,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { .family = NFPROTO_IPV6, .match = hashlimit_mt_v1, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), + .usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), .checkentry = hashlimit_mt_check_v1, .destroy = hashlimit_mt_destroy_v1, .me = THIS_MODULE, @@ -869,6 +872,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { .family = NFPROTO_IPV6, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo2), + .usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index bef850596558..dab962df1787 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -192,6 +192,8 @@ static struct xt_match limit_mt_reg __read_mostly = { .compatsize = sizeof(struct compat_xt_rateinfo), .compat_from_user = limit_mt_compat_from_user, .compat_to_user = limit_mt_compat_to_user, +#else + .usersize = offsetof(struct xt_rateinfo, prev), #endif .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c index 44c8eb4c9d66..10d61a6eed71 100644 --- a/net/netfilter/xt_quota.c +++ b/net/netfilter/xt_quota.c @@ -73,6 +73,7 @@ static struct xt_match quota_mt_reg __read_mostly = { .checkentry = quota_mt_check, .destroy = quota_mt_destroy, .matchsize = sizeof(struct xt_quota_info), + .usersize = offsetof(struct xt_quota_info, master), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c index 1db02f6fca54..755d2f6693a2 100644 --- a/net/netfilter/xt_rateest.c +++ b/net/netfilter/xt_rateest.c @@ -133,6 +133,7 @@ static struct xt_match xt_rateest_mt_reg __read_mostly = { .checkentry = xt_rateest_mt_checkentry, .destroy = xt_rateest_mt_destroy, .matchsize = sizeof(struct xt_rateest_match_info), + .usersize = offsetof(struct xt_rateest_match_info, est1), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index 0bc3460319c8..423293ee57c2 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c @@ -77,6 +77,7 @@ static struct xt_match xt_string_mt_reg __read_mostly = { .match = string_mt, .destroy = string_mt_destroy, .matchsize = sizeof(struct xt_string_info), + .usersize = offsetof(struct xt_string_info, config), .me = THIS_MODULE, }; -- cgit v1.2.3 From 1e9116327e1dbbf33fcb539c977ff39e1c680e6c Mon Sep 17 00:00:00 2001 From: yuan linyu Date: Sat, 7 Jan 2017 17:18:31 +0800 Subject: net: change init_inodecache() return void sock_init() call it but not check it's return value, so change it to void return and add an internal BUG_ON() check. Signed-off-by: yuan linyu Signed-off-by: David S. 
Miller --- net/socket.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/socket.c b/net/socket.c index c65bb927b104..3ef02e97ecf3 100644 --- a/net/socket.c +++ b/net/socket.c @@ -287,7 +287,7 @@ static void init_once(void *foo) inode_init_once(&ei->vfs_inode); } -static int init_inodecache(void) +static void init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), @@ -296,9 +296,7 @@ static int init_inodecache(void) SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT), init_once); - if (sock_inode_cachep == NULL) - return -ENOMEM; - return 0; + BUG_ON(sock_inode_cachep == NULL); } static const struct super_operations sockfs_ops = { -- cgit v1.2.3 From cecf62d6c618e4ede77b83101e2484188b7af7e1 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 7 Jan 2017 17:47:47 +0100 Subject: net: ibm: ehea: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 51 ++++++++++++++++------------ 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c index 85a3866459cf..4f58d338d739 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c @@ -31,9 +31,11 @@ #include "ehea.h" #include "ehea_phyp.h" -static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ehea_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct ehea_port *port = netdev_priv(dev); + u32 supported, advertising; u32 speed; int ret; @@ -60,68 +62,75 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = -1; break; /* BUG */ } - cmd->duplex = port->full_duplex == 1 ? + cmd->base.duplex = port->full_duplex == 1 ? DUPLEX_FULL : DUPLEX_HALF; } else { speed = SPEED_UNKNOWN; - cmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_cmd_speed_set(cmd, speed); + cmd->base.speed = speed; - if (cmd->speed == SPEED_10000) { - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - cmd->port = PORT_FIBRE; + if (cmd->base.speed == SPEED_10000) { + supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + cmd->base.port = PORT_FIBRE; } else { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg | SUPPORTED_TP); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_TP); - cmd->port = PORT_TP; + cmd->base.port = PORT_TP; } - cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->base.autoneg = port->autoneg == 1 ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ehea_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct ehea_port *port = netdev_priv(dev); int ret = 0; u32 sp; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { sp = EHEA_SPEED_AUTONEG; goto doit; } - switch (cmd->speed) { + switch (cmd->base.speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_10M_F; else sp = H_SPEED_10M_H; break; case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_100M_F; else sp = H_SPEED_100M_H; break; case SPEED_1000: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_1G_F; else ret = -EINVAL; break; case SPEED_10000: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_10G_F; else ret = -EINVAL; @@ -264,7 +273,6 @@ static void ehea_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops ehea_ethtool_ops = { - .get_settings = ehea_get_settings, .get_drvinfo = ehea_get_drvinfo, .get_msglevel = ehea_get_msglevel, .set_msglevel = ehea_set_msglevel, @@ -272,8 +280,9 @@ static const struct ethtool_ops ehea_ethtool_ops = { .get_strings = ehea_get_strings, .get_sset_count = ehea_get_sset_count, .get_ethtool_stats = ehea_get_ethtool_stats, - .set_settings = ehea_set_settings, .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ + .get_link_ksettings = ehea_get_link_ksettings, + .set_link_ksettings = ehea_set_link_ksettings, }; void ehea_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From e4ccf764488adbd072e78f416ef0b5bd0044c899 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 7 Jan 2017 22:32:27 +0100 Subject: net: ibm: emac: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/emac/core.c | 70 ++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5909615c27f7..6ead2335a169 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1991,69 +1991,79 @@ static struct mal_commac_ops emac_commac_sg_ops = { }; /* Ethtool support */ -static int emac_ethtool_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int emac_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct emac_instance *dev = netdev_priv(ndev); + u32 supported, advertising; - cmd->supported = dev->phy.features; - cmd->port = PORT_MII; - cmd->phy_address = dev->phy.address; - cmd->transceiver = - dev->phy.address >= 0 ? 
XCVR_EXTERNAL : XCVR_INTERNAL; + supported = dev->phy.features; + cmd->base.port = PORT_MII; + cmd->base.phy_address = dev->phy.address; mutex_lock(&dev->link_lock); - cmd->advertising = dev->phy.advertising; - cmd->autoneg = dev->phy.autoneg; - cmd->speed = dev->phy.speed; - cmd->duplex = dev->phy.duplex; + advertising = dev->phy.advertising; + cmd->base.autoneg = dev->phy.autoneg; + cmd->base.speed = dev->phy.speed; + cmd->base.duplex = dev->phy.duplex; mutex_unlock(&dev->link_lock); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int emac_ethtool_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int +emac_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct emac_instance *dev = netdev_priv(ndev); u32 f = dev->phy.features; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL, - cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); + cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising); /* Basic sanity checks */ if (dev->phy.address < 0) return -EOPNOTSUPP; - if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) + if (cmd->base.autoneg != AUTONEG_ENABLE && + cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) + if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0) return -EINVAL; - if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) + if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL) return -EINVAL; - if (cmd->autoneg == AUTONEG_DISABLE) { - switch (cmd->speed) { + if (cmd->base.autoneg == AUTONEG_DISABLE) { + switch (cmd->base.speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_10baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_10baseT_Full)) return -EINVAL; break; case SPEED_100: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_100baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_100baseT_Full)) return -EINVAL; break; case SPEED_1000: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_1000baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_1000baseT_Full)) return -EINVAL; break; @@ -2062,8 +2072,8 @@ static int emac_ethtool_set_settings(struct net_device *ndev, } mutex_lock(&dev->link_lock); - dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed, - cmd->duplex); + dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed, + cmd->base.duplex); mutex_unlock(&dev->link_lock); } else { @@ -2072,7 +2082,7 @@ static int emac_ethtool_set_settings(struct net_device *ndev, mutex_lock(&dev->link_lock); dev->phy.def->ops->setup_aneg(&dev->phy, - (cmd->advertising & f) | + (advertising & f) | (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause))); @@ -2234,8 +2244,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, } static const struct ethtool_ops emac_ethtool_ops = { - .get_settings = emac_ethtool_get_settings, - .set_settings = 
emac_ethtool_set_settings, .get_drvinfo = emac_ethtool_get_drvinfo, .get_regs_len = emac_ethtool_get_regs_len, @@ -2251,6 +2259,8 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_ethtool_stats = emac_ethtool_get_ethtool_stats, .get_link = ethtool_op_get_link, + .get_link_ksettings = emac_ethtool_get_link_ksettings, + .set_link_ksettings = emac_ethtool_set_link_ksettings, }; static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -- cgit v1.2.3 From 9ce8c2dfceaec82e3cde57934b6cc69d24c42e95 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 7 Jan 2017 22:35:13 +0100 Subject: net: ibm: ibmveth: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmveth.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index a831f947ca8c..c6ba75c595e0 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -729,20 +729,26 @@ static int ibmveth_close(struct net_device *netdev) return 0; } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + u32 supported, advertising; + + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_FIBRE; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_ENABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 1; + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_ENABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -978,11 +984,11 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev, static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, .get_link = ethtool_op_get_link, .get_strings = ibmveth_get_strings, .get_sset_count = ibmveth_get_sset_count, .get_ethtool_stats = ibmveth_get_ethtool_stats, + .get_link_ksettings = netdev_get_link_ksettings, }; static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -- cgit v1.2.3 From 8a43379fc61910aa620e405fb0b81adddc2ca84d Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 7 Jan 2017 22:37:29 +0100 Subject: net: ibm: ibmvnic: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c12596676bbb..3c2526bde7a4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1025,21 +1025,26 @@ static const struct net_device_ops ibmvnic_netdev_ops = { /* ethtool functions */ -static int ibmvnic_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int ibmvnic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + u32 supported, advertising; + + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_FIBRE; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_ENABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 1; + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_ENABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -1132,7 +1137,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops ibmvnic_ethtool_ops = { - .get_settings = ibmvnic_get_settings, .get_drvinfo = ibmvnic_get_drvinfo, .get_msglevel = ibmvnic_get_msglevel, .set_msglevel = ibmvnic_set_msglevel, @@ -1141,6 +1145,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_strings = ibmvnic_get_strings, .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, + .get_link_ksettings = ibmvnic_get_link_ksettings, }; /* Routines for managing CRQs/sCRQs */ -- cgit v1.2.3 From 6b0c06e0b660ed175dbdd93a591476041403e54b Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 7 Jan 2017 23:18:07 +0100 Subject: net: intel: e100: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/e100.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 25c6dfd500b4..04e939226e08 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2426,19 +2426,21 @@ err_clean_rx: #define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ -static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct nic *nic = netdev_priv(netdev); - return mii_ethtool_gset(&nic->mii, cmd); + return mii_ethtool_get_link_ksettings(&nic->mii, cmd); } -static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct nic *nic = netdev_priv(netdev); int err; mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); - err = mii_ethtool_sset(&nic->mii, cmd); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); e100_exec_cb(nic, NULL, e100_configure); return err; @@ -2741,8 +2743,6 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } static const struct ethtool_ops e100_ethtool_ops = { - .get_settings = e100_get_settings, - .set_settings = e100_set_settings, .get_drvinfo = e100_get_drvinfo, .get_regs_len = e100_get_regs_len, .get_regs = e100_get_regs, @@ -2763,6 +2763,8 @@ static const struct ethtool_ops e100_ethtool_ops = { .get_ethtool_stats = e100_get_ethtool_stats, .get_sset_count = e100_get_sset_count, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -- cgit v1.2.3 From 0118717583cda6f4f36092853ad0345e8150b286 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:24 +0200 Subject: net/mlx5: Add interface to get reference to a UAR A reference to a UAR is required to generate CQ or EQ doorbells. Since CQ or EQ doorbells can all be generated using the same UAR area without any effect on performance, we are just getting a reference to any available UAR, If one is not available we allocate it but we don't waste the blue flame registers it can provide and we will use them for subsequent allocations. We get a reference to such UAR and put in mlx5_priv so any kernel consumer can make use of it. 
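For illustration, a minimal sketch of a kernel consumer of this interface (the example_consumer names are invented; only mlx5_get_uars_page(), mlx5_put_uars_page() and the page's index/map fields are taken from the patch below):

  #include <linux/errno.h>
  #include <linux/mlx5/driver.h>

  /* Hypothetical consumer, for illustration only. */
  struct example_consumer {
          struct mlx5_uars_page *uar;
  };

  static int example_consumer_init(struct example_consumer *c,
                                   struct mlx5_core_dev *mdev)
  {
          /* Takes a reference; a new UARs page is only allocated if
           * none is available yet.
           */
          c->uar = mlx5_get_uars_page(mdev);
          if (!c->uar)
                  return -ENOMEM;

          /* c->uar->index is what goes into a CQ/EQ context and
           * c->uar->map is the mapping used to ring the doorbell.
           */
          return 0;
  }

  static void example_consumer_cleanup(struct example_consumer *c,
                                       struct mlx5_core_dev *mdev)
  {
          /* Drop the reference taken in example_consumer_init(). */
          mlx5_put_uars_page(mdev, c->uar);
  }

Since the page is reference counted, any number of such consumers can share the same UAR for their doorbells.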
Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 14 ++++------- drivers/net/ethernet/mellanox/mlx5/core/main.c | 22 ++++++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 32 ++++++++++++++++++++++++++ include/linux/mlx5/driver.h | 5 +++- 4 files changed, 59 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 11a8d638bcd0..5130d65dd41a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -512,7 +512,7 @@ static void init_eq_buf(struct mlx5_eq *eq) int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, - struct mlx5_uar *uar, enum mlx5_eq_type type) + enum mlx5_eq_type type) { u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; @@ -556,7 +556,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); - MLX5_SET(eqc, eqc, uar_page, uar->index); + MLX5_SET(eqc, eqc, uar_page, priv->uar->index); MLX5_SET(eqc, eqc, intr, vecidx); MLX5_SET(eqc, eqc, log_page_size, eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); @@ -571,7 +571,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->irqn = priv->msix_arr[vecidx].vector; eq->dev = dev; - eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; + eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET; err = request_irq(eq->irqn, handler, 0, priv->irq_info[vecidx].name, eq); if (err) @@ -686,8 +686,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, - "mlx5_cmd_eq", &dev->priv.bfregi.uars[0], - MLX5_EQ_TYPE_ASYNC); + "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); return err; @@ -697,8 +696,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, MLX5_NUM_ASYNC_EQE, async_event_mask, - "mlx5_async_eq", &dev->priv.bfregi.uars[0], - MLX5_EQ_TYPE_ASYNC); + "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create async EQ %d\n", err); goto err1; @@ -708,7 +706,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) MLX5_EQ_VEC_PAGES, /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", - &dev->priv.bfregi.uars[0], MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); @@ -722,7 +719,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) MLX5_NUM_ASYNC_EQE, 1 << MLX5_EVENT_TYPE_PAGE_FAULT, "mlx5_page_fault_eq", - &dev->priv.bfregi.uars[0], MLX5_EQ_TYPE_PF); if (err) { mlx5_core_warn(dev, "failed to create page fault EQ %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 634e96a02516..2882d0483ed8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -753,8 +753,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, - name, 
&dev->priv.bfregi.uars[0], - MLX5_EQ_TYPE_COMP); + name, MLX5_EQ_TYPE_COMP); if (err) { kfree(eq); goto clean; @@ -1094,12 +1093,18 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_cleanup_once; } - err = mlx5_alloc_bfregs(dev, &priv->bfregi); - if (err) { + dev->priv.uar = mlx5_get_uars_page(dev); + if (!dev->priv.uar) { dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); goto err_disable_msix; } + err = mlx5_alloc_bfregs(dev, &priv->bfregi); + if (err) { + dev_err(&pdev->dev, "Failed allocating uuars, aborting\n"); + goto err_uar_cleanup; + } + err = mlx5_start_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); @@ -1172,6 +1177,9 @@ err_stop_eqs: err_free_uar: mlx5_free_bfregs(dev, &priv->bfregi); +err_uar_cleanup: + mlx5_put_uars_page(dev, priv->uar); + err_disable_msix: mlx5_disable_msix(dev); @@ -1231,6 +1239,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_bfregs(dev, &priv->bfregi); + mlx5_put_uars_page(dev, priv->uar); mlx5_disable_msix(dev); if (cleanup) mlx5_cleanup_once(dev); @@ -1305,6 +1314,11 @@ static int init_one(struct pci_dev *pdev, goto clean_dev; } #endif + mutex_init(&priv->bfregs.reg_head.lock); + mutex_init(&priv->bfregs.wc_head.lock); + INIT_LIST_HEAD(&priv->bfregs.reg_head.list); + INIT_LIST_HEAD(&priv->bfregs.wc_head.list); + err = mlx5_pci_init(dev, priv); if (err) { dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 6a081a8787a7..fcc0270ea72f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -332,6 +332,38 @@ error1: return ERR_PTR(err); } +struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev) +{ + struct mlx5_uars_page *ret; + + mutex_lock(&mdev->priv.bfregs.reg_head.lock); + if (list_empty(&mdev->priv.bfregs.reg_head.list)) { + ret = alloc_uars_page(mdev, false); + if (IS_ERR(ret)) { + ret = NULL; + goto out; + } + list_add(&ret->list, &mdev->priv.bfregs.reg_head.list); + } else { + ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, + struct mlx5_uars_page, list); + kref_get(&ret->ref_count); + } +out: + mutex_unlock(&mdev->priv.bfregs.reg_head.lock); + + return ret; +} +EXPORT_SYMBOL(mlx5_get_uars_page); + +void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up) +{ + mutex_lock(&mdev->priv.bfregs.reg_head.lock); + kref_put(&up->ref_count, up_rel_func); + mutex_unlock(&mdev->priv.bfregs.reg_head.lock); +} +EXPORT_SYMBOL(mlx5_put_uars_page); + static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi) { /* return the offset in bytes from the start of the page to the diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 969aa1fe17e2..9a3a0954855b 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -679,6 +679,7 @@ struct mlx5_priv { struct srcu_struct pfault_srcu; #endif struct mlx5_bfreg_data bfregs; + struct mlx5_uars_page *uar; }; enum mlx5_device_state { @@ -1007,7 +1008,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, - struct mlx5_uar *uar, enum mlx5_eq_type type); + enum mlx5_eq_type type); int 
mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); @@ -1118,6 +1119,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); +void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); struct mlx5_profile { u64 mask; -- cgit v1.2.3 From 5fe9dec0d045437e48f112b8fa705197bd7bc3c0 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:25 +0200 Subject: IB/mlx5: Use blue flame register allocator in mlx5_ib Make use of the blue flame registers allocator at mlx5_ib. Since blue flame was not really supported we remove all the code that is related to blue flame and we let all consumers to use the same blue flame register. Once blue flame is supported we will add the code. As part of this patch we also move the definition of struct mlx5_bf to mlx5_ib.h as it is only used by mlx5_ib. Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/cq.c | 8 +- drivers/infiniband/hw/mlx5/main.c | 28 +++++- drivers/infiniband/hw/mlx5/mlx5_ib.h | 11 ++- drivers/infiniband/hw/mlx5/qp.c | 73 +++------------- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 16 +--- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 114 ------------------------- include/linux/mlx5/cq.h | 3 +- include/linux/mlx5/doorbell.h | 6 +- include/linux/mlx5/driver.h | 19 ----- 10 files changed, 59 insertions(+), 221 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index bb7e91c55003..a28ec33b82ed 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; struct mlx5_ib_cq *cq = to_mcq(ibcq); - void __iomem *uar_page = mdev->priv.bfregi.uars[0].map; + void __iomem *uar_page = mdev->priv.uar->map; unsigned long irq_flags; int ret = 0; @@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) mlx5_cq_arm(&cq->mcq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, - uar_page, - MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock), - to_mcq(ibcq)->mcq.cons_index); + uar_page, to_mcq(ibcq)->mcq.cons_index); return ret; } @@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, MLX5_SET(cqc, cqc, log_page_size, cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = dev->mdev->priv.bfregi.uars[0].index; + *index = dev->mdev->priv.uar->index; return 0; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d5cf82b387d3..e9f0830eca1c 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3074,8 +3074,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (mlx5_use_mad_ifc(dev)) get_ext_port_caps(dev); - MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); - if (!mlx5_lag_is_active(mdev)) name = "mlx5_%d"; else @@ -3251,9 +3249,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (err) goto err_odp; + dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); + if (!dev->mdev->priv.uar) + goto err_q_cnt; + + err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); + if (err) + goto err_uar_page; + + err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true); + if (err) + goto err_bfreg; + err = ib_register_device(&dev->ib_dev, NULL); if (err) - goto err_q_cnt; + goto err_fp_bfreg; err = create_umr_res(dev); if (err) @@ -3276,6 +3286,15 @@ err_umrc: err_dev: ib_unregister_device(&dev->ib_dev); +err_fp_bfreg: + mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); + +err_bfreg: + mlx5_free_bfreg(dev->mdev, &dev->bfreg); + +err_uar_page: + mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); + err_q_cnt: mlx5_ib_dealloc_q_counters(dev); @@ -3307,6 +3326,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) mlx5_remove_netdev_notifier(dev); ib_unregister_device(&dev->ib_dev); + mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); + mlx5_free_bfreg(dev->mdev, &dev->bfreg); + mlx5_put_uars_page(dev->mdev, mdev->priv.uar); mlx5_ib_dealloc_q_counters(dev); destroy_umrc_res(dev); mlx5_ib_odp_remove_one(dev); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d4d1329df94a..ae3bc4a1bfed 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -324,6 +324,12 @@ struct mlx5_ib_raw_packet_qp { struct mlx5_ib_rq rq; }; +struct mlx5_bf { + int buf_size; + unsigned long offset; + struct mlx5_sq_bfreg *bfreg; +}; + struct mlx5_ib_qp { struct ib_qp ibqp; union { @@ -349,7 +355,7 @@ struct mlx5_ib_qp { int wq_sig; int scat_cqe; int max_inline_data; - struct mlx5_bf *bf; + struct mlx5_bf bf; int has_rq; /* only for user space QPs. 
For kernel @@ -591,7 +597,6 @@ struct mlx5_ib_dev { struct ib_device ib_dev; struct mlx5_core_dev *mdev; struct mlx5_roce roce; - MLX5_DECLARE_DOORBELL_LOCK(uar_lock); int num_ports; /* serialize update of capability mask */ @@ -621,6 +626,8 @@ struct mlx5_ib_dev { struct list_head qp_list; /* Array with num_ports elements */ struct mlx5_ib_port *port; + struct mlx5_sq_bfreg bfreg; + struct mlx5_sq_bfreg fp_bfreg; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 240fbb0c63ba..fce1c6db393b 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -909,14 +909,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, u32 **in, int *inlen, struct mlx5_ib_qp_base *base) { - enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; - struct mlx5_bfreg_info *bfregi; int uar_index; void *qpc; - int bfregn; int err; - bfregi = &dev->mdev->priv.bfregi; if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_QP_CREATE_IPOIB_UD_LSO | @@ -924,21 +920,17 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) - lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; - - bfregn = alloc_bfreg(bfregi, lc); - if (bfregn < 0) { - mlx5_ib_dbg(dev, "\n"); - return -ENOMEM; - } + qp->bf.bfreg = &dev->fp_bfreg; + else + qp->bf.bfreg = &dev->bfreg; - qp->bf = &bfregi->bfs[bfregn]; - uar_index = qp->bf->uar->index; + qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); + uar_index = qp->bf.bfreg->index; err = calc_sq_size(dev, init_attr, qp); if (err < 0) { mlx5_ib_dbg(dev, "err %d\n", err); - goto err_bfreg; + return err; } qp->rq.offset = 0; @@ -948,7 +940,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); - goto err_bfreg; + return err; } qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); @@ -1010,9 +1002,6 @@ err_free: err_buf: mlx5_buf_free(dev->mdev, &qp->buf); - -err_bfreg: - free_bfreg(&dev->mdev->priv.bfregi, bfregn); return err; } @@ -1025,7 +1014,6 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) kfree(qp->rq.wrid); mlx5_db_free(dev->mdev, &qp->db); mlx5_buf_free(dev->mdev, &qp->buf); - free_bfreg(&dev->mdev->priv.bfregi, qp->bf->bfregn); } static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) @@ -3744,24 +3732,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) } } -static void mlx5_bf_copy(u64 __iomem *dst, u64 *src, - unsigned bytecnt, struct mlx5_ib_qp *qp) -{ - while (bytecnt > 0) { - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - bytecnt -= 64; - if (unlikely(src == qp->sq.qend)) - src = mlx5_get_send_wqe(qp, 0); - } -} - static u8 get_fence(u8 fence, struct ib_send_wr *wr) { if (unlikely(wr->opcode == IB_WR_LOCAL_INV && @@ -3857,7 +3827,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); qp = to_mqp(ibqp); - bf = qp->bf; + bf = &qp->bf; qend = qp->sq.qend; spin_lock_irqsave(&qp->sq.lock, flags); @@ -4130,28 +4100,13 @@ out: * we hit doorbell */ wmb(); - if 
(bf->need_lock) - spin_lock(&bf->lock); - else - __acquire(&bf->lock); - - /* TBD enable WC */ - if (0 && nreq == 1 && bf->bfregn && inl && size > 1 && size <= bf->buf_size / 16) { - mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); - /* wc_wmb(); */ - } else { - mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset, - MLX5_GET_DOORBELL_LOCK(&bf->lock32)); - /* Make sure doorbells don't leak out of SQ spinlock - * and reach the HCA out of order. - */ - mmiowb(); - } + /* currently we support only regular doorbells */ + mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL); + /* Make sure doorbells don't leak out of SQ spinlock + * and reach the HCA out of order. + */ + mmiowb(); bf->offset ^= bf->buf_size; - if (bf->need_lock) - spin_unlock(&bf->lock); - else - __release(&bf->lock); } spin_unlock_irqrestore(&qp->sq.lock, flags); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 951dbd58594d..3037631570b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -832,7 +832,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) struct mlx5_core_cq *mcq; mcq = &cq->mcq; - mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); + mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); } static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2882d0483ed8..ff1f14498c22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -913,8 +913,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto out; } - MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); - err = mlx5_init_cq_table(dev); if (err) { dev_err(&pdev->dev, "failed to initialize cq table\n"); @@ -1099,16 +1097,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_disable_msix; } - err = mlx5_alloc_bfregs(dev, &priv->bfregi); - if (err) { - dev_err(&pdev->dev, "Failed allocating uuars, aborting\n"); - goto err_uar_cleanup; - } - err = mlx5_start_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); - goto err_free_uar; + goto err_put_uars; } err = alloc_comp_eqs(dev); @@ -1174,10 +1166,7 @@ err_affinity_hints: err_stop_eqs: mlx5_stop_eqs(dev); -err_free_uar: - mlx5_free_bfregs(dev, &priv->bfregi); - -err_uar_cleanup: +err_put_uars: mlx5_put_uars_page(dev, priv->uar); err_disable_msix: @@ -1238,7 +1227,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); - mlx5_free_bfregs(dev, &priv->bfregi); mlx5_put_uars_page(dev, priv->uar); mlx5_disable_msix(dev); if (cleanup) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index fcc0270ea72f..07b273cccc26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -67,120 +67,6 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) } EXPORT_SYMBOL(mlx5_cmd_free_uar); -static int need_bfreg_lock(int bfregn) -{ - int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR; - - if (bfregn == 0 || tot_bfregs - NUM_LOW_LAT_BFREGS) - return 0; - - return 1; -} - -int mlx5_alloc_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info 
*bfregi) -{ - int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR; - struct mlx5_bf *bf; - phys_addr_t addr; - int err; - int i; - - bfregi->num_uars = NUM_DRIVER_UARS; - bfregi->num_low_latency_bfregs = NUM_LOW_LAT_BFREGS; - - mutex_init(&bfregi->lock); - bfregi->uars = kcalloc(bfregi->num_uars, sizeof(*bfregi->uars), GFP_KERNEL); - if (!bfregi->uars) - return -ENOMEM; - - bfregi->bfs = kcalloc(tot_bfregs, sizeof(*bfregi->bfs), GFP_KERNEL); - if (!bfregi->bfs) { - err = -ENOMEM; - goto out_uars; - } - - bfregi->bitmap = kcalloc(BITS_TO_LONGS(tot_bfregs), sizeof(*bfregi->bitmap), - GFP_KERNEL); - if (!bfregi->bitmap) { - err = -ENOMEM; - goto out_bfs; - } - - bfregi->count = kcalloc(tot_bfregs, sizeof(*bfregi->count), GFP_KERNEL); - if (!bfregi->count) { - err = -ENOMEM; - goto out_bitmap; - } - - for (i = 0; i < bfregi->num_uars; i++) { - err = mlx5_cmd_alloc_uar(dev, &bfregi->uars[i].index); - if (err) - goto out_count; - - addr = dev->iseg_base + ((phys_addr_t)(bfregi->uars[i].index) << PAGE_SHIFT); - bfregi->uars[i].map = ioremap(addr, PAGE_SIZE); - if (!bfregi->uars[i].map) { - mlx5_cmd_free_uar(dev, bfregi->uars[i].index); - err = -ENOMEM; - goto out_count; - } - mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", - bfregi->uars[i].index, bfregi->uars[i].map); - } - - for (i = 0; i < tot_bfregs; i++) { - bf = &bfregi->bfs[i]; - - bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; - bf->uar = &bfregi->uars[i / MLX5_BFREGS_PER_UAR]; - bf->regreg = bfregi->uars[i / MLX5_BFREGS_PER_UAR].map; - bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BFREGS_PER_UAR) * - (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + - MLX5_BF_OFFSET; - bf->need_lock = need_bfreg_lock(i); - spin_lock_init(&bf->lock); - spin_lock_init(&bf->lock32); - bf->bfregn = i; - } - - return 0; - -out_count: - for (i--; i >= 0; i--) { - iounmap(bfregi->uars[i].map); - mlx5_cmd_free_uar(dev, bfregi->uars[i].index); - } - kfree(bfregi->count); - -out_bitmap: - kfree(bfregi->bitmap); - -out_bfs: - kfree(bfregi->bfs); - -out_uars: - kfree(bfregi->uars); - return err; -} - -int mlx5_free_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi) -{ - int i = bfregi->num_uars; - - for (i--; i >= 0; i--) { - iounmap(bfregi->uars[i].map); - mlx5_cmd_free_uar(dev, bfregi->uars[i].index); - } - - kfree(bfregi->count); - kfree(bfregi->bitmap); - kfree(bfregi->bfs); - kfree(bfregi->uars); - - return 0; -} - int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, bool map_wc) { diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 7c3c0d3aca37..996863381bc8 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -144,7 +144,6 @@ enum { static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, void __iomem *uar_page, - spinlock_t *doorbell_lock, u32 cons_index) { __be32 doorbell[2]; @@ -164,7 +163,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); doorbell[1] = cpu_to_be32(cq->cqn); - mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock); + mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL); } int mlx5_init_cq_table(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h index afc78a3f4462..0787de28f2fc 100644 --- a/include/linux/mlx5/doorbell.h +++ b/include/linux/mlx5/doorbell.h @@ -68,10 +68,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest, { unsigned long flags; - 
spin_lock_irqsave(doorbell_lock, flags); + if (doorbell_lock) + spin_lock_irqsave(doorbell_lock, flags); __raw_writel((__force u32) val[0], dest); __raw_writel((__force u32) val[1], dest + 4); - spin_unlock_irqrestore(doorbell_lock, flags); + if (doorbell_lock) + spin_unlock_irqrestore(doorbell_lock, flags); } #endif diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9a3a0954855b..bb362f506a2e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -203,23 +203,6 @@ struct mlx5_bfreg_info { u32 ver; }; -struct mlx5_bf { - void __iomem *reg; - void __iomem *regreg; - int buf_size; - struct mlx5_uar *uar; - unsigned long offset; - int need_lock; - /* protect blue flame buffer selection when needed - */ - spinlock_t lock; - - /* serialize 64 bit writes when done as two 32 bit accesses - */ - spinlock_t lock32; - int bfregn; -}; - struct mlx5_cmd_first { __be32 data[4]; }; @@ -612,8 +595,6 @@ struct mlx5_priv { struct mlx5_eq_table eq_table; struct msix_entry *msix_arr; struct mlx5_irq_info *irq_info; - struct mlx5_bfreg_info bfregi; - MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); /* pages stuff */ struct workqueue_struct *pg_wq; -- cgit v1.2.3 From b037c29a8056b8e896c4e084ba7cc30d6a1f165f Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:26 +0200 Subject: IB/mlx5: Allow future extension of libmlx5 input data The current check requires that new fields in struct mlx5_ib_alloc_ucontext_req_v2 that are not known to the driver be zero. This was introduced so that new libraries passing additional information to the kernel through struct mlx5_ib_alloc_ucontext_req_v2 will be notified by old kernels that do not support their request by failing the operation. This scheme is problematic since it requires libmlx5 to issue the requests with descending input size for struct mlx5_ib_alloc_ucontext_req_v2. To avoid this, we require that new features obey the following rules: if the feature requires one or more fields in the response and at least one of those fields can be encoded such that a zero value means the kernel ignored the request, then this field provides the indication to the library. If no response is required, or if zero is a valid response, a new field should be added that indicates to the library whether its request was processed. 
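To make these rules concrete, a sketch of the intended pattern (the new_feature fields are invented for illustration and do not exist in mlx5_ib_alloc_ucontext_req_v2 or mlx5_ib_alloc_ucontext_resp):

  #include <linux/types.h>

  /* Hypothetical trailing fields a future feature could add. */
  struct example_req_tail {
          __u32 new_feature_req;   /* library asks for the feature */
  };

  struct example_resp_tail {
          __u32 new_feature_resp;  /* 0 means the kernel ignored the request */
  };

  /* Library side: after issuing the command, inspect the response field. */
  static int example_new_feature_enabled(const struct example_resp_tail *resp)
  {
          /* A kernel that does not know the request field never writes
           * the response field, so it stays zero and the library falls
           * back to the old behaviour.
           */
          return resp->new_feature_resp != 0;
  }

The library then detects missing support from the zero value instead of retrying with smaller request sizes.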
Fixes: b368d7cb8ceb ('IB/mlx5: Add hca_core_clock_offset to udata in init_ucontext') Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/cq.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 201 ++++++++++++++++++++++------------- drivers/infiniband/hw/mlx5/mlx5_ib.h | 15 ++- drivers/infiniband/hw/mlx5/qp.c | 133 ++++++++++------------- include/linux/mlx5/device.h | 12 ++- include/linux/mlx5/driver.h | 12 +-- 6 files changed, 209 insertions(+), 166 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index a28ec33b82ed..31803b367104 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -788,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = to_mucontext(context)->bfregi.uars[0].index; + *index = to_mucontext(context)->bfregi.sys_pages[0]; if (ucmd.cqe_comp_en == 1) { if (unlikely((*cqe_size != 64) || diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e9f0830eca1c..664067239de5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -992,6 +992,80 @@ out: return err; } +static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, + struct mlx5_ib_alloc_ucontext_req_v2 *req, + u32 *num_sys_pages) +{ + int uars_per_sys_page; + int bfregs_per_sys_page; + int ref_bfregs = req->total_num_bfregs; + + if (req->total_num_bfregs == 0) + return -EINVAL; + + BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE); + BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE); + + if (req->total_num_bfregs > MLX5_MAX_BFREGS) + return -ENOMEM; + + uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k); + bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR; + req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page); + *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page; + + if (req->num_low_latency_bfregs > req->total_num_bfregs - 1) + return -EINVAL; + + mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, alloated %d, using %d sys pages\n", + MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no", + lib_uar_4k ? 
"yes" : "no", ref_bfregs, + req->total_num_bfregs, *num_sys_pages); + + return 0; +} + +static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) +{ + struct mlx5_bfreg_info *bfregi; + int err; + int i; + + bfregi = &context->bfregi; + for (i = 0; i < bfregi->num_sys_pages; i++) { + err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]); + if (err) + goto error; + + mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]); + } + return 0; + +error: + for (--i; i >= 0; i--) + if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i])) + mlx5_ib_warn(dev, "failed to free uar %d\n", i); + + return err; +} + +static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) +{ + struct mlx5_bfreg_info *bfregi; + int err; + int i; + + bfregi = &context->bfregi; + for (i = 0; i < bfregi->num_sys_pages; i++) { + err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); + if (err) { + mlx5_ib_warn(dev, "failed to free uar %d\n", i); + return err; + } + } + return 0; +} + static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { @@ -1000,16 +1074,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct mlx5_ib_alloc_ucontext_resp resp = {}; struct mlx5_ib_ucontext *context; struct mlx5_bfreg_info *bfregi; - struct mlx5_uar *uars; - int gross_bfregs; - int num_uars; int ver; - int bfregn; int err; - int i; size_t reqlen; size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, max_cqe_version); + bool lib_uar_4k; if (!dev->ib_active) return ERR_PTR(-EAGAIN); @@ -1032,27 +1102,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (req.flags) return ERR_PTR(-EINVAL); - if (req.total_num_bfregs > MLX5_MAX_BFREGS) - return ERR_PTR(-ENOMEM); - - if (req.total_num_bfregs == 0) - return ERR_PTR(-EINVAL); - if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) return ERR_PTR(-EOPNOTSUPP); - if (reqlen > sizeof(req) && - !ib_is_udata_cleared(udata, sizeof(req), - reqlen - sizeof(req))) - return ERR_PTR(-EOPNOTSUPP); - req.total_num_bfregs = ALIGN(req.total_num_bfregs, MLX5_NON_FP_BFREGS_PER_UAR); if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) return ERR_PTR(-EINVAL); - num_uars = req.total_num_bfregs / MLX5_NON_FP_BFREGS_PER_UAR; - gross_bfregs = num_uars * MLX5_BFREGS_PER_UAR; resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); @@ -1072,42 +1129,34 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (!context) return ERR_PTR(-ENOMEM); + lib_uar_4k = false; bfregi = &context->bfregi; - mutex_init(&bfregi->lock); - uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); - if (!uars) { - err = -ENOMEM; + + /* updates req->total_num_bfregs */ + err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages); + if (err) goto out_ctx; - } - bfregi->bitmap = kcalloc(BITS_TO_LONGS(gross_bfregs), - sizeof(*bfregi->bitmap), + mutex_init(&bfregi->lock); + bfregi->lib_uar_4k = lib_uar_4k; + bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count), GFP_KERNEL); - if (!bfregi->bitmap) { + if (!bfregi->count) { err = -ENOMEM; - goto out_uar_ctx; - } - /* - * clear all fast path bfregs - */ - for (i = 0; i < gross_bfregs; i++) { - bfregn = i & 3; - if (bfregn == 2 || bfregn == 3) - set_bit(i, bfregi->bitmap); + goto out_ctx; } - bfregi->count = 
kcalloc(gross_bfregs, - sizeof(*bfregi->count), GFP_KERNEL); - if (!bfregi->count) { + bfregi->sys_pages = kcalloc(bfregi->num_sys_pages, + sizeof(*bfregi->sys_pages), + GFP_KERNEL); + if (!bfregi->sys_pages) { err = -ENOMEM; - goto out_bitmap; + goto out_count; } - for (i = 0; i < num_uars; i++) { - err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index); - if (err) - goto out_count; - } + err = allocate_uars(dev, context); + if (err) + goto out_sys_pages; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; @@ -1166,9 +1215,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, bfregi->ver = ver; bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; - bfregi->uars = uars; - bfregi->num_uars = num_uars; context->cqe_version = resp.cqe_version; + context->lib_caps = false; return &context->ibucontext; @@ -1180,19 +1228,17 @@ out_page: free_page(context->upd_xlt_page); out_uars: - for (i--; i >= 0; i--) - mlx5_cmd_free_uar(dev->mdev, uars[i].index); -out_count: - kfree(bfregi->count); + deallocate_uars(dev, context); -out_bitmap: - kfree(bfregi->bitmap); +out_sys_pages: + kfree(bfregi->sys_pages); -out_uar_ctx: - kfree(uars); +out_count: + kfree(bfregi->count); out_ctx: kfree(context); + return ERR_PTR(err); } @@ -1200,31 +1246,31 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); - struct mlx5_bfreg_info *bfregi = &context->bfregi; - int i; + struct mlx5_bfreg_info *bfregi; + bfregi = &context->bfregi; if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn); free_page(context->upd_xlt_page); - - for (i = 0; i < bfregi->num_uars; i++) { - if (mlx5_cmd_free_uar(dev->mdev, bfregi->uars[i].index)) - mlx5_ib_warn(dev, "Failed to free UAR 0x%x\n", - bfregi->uars[i].index); - } - + deallocate_uars(dev, context); + kfree(bfregi->sys_pages); kfree(bfregi->count); - kfree(bfregi->bitmap); - kfree(bfregi->uars); kfree(context); return 0; } -static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) +static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, + int idx) { - return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index; + int fw_uars_per_page; + + fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
MLX5_UARS_IN_PAGE : 1; + + return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + + bfregi->sys_pages[idx] / fw_uars_per_page; } static int get_command(unsigned long offset) @@ -1384,6 +1430,18 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, unsigned long idx; phys_addr_t pfn, pa; pgprot_t prot; + int uars_per_page; + + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); + idx = get_index(vma->vm_pgoff); + if (idx % uars_per_page || + idx * uars_per_page >= bfregi->num_sys_pages) { + mlx5_ib_warn(dev, "invalid uar index %lu\n", idx); + return -EINVAL; + } switch (cmd) { case MLX5_IB_MMAP_WC_PAGE: @@ -1406,14 +1464,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, return -EINVAL; } - if (vma->vm_end - vma->vm_start != PAGE_SIZE) - return -EINVAL; - - idx = get_index(vma->vm_pgoff); - if (idx >= bfregi->num_uars) - return -EINVAL; - - pfn = uar_index2pfn(dev, bfregi->uars[idx].index); + pfn = uar_index2pfn(dev, bfregi, idx); mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); vma->vm_page_prot = prot; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index ae3bc4a1bfed..e1a4b93dce6b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -90,7 +90,6 @@ enum mlx5_ib_latency_class { MLX5_IB_LATENCY_CLASS_LOW, MLX5_IB_LATENCY_CLASS_MEDIUM, MLX5_IB_LATENCY_CLASS_HIGH, - MLX5_IB_LATENCY_CLASS_FAST_PATH }; enum mlx5_ib_mad_ifc_flags { @@ -129,6 +128,7 @@ struct mlx5_ib_ucontext { unsigned long upd_xlt_page; /* protect ODP/KSM */ struct mutex upd_xlt_page_mutex; + u64 lib_caps; }; static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) @@ -975,4 +975,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext, return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); } + +static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support) +{ + return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ? + MLX5_UARS_IN_PAGE : 1; +} + +static inline int get_num_uars(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) +{ + return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages; +} + #endif /* MLX5_IB_H */ diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index fce1c6db393b..6a83fb32599d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -480,16 +480,6 @@ static int first_med_bfreg(void) return 1; } -static int next_bfreg(int n) -{ - n++; - - while (((n % 4) & 2)) - n++; - - return n; -} - enum { /* this is the first blue flame register in the array of bfregs assigned * to a processes. Since we do not use it for blue flame but rather @@ -499,36 +489,38 @@ enum { NUM_NON_BLUE_FLAME_BFREGS = 1, }; -static int num_med_bfreg(struct mlx5_bfreg_info *bfregi) +static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi) +{ + return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR; +} + +static int num_med_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { int n; - n = bfregi->num_uars * MLX5_NON_FP_BFREGS_PER_UAR - - bfregi->num_low_latency_bfregs - NUM_NON_BLUE_FLAME_BFREGS; + n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs - + NUM_NON_BLUE_FLAME_BFREGS; return n >= 0 ? 
n : 0; } -static int max_bfregi(struct mlx5_bfreg_info *bfregi) -{ - return bfregi->num_uars * 4; -} - -static int first_hi_bfreg(struct mlx5_bfreg_info *bfregi) +static int first_hi_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { int med; - med = num_med_bfreg(bfregi); - return next_bfreg(med); + med = num_med_bfreg(dev, bfregi); + return ++med; } -static int alloc_high_class_bfreg(struct mlx5_bfreg_info *bfregi) +static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { int i; - for (i = first_hi_bfreg(bfregi); i < max_bfregi(bfregi); i = next_bfreg(i)) { - if (!test_bit(i, bfregi->bitmap)) { - set_bit(i, bfregi->bitmap); + for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) { + if (!bfregi->count[i]) { bfregi->count[i]++; return i; } @@ -537,12 +529,13 @@ static int alloc_high_class_bfreg(struct mlx5_bfreg_info *bfregi) return -ENOMEM; } -static int alloc_med_class_bfreg(struct mlx5_bfreg_info *bfregi) +static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { int minidx = first_med_bfreg(); int i; - for (i = first_med_bfreg(); i < first_hi_bfreg(bfregi); i = next_bfreg(i)) { + for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) { if (bfregi->count[i] < bfregi->count[minidx]) minidx = i; if (!bfregi->count[minidx]) @@ -553,7 +546,8 @@ static int alloc_med_class_bfreg(struct mlx5_bfreg_info *bfregi) return minidx; } -static int alloc_bfreg(struct mlx5_bfreg_info *bfregi, +static int alloc_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, enum mlx5_ib_latency_class lat) { int bfregn = -EINVAL; @@ -570,18 +564,14 @@ static int alloc_bfreg(struct mlx5_bfreg_info *bfregi, if (bfregi->ver < 2) bfregn = -ENOMEM; else - bfregn = alloc_med_class_bfreg(bfregi); + bfregn = alloc_med_class_bfreg(dev, bfregi); break; case MLX5_IB_LATENCY_CLASS_HIGH: if (bfregi->ver < 2) bfregn = -ENOMEM; else - bfregn = alloc_high_class_bfreg(bfregi); - break; - - case MLX5_IB_LATENCY_CLASS_FAST_PATH: - bfregn = 2; + bfregn = alloc_high_class_bfreg(dev, bfregi); break; } mutex_unlock(&bfregi->lock); @@ -589,37 +579,10 @@ static int alloc_bfreg(struct mlx5_bfreg_info *bfregi, return bfregn; } -static void free_med_class_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn) +static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn) { - clear_bit(bfregn, bfregi->bitmap); - --bfregi->count[bfregn]; -} - -static void free_high_class_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn) -{ - clear_bit(bfregn, bfregi->bitmap); - --bfregi->count[bfregn]; -} - -static void free_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn) -{ - int nbfregs = bfregi->num_uars * MLX5_BFREGS_PER_UAR; - int high_bfreg = nbfregs - bfregi->num_low_latency_bfregs; - mutex_lock(&bfregi->lock); - if (bfregn == 0) { - --bfregi->count[bfregn]; - goto out; - } - - if (bfregn < high_bfreg) { - free_med_class_bfreg(bfregi, bfregn); - goto out; - } - - free_high_class_bfreg(bfregi, bfregn); - -out: + bfregi->count[bfregn]--; mutex_unlock(&bfregi->lock); } @@ -661,9 +624,20 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq); -static int bfregn_to_uar_index(struct mlx5_bfreg_info *bfregi, int bfregn) +static int bfregn_to_uar_index(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, int bfregn) { - return bfregi->uars[bfregn / MLX5_BFREGS_PER_UAR].index; + int bfregs_per_sys_page; + int 
index_of_sys_page; + int offset; + + bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * + MLX5_NON_FP_BFREGS_PER_UAR; + index_of_sys_page = bfregn / bfregs_per_sys_page; + + offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR; + + return bfregi->sys_pages[index_of_sys_page] + offset; } static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, @@ -766,6 +740,13 @@ err_umem: return err; } +static int adjust_bfregn(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, int bfregn) +{ + return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR + + bfregn % MLX5_NON_FP_BFREGS_PER_UAR; +} + static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct ib_qp_init_attr *attr, @@ -800,15 +781,15 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, /* In CROSS_CHANNEL CQ and QP must use the same UAR */ bfregn = MLX5_CROSS_CHANNEL_BFREG; else { - bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH); + bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH); if (bfregn < 0) { mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n"); mlx5_ib_dbg(dev, "reverting to medium latency\n"); - bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM); + bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM); if (bfregn < 0) { mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n"); mlx5_ib_dbg(dev, "reverting to high latency\n"); - bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_LOW); + bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW); if (bfregn < 0) { mlx5_ib_warn(dev, "bfreg allocation failed\n"); return bfregn; @@ -817,7 +798,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, } } - uar_index = bfregn_to_uar_index(&context->bfregi, bfregn); + uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn); mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index); qp->rq.offset = 0; @@ -858,7 +839,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, MLX5_SET(qpc, qpc, page_offset, offset); MLX5_SET(qpc, qpc, uar_page, uar_index); - resp->bfreg_index = bfregn; + resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn); qp->bfregn = bfregn; err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); @@ -887,12 +868,12 @@ err_umem: ib_umem_release(ubuffer->umem); err_bfreg: - free_bfreg(&context->bfregi, bfregn); + free_bfreg(dev, &context->bfregi, bfregn); return err; } -static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, - struct mlx5_ib_qp_base *base) +static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd, + struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base) { struct mlx5_ib_ucontext *context; @@ -900,7 +881,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, mlx5_ib_db_unmap_user(context, &qp->db); if (base->ubuffer.umem) ib_umem_release(base->ubuffer.umem); - free_bfreg(&context->bfregi, qp->bfregn); + free_bfreg(dev, &context->bfregi, qp->bfregn); } static int create_kernel_qp(struct mlx5_ib_dev *dev, @@ -1784,7 +1765,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, err_create: if (qp->create_type == MLX5_QP_USER) - destroy_qp_user(pd, qp, base); + destroy_qp_user(dev, pd, qp, base); else if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); @@ -1962,7 +1943,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct 
mlx5_ib_qp *qp) if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); else if (qp->create_type == MLX5_QP_USER) - destroy_qp_user(&get_pd(qp)->ibpd, qp, base); + destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base); } static const char *ib_qp_type_str(enum ib_qp_type type) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index db1b9287012f..dd345e8cf6f0 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -211,6 +211,11 @@ enum { MLX5_EN_WR = (u64)2 }; +enum { + MLX5_ADAPTER_PAGE_SHIFT = 12, + MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, +}; + enum { MLX5_BFREGS_PER_UAR = 4, MLX5_MAX_UARS = 1 << 8, @@ -219,6 +224,8 @@ enum { MLX5_NON_FP_BFREGS_PER_UAR, MLX5_MAX_BFREGS = MLX5_MAX_UARS * MLX5_NON_FP_BFREGS_PER_UAR, + MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, + MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, }; enum { @@ -391,11 +398,6 @@ enum { MLX5_MAX_PAGE_SHIFT = 31 }; -enum { - MLX5_ADAPTER_PAGE_SHIFT = 12, - MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, -}; - enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index bb362f506a2e..7e7394fef835 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -189,18 +189,17 @@ enum mlx5_eq_type { }; struct mlx5_bfreg_info { - struct mlx5_uar *uars; - int num_uars; + u32 *sys_pages; int num_low_latency_bfregs; - unsigned long *bitmap; unsigned int *count; - struct mlx5_bf *bfs; /* * protect bfreg allocation data structs */ struct mutex lock; u32 ver; + bool lib_uar_4k; + u32 num_sys_pages; }; struct mlx5_cmd_first { @@ -470,13 +469,10 @@ struct mlx5_sq_bfreg { struct mlx5_uar { u32 index; - struct list_head bf_list; - unsigned free_bf_bmap; - void __iomem *bf_map; void __iomem *map; + void __iomem *bf_map; }; - struct mlx5_core_health { struct health_buffer __iomem *health; __be32 __iomem *health_counter; -- cgit v1.2.3 From 30aa60b3bd12bd79b5324b7b595bd3446ab24b52 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:27 +0200 Subject: IB/mlx5: Support 4k UAR for libmlx5 Add fields to structs to convey to kernel an indication whether the library supports multi UARs per page and return to the library the size of a UAR based on the queried value. Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/main.c | 21 +++++++- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 2 + drivers/net/ethernet/mellanox/mlx5/core/en.h | 9 ++-- .../net/ethernet/mellanox/mlx5/core/en_common.c | 12 +---- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21 ++++---- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 56 ---------------------- include/linux/mlx5/cq.h | 2 +- include/linux/mlx5/driver.h | 12 ----- include/uapi/rdma/mlx5-abi.h | 7 +++ 9 files changed, 42 insertions(+), 100 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 664067239de5..a191b9327b0c 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -992,6 +992,12 @@ out: return err; } +static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps) +{ + mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n", + caps & MLX5_LIB_CAP_4K_UAR ? 
"y" : "n"); +} + static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, struct mlx5_ib_alloc_ucontext_req_v2 *req, u32 *num_sys_pages) @@ -1122,6 +1128,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, resp.cqe_version = min_t(__u8, (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), req.max_cqe_version); + resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ? + MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT; + resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? + MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1; resp.response_length = min(offsetof(typeof(resp), response_length) + sizeof(resp.response_length), udata->outlen); @@ -1129,7 +1139,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (!context) return ERR_PTR(-ENOMEM); - lib_uar_4k = false; + lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; bfregi = &context->bfregi; /* updates req->total_num_bfregs */ @@ -1209,6 +1219,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, sizeof(resp.reserved2); } + if (field_avail(typeof(resp), log_uar_size, udata->outlen)) + resp.response_length += sizeof(resp.log_uar_size); + + if (field_avail(typeof(resp), num_uars_per_page, udata->outlen)) + resp.response_length += sizeof(resp.num_uars_per_page); + err = ib_copy_to_udata(udata, &resp, resp.response_length); if (err) goto out_td; @@ -1216,7 +1232,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, bfregi->ver = ver; bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; context->cqe_version = resp.cqe_version; - context->lib_caps = false; + context->lib_caps = req.lib_caps; + print_lib_caps(dev, context->lib_caps); return &context->ibucontext; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 32d4af9b594d..336d4738b807 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -179,6 +179,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", cq->cqn); + cq->uar = dev->priv.uar; + return 0; err_cmd: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3037631570b1..a473cea10c16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -465,7 +465,6 @@ struct mlx5e_sq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; - void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; u16 bf_buf_size; @@ -479,7 +478,7 @@ struct mlx5e_sq { /* control path */ struct mlx5_wq_ctrl wq_ctrl; - struct mlx5_uar uar; + struct mlx5_sq_bfreg bfreg; struct mlx5e_channel *channel; int tc; u32 rate_limit; @@ -806,7 +805,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { - u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; + u16 ofst = sq->bf_offset; /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); @@ -818,9 +817,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, */ wmb(); if (bf_sz) - __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz); + __iowrite64_copy(sq->bfreg.map + ofst, ctrl, bf_sz); else - mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL); + mlx5_write64((__be32 *)ctrl, sq->bfreg.map + ofst, NULL); /* flush the write-combining mapped buffer */ wmb(); 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index f175518ff07a..bd898d8deda0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -89,16 +89,10 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) struct mlx5e_resources *res = &mdev->mlx5e_res; int err; - err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false); - if (err) { - mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - return err; - } - err = mlx5_core_alloc_pd(mdev, &res->pdn); if (err) { mlx5_core_err(mdev, "alloc pd failed, %d\n", err); - goto err_unmap_free_uar; + return err; } err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn); @@ -121,9 +115,6 @@ err_dealloc_transport_domain: mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); err_dealloc_pd: mlx5_core_dealloc_pd(mdev, res->pdn); -err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &res->cq_uar); - return err; } @@ -134,7 +125,6 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) mlx5_core_destroy_mkey(mdev, &res->mkey); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_pd(mdev, res->pdn); - mlx5_unmap_free_uar(mdev, &res->cq_uar); } int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5ff86f0ecb7b..c32754b1598e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -991,7 +991,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->channel = c; sq->tc = tc; - err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf)); + err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false); if (err) return err; @@ -1003,12 +1003,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, goto err_unmap_free_uar; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - if (sq->uar.bf_map) { + if (sq->bfreg.wc) set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state); - sq->uar_map = sq->uar.bf_map; - } else { - sq->uar_map = sq->uar.map; - } + sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; sq->min_inline_mode = @@ -1036,7 +1033,7 @@ err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &sq->uar); + mlx5_free_bfreg(mdev, &sq->bfreg); return err; } @@ -1048,7 +1045,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq) mlx5e_free_sq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); - mlx5_unmap_free_uar(priv->mdev, &sq->uar); + mlx5_free_bfreg(priv->mdev, &sq->bfreg); } static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) @@ -1082,7 +1079,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 
0 : 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); - MLX5_SET(wq, wq, uar_page, sq->uar.index); + MLX5_SET(wq, wq, uar_page, sq->bfreg.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); @@ -1240,7 +1237,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &mdev->mlx5e_res.cq_uar; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@ -1289,7 +1285,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); - MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); @@ -1701,7 +1697,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); + MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index); } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@ -2320,7 +2316,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &mdev->mlx5e_res.cq_uar; cq->priv = priv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 07b273cccc26..2e6b0f290ddc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -37,11 +37,6 @@ #include #include "mlx5_core.h" -enum { - NUM_DRIVER_UARS = 4, - NUM_LOW_LAT_BFREGS = 4, -}; - int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) { u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0}; @@ -67,57 +62,6 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) } EXPORT_SYMBOL(mlx5_cmd_free_uar); -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, - bool map_wc) -{ - phys_addr_t pfn; - phys_addr_t uar_bar_start; - int err; - - err = mlx5_cmd_alloc_uar(mdev, &uar->index); - if (err) { - mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); - return err; - } - - uar_bar_start = pci_resource_start(mdev->pdev, 0); - pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; - - if (map_wc) { - uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->bf_map) { - mlx5_core_warn(mdev, "ioremap_wc() failed\n"); - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) - goto err_free_uar; - } - } else { - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) - goto err_free_uar; - } - - return 0; - -err_free_uar: - mlx5_core_warn(mdev, "ioremap() failed\n"); - err = -ENOMEM; - mlx5_cmd_free_uar(mdev, uar->index); - - return err; -} -EXPORT_SYMBOL(mlx5_alloc_map_uar); - -void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) -{ - if (uar->map) - iounmap(uar->map); - else - iounmap(uar->bf_map); - mlx5_cmd_free_uar(mdev, uar->index); -} -EXPORT_SYMBOL(mlx5_unmap_free_uar); - static int uars_per_sys_page(struct mlx5_core_dev *mdev) { if (MLX5_CAP_GEN(mdev, uar_4k)) diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 996863381bc8..95898847c7d4 100644 --- a/include/linux/mlx5/cq.h +++ 
b/include/linux/mlx5/cq.h @@ -42,13 +42,13 @@ struct mlx5_core_cq { int cqe_sz; __be32 *set_ci_db; __be32 *arm_db; + struct mlx5_uars_page *uar; atomic_t refcount; struct completion free; unsigned vector; unsigned int irqn; void (*comp) (struct mlx5_core_cq *); void (*event) (struct mlx5_core_cq *, enum mlx5_event); - struct mlx5_uar *uar; u32 cons_index; unsigned arm_sn; struct mlx5_rsc_debug *dbg; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 7e7394fef835..10e632588cd5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -467,12 +467,6 @@ struct mlx5_sq_bfreg { unsigned int offset; }; -struct mlx5_uar { - u32 index; - void __iomem *map; - void __iomem *bf_map; -}; - struct mlx5_core_health { struct health_buffer __iomem *health; __be32 __iomem *health_counter; @@ -725,7 +719,6 @@ struct mlx5_td { }; struct mlx5e_resources { - struct mlx5_uar cq_uar; u32 pdn; struct mlx5_td td; struct mlx5_core_mkey mkey; @@ -915,11 +908,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); -int mlx5_alloc_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi); -int mlx5_free_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi); -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, - bool map_wc); -void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 86a8f30060f3..85dc966ea70b 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -65,6 +65,10 @@ struct mlx5_ib_alloc_ucontext_req { __u32 num_low_latency_bfregs; }; +enum mlx5_lib_caps { + MLX5_LIB_CAP_4K_UAR = (u64)1 << 0, +}; + struct mlx5_ib_alloc_ucontext_req_v2 { __u32 total_num_bfregs; __u32 num_low_latency_bfregs; @@ -74,6 +78,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 { __u8 reserved0; __u16 reserved1; __u32 reserved2; + __u64 lib_caps; }; enum mlx5_ib_alloc_ucontext_resp_mask { @@ -103,6 +108,8 @@ struct mlx5_ib_alloc_ucontext_resp { __u8 cmds_supp_uhw; __u16 reserved2; __u64 hca_core_clock_offset; + __u32 log_uar_size; + __u32 num_uars_per_page; }; struct mlx5_ib_alloc_pd_resp { -- cgit v1.2.3 From f502d834950a28e02651bb7e2cc7111ddd352644 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:28 +0200 Subject: net/mlx5: Activate support for 4K UARs Activate 4K UAR support for firmware versions that support it. 
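A worked illustration of what this buys (editorial note, not part of the commit; it assumes a 64KB PAGE_SIZE kernel such as a common ppc64 configuration, two non-fast-path bfregs per UAR, and the constants introduced earlier in this series):

    MLX5_ADAPTER_PAGE_SIZE     = 1 << 12      = 4KB   (one firmware UAR)
    MLX5_UARS_IN_PAGE          = 64KB / 4KB   = 16    UARs per system page
    MLX5_NON_FP_BFREGS_IN_PAGE = 2 * 16       = 32    bfregs per system page

So a single allocated and mmap()ed system page now backs 16 UARs rather than one; uar_index2pfn() accounts for this by dividing the firmware UAR index by fw_uars_per_page, and get_uars_per_sys_page() reports the packing factor only when the library also advertises 4K-UAR support. With the default 4KB PAGE_SIZE both ratios collapse to 1 and behaviour is unchanged.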
Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index ff1f14498c22..a16ee1603272 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -530,6 +530,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); + /* If the HCA supports 4K UARs use it */ + if (MLX5_CAP_GEN_MAX(dev, uar_4k)) + MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); + MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); err = set_caps(dev, set_ctx, set_sz, -- cgit v1.2.3 From eafea7390e597f766927d1ba7459f3904b0b9194 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sat, 7 Jan 2017 20:04:23 -0800 Subject: net: ipv4: remove disable of bottom half in inet_rtm_getroute Nothing about the route lookup requires bottom half to be disabled. Remove the local_bh_disable ... local_bh_enable around ip_route_input. This appears to be a vestige of days gone by as it has been there since the beginning of git time. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/route.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f51823dc998b..7144288371cf 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2631,9 +2631,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) skb->protocol = htons(ETH_P_IP); skb->dev = dev; skb->mark = mark; - local_bh_disable(); err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); - local_bh_enable(); rt = skb_rtable(skb); if (err == 0 && rt->dst.error) -- cgit v1.2.3 From 2c956a60778cbb6a27e0c7a8a52a91378c90e1d1 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 8 Jan 2017 13:54:00 +0100 Subject: siphash: add cryptographically secure PRF SipHash is a 64-bit keyed hash function that is actually a cryptographically secure PRF, like HMAC. Except SipHash is super fast, and is meant to be used as a hashtable keyed lookup function, or as a general PRF for short input use cases, such as sequence numbers or RNG chaining. For the first usage: There are a variety of attacks known as "hashtable poisoning" in which an attacker forms some data such that the hash of that data will be the same, and then preceeds to fill up all entries of a hashbucket. This is a realistic and well-known denial-of-service vector. Currently hashtables use jhash, which is fast but not secure, and some kind of rotating key scheme (or none at all, which isn't good). SipHash is meant as a replacement for jhash in these cases. There are a modicum of places in the kernel that are vulnerable to hashtable poisoning attacks, either via userspace vectors or network vectors, and there's not a reliable mechanism inside the kernel at the moment to fix it. The first step toward fixing these issues is actually getting a secure primitive into the kernel for developers to use. Then we can, bit by bit, port things over to it as deemed appropriate. 
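Editorial sketch, not part of this patch: the kind of jhash-to-siphash port the previous paragraph anticipates, for a hashtable bucket function, looks roughly like the following. The example_* names and the flow struct are hypothetical; the Documentation/siphash.txt added below covers the idiom in more detail.

#include <linux/types.h>
#include <linux/hashtable.h>
#include <linux/random.h>
#include <linux/siphash.h>

/* Laid out largest-to-smallest so there are no padding holes to hash. */
struct example_flow {
        __be32 saddr;
        __be32 daddr;
        __be16 sport;
        __be16 dport;
};

struct example_table {
        DECLARE_HASHTABLE(buckets, 8);
        siphash_key_t key;      /* replaces a static or rotating jhash seed */
};

static void example_table_init(struct example_table *t)
{
        hash_init(t->buckets);
        get_random_bytes(&t->key, sizeof(t->key));
}

/* Previously something like: jhash(flow, sizeof(*flow), t->seed) */
static u32 example_flow_hash(const struct example_table *t,
                             const struct example_flow *flow)
{
        return (u32)siphash(flow, sizeof(*flow), &t->key);
}
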
While SipHash is extremely fast for a cryptographically secure function, it is likely a bit slower than the insecure jhash, and so replacements will be evaluated on a case-by-case basis based on whether or not the difference in speed is negligible and whether or not the current jhash usage poses a real security risk. For the second usage: A few places in the kernel are using MD5 or SHA1 for creating secure sequence numbers, syn cookies, port numbers, or fast random numbers. SipHash is a faster and more fitting, and more secure replacement for MD5 in those situations. Replacing MD5 and SHA1 with SipHash for these uses is obvious and straight-forward, and so is submitted along with this patch series. There shouldn't be much of a debate over its efficacy. Dozens of languages are already using this internally for their hash tables and PRFs. Some of the BSDs already use this in their kernels. SipHash is a widely known high-speed solution to a widely known set of problems, and it's time we catch-up. Signed-off-by: Jason A. Donenfeld Reviewed-by: Jean-Philippe Aumasson Cc: Linus Torvalds Cc: Eric Biggers Cc: David Laight Cc: Eric Dumazet Signed-off-by: David S. Miller --- Documentation/siphash.txt | 100 ++++++++++++++++++++ MAINTAINERS | 7 ++ include/linux/siphash.h | 85 +++++++++++++++++ lib/Kconfig.debug | 6 +- lib/Makefile | 5 +- lib/siphash.c | 232 ++++++++++++++++++++++++++++++++++++++++++++++ lib/test_siphash.c | 131 ++++++++++++++++++++++++++ 7 files changed, 561 insertions(+), 5 deletions(-) create mode 100644 Documentation/siphash.txt create mode 100644 include/linux/siphash.h create mode 100644 lib/siphash.c create mode 100644 lib/test_siphash.c diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt new file mode 100644 index 000000000000..e8e6ddbbaab4 --- /dev/null +++ b/Documentation/siphash.txt @@ -0,0 +1,100 @@ + SipHash - a short input PRF +----------------------------------------------- +Written by Jason A. Donenfeld + +SipHash is a cryptographically secure PRF -- a keyed hash function -- that +performs very well for short inputs, hence the name. It was designed by +cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended +as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`, +and so forth. + +SipHash takes a secret key filled with randomly generated numbers and either +an input buffer or several input integers. It spits out an integer that is +indistinguishable from random. You may then use that integer as part of secure +sequence numbers, secure cookies, or mask it off for use in a hash table. + +1. Generating a key + +Keys should always be generated from a cryptographically secure source of +random numbers, either using get_random_bytes or get_random_once: + +siphash_key_t key; +get_random_bytes(&key, sizeof(key)); + +If you're not deriving your key from here, you're doing it wrong. + +2. 
Using the functions + +There are two variants of the function, one that takes a list of integers, and +one that takes a buffer: + +u64 siphash(const void *data, size_t len, const siphash_key_t *key); + +And: + +u64 siphash_1u64(u64, const siphash_key_t *key); +u64 siphash_2u64(u64, u64, const siphash_key_t *key); +u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key); +u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key); +u64 siphash_1u32(u32, const siphash_key_t *key); +u64 siphash_2u32(u32, u32, const siphash_key_t *key); +u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key); +u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key); + +If you pass the generic siphash function something of a constant length, it +will constant fold at compile-time and automatically choose one of the +optimized functions. + +3. Hashtable key function usage: + +struct some_hashtable { + DECLARE_HASHTABLE(hashtable, 8); + siphash_key_t key; +}; + +void init_hashtable(struct some_hashtable *table) +{ + get_random_bytes(&table->key, sizeof(table->key)); +} + +static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input) +{ + return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)]; +} + +You may then iterate like usual over the returned hash bucket. + +4. Security + +SipHash has a very high security margin, with its 128-bit key. So long as the +key is kept secret, it is impossible for an attacker to guess the outputs of +the function, even if being able to observe many outputs, since 2^128 outputs +is significant. + +Linux implements the "2-4" variant of SipHash. + +5. Struct-passing Pitfalls + +Often times the XuY functions will not be large enough, and instead you'll +want to pass a pre-filled struct to siphash. When doing this, it's important +to always ensure the struct has no padding holes. The easiest way to do this +is to simply arrange the members of the struct in descending order of size, +and to use offsetendof() instead of sizeof() for getting the size. For +performance reasons, if possible, it's probably a good thing to align the +struct to the right boundary. Here's an example: + +const struct { + struct in6_addr saddr; + u32 counter; + u16 dport; +} __aligned(SIPHASH_ALIGNMENT) combined = { + .saddr = *(struct in6_addr *)saddr, + .counter = counter, + .dport = dport +}; +u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret); + +6. Resources + +Read the SipHash paper if you're interested in learning more: +https://131002.net/siphash/siphash.pdf diff --git a/MAINTAINERS b/MAINTAINERS index fcb33a17831a..21edaedcab46 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11304,6 +11304,13 @@ F: arch/arm/mach-s3c24xx/mach-bast.c F: arch/arm/mach-s3c24xx/bast-ide.c F: arch/arm/mach-s3c24xx/bast-irq.c +SIPHASH PRF ROUTINES +M: Jason A. Donenfeld +S: Maintained +F: lib/siphash.c +F: lib/test_siphash.c +F: include/linux/siphash.h + TI DAVINCI MACHINE SUPPORT M: Sekhar Nori M: Kevin Hilman diff --git a/include/linux/siphash.h b/include/linux/siphash.h new file mode 100644 index 000000000000..feeb29cd113e --- /dev/null +++ b/include/linux/siphash.h @@ -0,0 +1,85 @@ +/* Copyright (C) 2016 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4. 
+ */ + +#ifndef _LINUX_SIPHASH_H +#define _LINUX_SIPHASH_H + +#include +#include + +#define SIPHASH_ALIGNMENT __alignof__(u64) +typedef struct { + u64 key[2]; +} siphash_key_t; + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); +#endif + +u64 siphash_1u64(const u64 a, const siphash_key_t *key); +u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +u64 siphash_3u64(const u64 a, const u64 b, const u64 c, + const siphash_key_t *key); +u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, + const siphash_key_t *key); +u64 siphash_1u32(const u32 a, const siphash_key_t *key); +u64 siphash_3u32(const u32 a, const u32 b, const u32 c, + const siphash_key_t *key); + +static inline u64 siphash_2u32(const u32 a, const u32 b, + const siphash_key_t *key) +{ + return siphash_1u64((u64)b << 32 | a, key); +} +static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, + const u32 d, const siphash_key_t *key) +{ + return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); +} + + +static inline u64 ___siphash_aligned(const __le64 *data, size_t len, + const siphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return siphash_1u32(le32_to_cpup((const __le32 *)data), key); + if (__builtin_constant_p(len) && len == 8) + return siphash_1u64(le64_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 16) + return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 24) + return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 32) + return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), le64_to_cpu(data[3]), + key); + return __siphash_aligned(data, len, key); +} + +/** + * siphash - compute 64-bit siphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the siphash key + */ +static inline u64 siphash(const void *data, size_t len, + const siphash_key_t *key) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + return __siphash_unaligned(data, len, key); +#endif + return ___siphash_aligned(data, len, key); +} + +#endif /* _LINUX_SIPHASH_H */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b06848a104e6..3d2515a770c3 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1819,9 +1819,9 @@ config TEST_HASH tristate "Perform selftest on hash functions" default n help - Enable this option to test the kernel's integer () - and string () hash functions on boot - (or module load). + Enable this option to test the kernel's integer (), + string (), and siphash () + hash functions on boot (or module load). This is intended to help people writing architecture-specific optimized versions. If unsure, say N. 
diff --git a/lib/Makefile b/lib/Makefile index bc4073a8cd08..7b3008d58600 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -22,7 +22,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ sha1.o chacha20.o md5.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o + earlycpio.o seq_buf.o siphash.o \ + nmi_backtrace.o nodemask.o win_minmax.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -44,7 +45,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o obj-y += kstrtox.o obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o -obj-$(CONFIG_TEST_HASH) += test_hash.o +obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o obj-$(CONFIG_TEST_KASAN) += test_kasan.o obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LKM) += test_module.o diff --git a/lib/siphash.c b/lib/siphash.c new file mode 100644 index 000000000000..c43cf406e71b --- /dev/null +++ b/lib/siphash.c @@ -0,0 +1,232 @@ +/* Copyright (C) 2016 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4. + */ + +#include +#include + +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 +#include +#include +#endif + +#define SIPROUND \ + do { \ + v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ + v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ + v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ + v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ + } while (0) + +#define PREAMBLE(len) \ + u64 v0 = 0x736f6d6570736575ULL; \ + u64 v1 = 0x646f72616e646f6dULL; \ + u64 v2 = 0x6c7967656e657261ULL; \ + u64 v3 = 0x7465646279746573ULL; \ + u64 b = ((u64)(len)) << 56; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define POSTAMBLE \ + v3 ^= b; \ + SIPROUND; \ + SIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} +EXPORT_SYMBOL(__siphash_aligned); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & 
+ bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} +EXPORT_SYMBOL(__siphash_unaligned); +#endif + +/** + * siphash_1u64 - compute 64-bit siphash PRF value of a u64 + * @first: first u64 + * @key: the siphash key + */ +u64 siphash_1u64(const u64 first, const siphash_key_t *key) +{ + PREAMBLE(8) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_1u64); + +/** + * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64 + * @first: first u64 + * @second: second u64 + * @key: the siphash key + */ +u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) +{ + PREAMBLE(16) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_2u64); + +/** + * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @key: the siphash key + */ +u64 siphash_3u64(const u64 first, const u64 second, const u64 third, + const siphash_key_t *key) +{ + PREAMBLE(24) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_3u64); + +/** + * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @forth: forth u64 + * @key: the siphash key + */ +u64 siphash_4u64(const u64 first, const u64 second, const u64 third, + const u64 forth, const siphash_key_t *key) +{ + PREAMBLE(32) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + v3 ^= forth; + SIPROUND; + SIPROUND; + v0 ^= forth; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_4u64); + +u64 siphash_1u32(const u32 first, const siphash_key_t *key) +{ + PREAMBLE(4) + b |= first; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_1u32); + +u64 siphash_3u32(const u32 first, const u32 second, const u32 third, + const siphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + PREAMBLE(12) + v3 ^= combined; + SIPROUND; + SIPROUND; + v0 ^= combined; + b |= third; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_3u32); diff --git a/lib/test_siphash.c b/lib/test_siphash.c new file mode 100644 index 000000000000..d972acfc15e4 --- /dev/null +++ b/lib/test_siphash.c @@ -0,0 +1,131 @@ +/* Test cases for siphash.c + * + * Copyright (C) 2016 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +/* Test vectors taken from official reference source available at: + * https://131002.net/siphash/siphash24.c + */ + +static const siphash_key_t test_key_siphash = + {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }}; + +static const u64 test_vectors_siphash[64] = { + 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL, + 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL, + 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL, + 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL, + 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL, + 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL, + 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL, + 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL, + 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL, + 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL, + 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL, + 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL, + 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL, + 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL, + 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL, + 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL, + 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL, + 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL, + 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL, + 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL, + 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL, + 0x958a324ceb064572ULL +}; + +static int __init siphash_test_init(void) +{ + u8 in[64] __aligned(SIPHASH_ALIGNMENT); + u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT); + u8 i; + int ret = 0; + + for (i = 0; i < 64; ++i) { + in[i] = i; + in_unaligned[i + 1] = i; + if (siphash(in, i, &test_key_siphash) != + test_vectors_siphash[i]) { + pr_info("siphash self-test aligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } + if (siphash(in_unaligned + 1, i, &test_key_siphash) != + test_vectors_siphash[i]) { + pr_info("siphash self-test unaligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } + } + if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) != + test_vectors_siphash[8]) { + pr_info("siphash self-test 1u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, + &test_key_siphash) != test_vectors_siphash[16]) { + pr_info("siphash self-test 2u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, + 0x1716151413121110ULL, &test_key_siphash) != + test_vectors_siphash[24]) { + pr_info("siphash self-test 3u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, + 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL, + &test_key_siphash) != test_vectors_siphash[32]) { + pr_info("siphash self-test 4u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_1u32(0x03020100U, &test_key_siphash) != + test_vectors_siphash[4]) { + pr_info("siphash self-test 1u32: FAIL\n"); + ret = -EINVAL; + } + if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) != + test_vectors_siphash[8]) { + pr_info("siphash self-test 2u32: FAIL\n"); + ret = -EINVAL; + } + if 
(siphash_3u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, &test_key_siphash) != + test_vectors_siphash[12]) { + pr_info("siphash self-test 3u32: FAIL\n"); + ret = -EINVAL; + } + if (siphash_4u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) != + test_vectors_siphash[16]) { + pr_info("siphash self-test 4u32: FAIL\n"); + ret = -EINVAL; + } + if (!ret) + pr_info("self-tests: pass\n"); + return ret; +} + +static void __exit siphash_test_exit(void) +{ +} + +module_init(siphash_test_init); +module_exit(siphash_test_exit); + +MODULE_AUTHOR("Jason A. Donenfeld "); +MODULE_LICENSE("Dual BSD/GPL"); -- cgit v1.2.3 From 1ae2324f732c9c4e2fa4ebd885fa1001b70d52e1 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 8 Jan 2017 13:54:01 +0100 Subject: siphash: implement HalfSipHash1-3 for hash tables HalfSipHash, or hsiphash, is a shortened version of SipHash, which generates 32-bit outputs using a weaker 64-bit key. It has *much* lower security margins, and shouldn't be used for anything too sensitive, but it could be used as a hashtable key function replacement, if the output is never exposed, and if the security requirement is not too high. The goal is to make this something that performance-critical jhash users would be willing to use. On 64-bit machines, HalfSipHash1-3 is slower than SipHash1-3, so we alias SipHash1-3 to HalfSipHash1-3 on those systems. 64-bit x86_64: [ 0.509409] test_siphash: SipHash2-4 cycles: 4049181 [ 0.510650] test_siphash: SipHash1-3 cycles: 2512884 [ 0.512205] test_siphash: HalfSipHash1-3 cycles: 3429920 [ 0.512904] test_siphash: JenkinsHash cycles: 978267 So, we map hsiphash() -> SipHash1-3 32-bit x86: [ 0.509868] test_siphash: SipHash2-4 cycles: 14812892 [ 0.513601] test_siphash: SipHash1-3 cycles: 9510710 [ 0.515263] test_siphash: HalfSipHash1-3 cycles: 3856157 [ 0.515952] test_siphash: JenkinsHash cycles: 1148567 So, we map hsiphash() -> HalfSipHash1-3 hsiphash() is roughly 3 times slower than jhash(), but comes with a considerable security improvement. Signed-off-by: Jason A. Donenfeld Reviewed-by: Jean-Philippe Aumasson Signed-off-by: David S. Miller --- Documentation/siphash.txt | 75 +++++++++++ include/linux/siphash.h | 57 +++++++- lib/siphash.c | 321 +++++++++++++++++++++++++++++++++++++++++++++- lib/test_siphash.c | 98 +++++++++++++- 4 files changed, 546 insertions(+), 5 deletions(-) diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt index e8e6ddbbaab4..908d348ff777 100644 --- a/Documentation/siphash.txt +++ b/Documentation/siphash.txt @@ -98,3 +98,78 @@ u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret); Read the SipHash paper if you're interested in learning more: https://131002.net/siphash/siphash.pdf + + +~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ + +HalfSipHash - SipHash's insecure younger cousin +----------------------------------------------- +Written by Jason A. Donenfeld + +On the off-chance that SipHash is not fast enough for your needs, you might be +able to justify using HalfSipHash, a terrifying but potentially useful +possibility. HalfSipHash cuts SipHash's rounds down from "2-4" to "1-3" and, +even scarier, uses an easily brute-forcable 64-bit key (with a 32-bit output) +instead of SipHash's 128-bit key. However, this may appeal to some +high-performance `jhash` users. + +Danger! 
+ +Do not ever use HalfSipHash except for as a hashtable key function, and only +then when you can be absolutely certain that the outputs will never be +transmitted out of the kernel. This is only remotely useful over `jhash` as a +means of mitigating hashtable flooding denial of service attacks. + +1. Generating a key + +Keys should always be generated from a cryptographically secure source of +random numbers, either using get_random_bytes or get_random_once: + +hsiphash_key_t key; +get_random_bytes(&key, sizeof(key)); + +If you're not deriving your key from here, you're doing it wrong. + +2. Using the functions + +There are two variants of the function, one that takes a list of integers, and +one that takes a buffer: + +u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key); + +And: + +u32 hsiphash_1u32(u32, const hsiphash_key_t *key); +u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key); +u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key); +u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key); + +If you pass the generic hsiphash function something of a constant length, it +will constant fold at compile-time and automatically choose one of the +optimized functions. + +3. Hashtable key function usage: + +struct some_hashtable { + DECLARE_HASHTABLE(hashtable, 8); + hsiphash_key_t key; +}; + +void init_hashtable(struct some_hashtable *table) +{ + get_random_bytes(&table->key, sizeof(table->key)); +} + +static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input) +{ + return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)]; +} + +You may then iterate like usual over the returned hash bucket. + +4. Performance + +HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements, +this will not be a problem, as the hashtable lookup isn't the bottleneck. And +in general, this is probably a good sacrifice to make for the security and DoS +resistance of HalfSipHash. diff --git a/include/linux/siphash.h b/include/linux/siphash.h index feeb29cd113e..fa7a6b9cedbf 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -5,7 +5,9 @@ * SipHash: a fast short-input PRF * https://131002.net/siphash/ * - * This implementation is specifically for SipHash2-4. + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. 
*/ #ifndef _LINUX_SIPHASH_H @@ -82,4 +84,57 @@ static inline u64 siphash(const void *data, size_t len, return ___siphash_aligned(data, len, key); } +#define HSIPHASH_ALIGNMENT __alignof__(unsigned long) +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key); +#endif + +u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); +u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, + const hsiphash_key_t *key); +u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, + const hsiphash_key_t *key); + +static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, + const hsiphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return hsiphash_1u32(le32_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 8) + return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 12) + return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 16) + return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), le32_to_cpu(data[3]), + key); + return __hsiphash_aligned(data, len, key); +} + +/** + * hsiphash - compute 32-bit hsiphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the hsiphash key + */ +static inline u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + return __hsiphash_unaligned(data, len, key); +#endif + return ___hsiphash_aligned(data, len, key); +} + #endif /* _LINUX_SIPHASH_H */ diff --git a/lib/siphash.c b/lib/siphash.c index c43cf406e71b..3ae58b4edad6 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -5,7 +5,9 @@ * SipHash: a fast short-input PRF * https://131002.net/siphash/ * - * This implementation is specifically for SipHash2-4. + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. */ #include @@ -230,3 +232,320 @@ u64 siphash_3u32(const u32 first, const u32 second, const u32 third, POSTAMBLE } EXPORT_SYMBOL(siphash_3u32); + +#if BITS_PER_LONG == 64 +/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for + * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3. 
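+ * In that case HSIPROUND and HPREAMBLE below are plain aliases of the
+ * SipHash round and preamble; only HPOSTAMBLE is defined separately, for
+ * the shorter SipHash1-3 finalization.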
+ */ + +#define HSIPROUND SIPROUND +#define HPREAMBLE(len) PREAMBLE(len) +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_aligned); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_unaligned); +#endif + +/** + * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + b |= first; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_1u32); + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(8) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_2u32); + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(12) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + b |= third; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_3u32); + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(16) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + combined = (u64)forth << 32 | third; + v3 ^= combined; + HSIPROUND; 
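+ /* second (third|forth) word absorbed; xor it back into v0 and finalize */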
+ v0 ^= combined; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_4u32); +#else +#define HSIPROUND \ + do { \ + v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ + v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ + v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ + v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ + } while (0) + +#define HPREAMBLE(len) \ + u32 v0 = 0; \ + u32 v1 = 0; \ + u32 v2 = 0x6c796765U; \ + u32 v3 = 0x74656462U; \ + u32 b = ((u32)(len)) << 24; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return v1 ^ v3; + +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = le32_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_aligned); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = get_unaligned_le32(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_unaligned); +#endif + +/** + * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + v3 ^= first; + HSIPROUND; + v0 ^= first; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_1u32); + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + HPREAMBLE(8) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_2u32); + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + HPREAMBLE(12) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_3u32); + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + HPREAMBLE(16) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + v3 ^= forth; + HSIPROUND; + v0 ^= forth; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_4u32); +#endif diff --git a/lib/test_siphash.c b/lib/test_siphash.c index d972acfc15e4..a6d854d933bf 
100644 --- a/lib/test_siphash.c +++ b/lib/test_siphash.c @@ -7,7 +7,9 @@ * SipHash: a fast short-input PRF * https://131002.net/siphash/ * - * This implementation is specifically for SipHash2-4. + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -18,8 +20,8 @@ #include #include -/* Test vectors taken from official reference source available at: - * https://131002.net/siphash/siphash24.c +/* Test vectors taken from reference source available at: + * https://github.com/veorq/SipHash */ static const siphash_key_t test_key_siphash = @@ -50,6 +52,64 @@ static const u64 test_vectors_siphash[64] = { 0x958a324ceb064572ULL }; +#if BITS_PER_LONG == 64 +static const hsiphash_key_t test_key_hsiphash = + {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }}; + +static const u32 test_vectors_hsiphash[64] = { + 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU, + 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U, + 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU, + 0x6c063de4U, 0x92ff097fU, 0xf94dc352U, + 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U, + 0x2a519956U, 0x7d908b66U, 0x63dbd80cU, + 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U, + 0x2b45f844U, 0xa320872eU, 0xdae6c123U, + 0x67349c8cU, 0x705b0979U, 0xca9913a5U, + 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U, + 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU, + 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U, + 0xada26206U, 0xa3c33057U, 0xae3a36a1U, + 0x7b108392U, 0x99e41531U, 0x3f1ad944U, + 0xc8138825U, 0xc28949a6U, 0xfaf8876bU, + 0x9f042196U, 0x68b1d623U, 0x8b5114fdU, + 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU, + 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U, + 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U, + 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U, + 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U, + 0xb7bbb3a8U +}; +#else +static const hsiphash_key_t test_key_hsiphash = + {{ 0x03020100U, 0x07060504U }}; + +static const u32 test_vectors_hsiphash[64] = { + 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U, + 0x01539939U, 0x7e059ea6U, 0x88e3d89bU, + 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U, + 0xc839caedU, 0xe4fa32cfU, 0x959246eeU, + 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU, + 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU, + 0x06712339U, 0x522aca67U, 0x911bb605U, + 0x90a65f0eU, 0xf826ef7bU, 0x62512debU, + 0x57150ad7U, 0x5d473507U, 0x1ec47442U, + 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U, + 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU, + 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU, + 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU, + 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU, + 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U, + 0x65671619U, 0x9f5fff91U, 0xd89c5267U, + 0x007783ebU, 0x95766243U, 0xab639262U, + 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U, + 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU, + 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U, + 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U, + 0x87178304U +}; +#endif + static int __init siphash_test_init(void) { u8 in[64] __aligned(SIPHASH_ALIGNMENT); @@ -70,6 +130,16 @@ static int __init siphash_test_init(void) pr_info("siphash self-test unaligned %u: FAIL\n", i + 1); ret = -EINVAL; } + if (hsiphash(in, i, &test_key_hsiphash) != + test_vectors_hsiphash[i]) { + pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } + if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) != + test_vectors_hsiphash[i]) { + pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } } if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) != test_vectors_siphash[8]) { @@ -115,6 +185,28 @@ static int __init 
siphash_test_init(void) pr_info("siphash self-test 4u32: FAIL\n"); ret = -EINVAL; } + if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) != + test_vectors_hsiphash[4]) { + pr_info("hsiphash self-test 1u32: FAIL\n"); + ret = -EINVAL; + } + if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) != + test_vectors_hsiphash[8]) { + pr_info("hsiphash self-test 2u32: FAIL\n"); + ret = -EINVAL; + } + if (hsiphash_3u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, &test_key_hsiphash) != + test_vectors_hsiphash[12]) { + pr_info("hsiphash self-test 3u32: FAIL\n"); + ret = -EINVAL; + } + if (hsiphash_4u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) != + test_vectors_hsiphash[16]) { + pr_info("hsiphash self-test 4u32: FAIL\n"); + ret = -EINVAL; + } if (!ret) pr_info("self-tests: pass\n"); return ret; -- cgit v1.2.3 From 7cd23e5300c1b95903859a8bdc084e79be66ce16 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 8 Jan 2017 13:54:02 +0100 Subject: secure_seq: use SipHash in place of MD5 This gives a clear speed and security improvement. Siphash is both faster and is more solid crypto than the aging MD5. Rather than manually filling MD5 buffers, for IPv6, we simply create a layout by a simple anonymous struct, for which gcc generates rather efficient code. For IPv4, we pass the values directly to the short input convenience functions. 64-bit x86_64: [ 1.683628] secure_tcpv6_sequence_number_md5# cycles: 99563527 [ 1.717350] secure_tcp_sequence_number_md5# cycles: 92890502 [ 1.741968] secure_tcpv6_sequence_number_siphash# cycles: 67825362 [ 1.762048] secure_tcp_sequence_number_siphash# cycles: 67485526 32-bit x86: [ 1.600012] secure_tcpv6_sequence_number_md5# cycles: 103227892 [ 1.634219] secure_tcp_sequence_number_md5# cycles: 94732544 [ 1.669102] secure_tcpv6_sequence_number_siphash# cycles: 96299384 [ 1.700165] secure_tcp_sequence_number_siphash# cycles: 86015473 Signed-off-by: Jason A. Donenfeld Cc: Andi Kleen Cc: David Miller Cc: David Laight Cc: Tom Herbert Cc: Hannes Frederic Sowa Cc: Eric Dumazet Signed-off-by: David S. Miller --- net/core/secure_seq.c | 145 ++++++++++++++++++++++---------------------------- 1 file changed, 63 insertions(+), 82 deletions(-) diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 88a8e429fc3e..3a9fcec94ace 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -1,3 +1,7 @@ +/* + * Copyright (C) 2016 Jason A. Donenfeld . All Rights Reserved. 
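+ *
+ * Sequence numbers and ephemeral ports below are derived with SipHash
+ * keyed by net_secret, which is seeded once at first use via
+ * net_get_random_once().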
+ */ + #include #include #include @@ -8,18 +12,18 @@ #include #include #include - +#include #include #if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET) +#include #include -#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) -static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; +static siphash_key_t net_secret __read_mostly; static __always_inline void net_secret_init(void) { - net_get_random_once(net_secret, sizeof(net_secret)); + net_get_random_once(&net_secret, sizeof(net_secret)); } #endif @@ -44,80 +48,70 @@ static u32 seq_scale(u32 seq) u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, __be16 sport, __be16 dport, u32 *tsoff) { - u32 secret[MD5_MESSAGE_BYTES / 4]; - u32 hash[MD5_DIGEST_WORDS]; - u32 i; - + const struct { + struct in6_addr saddr; + struct in6_addr daddr; + __be16 sport; + __be16 dport; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .saddr = *(struct in6_addr *)saddr, + .daddr = *(struct in6_addr *)daddr, + .sport = sport, + .dport = dport + }; + u64 hash; net_secret_init(); - memcpy(hash, saddr, 16); - for (i = 0; i < 4; i++) - secret[i] = net_secret[i] + (__force u32)daddr[i]; - secret[4] = net_secret[4] + - (((__force u16)sport << 16) + (__force u16)dport); - for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++) - secret[i] = net_secret[i]; - - md5_transform(hash, secret); - - *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0; - return seq_scale(hash[0]); + hash = siphash(&combined, offsetofend(typeof(combined), dport), + &net_secret); + *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; + return seq_scale(hash); } EXPORT_SYMBOL(secure_tcpv6_sequence_number); u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) { - u32 secret[MD5_MESSAGE_BYTES / 4]; - u32 hash[MD5_DIGEST_WORDS]; - u32 i; - + const struct { + struct in6_addr saddr; + struct in6_addr daddr; + __be16 dport; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .saddr = *(struct in6_addr *)saddr, + .daddr = *(struct in6_addr *)daddr, + .dport = dport + }; net_secret_init(); - memcpy(hash, saddr, 16); - for (i = 0; i < 4; i++) - secret[i] = net_secret[i] + (__force u32) daddr[i]; - secret[4] = net_secret[4] + (__force u32)dport; - for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++) - secret[i] = net_secret[i]; - - md5_transform(hash, secret); - - return hash[0]; + return siphash(&combined, offsetofend(typeof(combined), dport), + &net_secret); } EXPORT_SYMBOL(secure_ipv6_port_ephemeral); #endif #ifdef CONFIG_INET +/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), + * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, + * it would be easy enough to have the former function use siphash_4u32, passing + * the arguments as separate u32. + */ + u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 *tsoff) { - u32 hash[MD5_DIGEST_WORDS]; - + u64 hash; net_secret_init(); - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = ((__force u16)sport << 16) + (__force u16)dport; - hash[3] = net_secret[15]; - - md5_transform(hash, net_secret); - - *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0; - return seq_scale(hash[0]); + hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, + (__force u32)sport << 16 | (__force u32)dport, + &net_secret); + *tsoff = sysctl_tcp_timestamps == 1 ? 
(hash >> 32) : 0; + return seq_scale(hash); } u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) { - u32 hash[MD5_DIGEST_WORDS]; - net_secret_init(); - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = (__force u32)dport ^ net_secret[14]; - hash[3] = net_secret[15]; - - md5_transform(hash, net_secret); - - return hash[0]; + return siphash_3u32((__force u32)saddr, (__force u32)daddr, + (__force u16)dport, &net_secret); } EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); #endif @@ -126,21 +120,11 @@ EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport) { - u32 hash[MD5_DIGEST_WORDS]; u64 seq; - net_secret_init(); - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = ((__force u16)sport << 16) + (__force u16)dport; - hash[3] = net_secret[15]; - - md5_transform(hash, net_secret); - - seq = hash[0] | (((u64)hash[1]) << 32); + seq = siphash_3u32(saddr, daddr, (u32)sport << 16 | dport, &net_secret); seq += ktime_get_real_ns(); seq &= (1ull << 48) - 1; - return seq; } EXPORT_SYMBOL(secure_dccp_sequence_number); @@ -149,26 +133,23 @@ EXPORT_SYMBOL(secure_dccp_sequence_number); u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, __be16 sport, __be16 dport) { - u32 secret[MD5_MESSAGE_BYTES / 4]; - u32 hash[MD5_DIGEST_WORDS]; + const struct { + struct in6_addr saddr; + struct in6_addr daddr; + __be16 sport; + __be16 dport; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .saddr = *(struct in6_addr *)saddr, + .daddr = *(struct in6_addr *)daddr, + .sport = sport, + .dport = dport + }; u64 seq; - u32 i; - net_secret_init(); - memcpy(hash, saddr, 16); - for (i = 0; i < 4; i++) - secret[i] = net_secret[i] + (__force u32)daddr[i]; - secret[4] = net_secret[4] + - (((__force u16)sport << 16) + (__force u16)dport); - for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++) - secret[i] = net_secret[i]; - - md5_transform(hash, secret); - - seq = hash[0] | (((u64)hash[1]) << 32); + seq = siphash(&combined, offsetofend(typeof(combined), dport), + &net_secret); seq += ktime_get_real_ns(); seq &= (1ull << 48) - 1; - return seq; } EXPORT_SYMBOL(secure_dccpv6_sequence_number); -- cgit v1.2.3 From fe62d05b295bde037fa324767674540907c89362 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 8 Jan 2017 13:54:03 +0100 Subject: syncookies: use SipHash in place of SHA1 SHA1 is slower and less secure than SipHash, and so replacing syncookie generation with SipHash makes natural sense. Some BSDs have been doing this for several years in fact. The speedup should be similar -- and even more impressive -- to the speedup from the sequence number fix in this series. Signed-off-by: Jason A. Donenfeld Cc: Eric Dumazet Cc: David Miller Signed-off-by: David S. 
Miller --- net/ipv4/syncookies.c | 21 +++++---------------- net/ipv6/syncookies.c | 41 +++++++++++++++++++---------------------- 2 files changed, 24 insertions(+), 38 deletions(-) diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 3e88467d70ee..496b97e17aaf 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -13,13 +13,13 @@ #include #include #include -#include +#include #include #include #include #include -static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; +static siphash_key_t syncookie_secret[2] __read_mostly; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) @@ -48,24 +48,13 @@ static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; #define TSBITS 6 #define TSMASK (((__u32)1 << TSBITS) - 1) -static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv4_cookie_scratch); - static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 count, int c) { - __u32 *tmp; - net_get_random_once(syncookie_secret, sizeof(syncookie_secret)); - - tmp = this_cpu_ptr(ipv4_cookie_scratch); - memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c])); - tmp[0] = (__force u32)saddr; - tmp[1] = (__force u32)daddr; - tmp[2] = ((__force u32)sport << 16) + (__force u32)dport; - tmp[3] = count; - sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5); - - return tmp[17]; + return siphash_4u32((__force u32)saddr, (__force u32)daddr, + (__force u32)sport << 16 | (__force u32)dport, + count, &syncookie_secret[c]); } diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index a4d49760bf43..895ff650db43 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include #include @@ -24,7 +24,7 @@ #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; +static siphash_key_t syncookie6_secret[2] __read_mostly; /* RFC 2460, Section 8.3: * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..] 
@@ -41,30 +41,27 @@ static __u16 const msstab[] = { 9000 - 60, }; -static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv6_cookie_scratch); - -static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr, +static u32 cookie_hash(const struct in6_addr *saddr, + const struct in6_addr *daddr, __be16 sport, __be16 dport, u32 count, int c) { - __u32 *tmp; + const struct { + struct in6_addr saddr; + struct in6_addr daddr; + u32 count; + __be16 sport; + __be16 dport; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .saddr = *saddr, + .daddr = *daddr, + .count = count, + .sport = sport, + .dport = dport + }; net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret)); - - tmp = this_cpu_ptr(ipv6_cookie_scratch); - - /* - * we have 320 bits of information to hash, copy in the remaining - * 192 bits required for sha_transform, from the syncookie6_secret - * and overwrite the digest with the secret - */ - memcpy(tmp + 10, syncookie6_secret[c], 44); - memcpy(tmp, saddr, 16); - memcpy(tmp + 4, daddr, 16); - tmp[8] = ((__force u32)sport << 16) + (__force u32)dport; - tmp[9] = count; - sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5); - - return tmp[17]; + return siphash(&combined, offsetofend(typeof(combined), dport), + &syncookie6_secret[c]); } static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr, -- cgit v1.2.3 From 58fa118f3de45481df2ac2b8b41e8114cae2574d Mon Sep 17 00:00:00 2001 From: Alexandru Moise <00moses.alexander00@gmail.com> Date: Sun, 8 Jan 2017 18:49:46 +0200 Subject: cls_u32: don't bother explicitly initializing ->divisor to zero This struct member is already initialized to zero upon root_ht's allocation via kzalloc(). Signed-off-by: Alexandru Moise <00moses.alexander00@gmail.com> Signed-off-by: David S. Miller --- net/sched/cls_u32.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index ae83c3aec308..a6ec3e4b57ab 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -334,7 +334,6 @@ static int u32_init(struct tcf_proto *tp) if (root_ht == NULL) return -ENOBUFS; - root_ht->divisor = 0; root_ht->refcnt++; root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; root_ht->prio = tp->prio; -- cgit v1.2.3 From f89d21b9c310d43ad8513a3fe345b7677ee7ead7 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Sun, 8 Jan 2017 22:12:27 +0200 Subject: net: ethernet: ti: cpsw: extend limits for cpsw_get/set_ringparam Allow to set number of descs close to possible values. In case of minimum limit it's equal to number of channels to be able to set at least one desc per channel. For maximum limit leave enough descs number for tx channels. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f339268da11a..02b03ee2e314 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2493,8 +2493,7 @@ static void cpsw_get_ringparam(struct net_device *ndev, /* not supported */ ering->tx_max_pending = 0; ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); - /* Max 90% RX buffers */ - ering->rx_max_pending = (descs_pool_size * 9) / 10; + ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); } @@ -2509,8 +2508,8 @@ static int cpsw_set_ringparam(struct net_device *ndev, /* ignore ering->tx_pending - only rx_pending adjustment is supported */ if (ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending < (descs_pool_size / 10) || - ering->rx_pending > ((descs_pool_size * 9) / 10)) + ering->rx_pending < CPSW_MAX_QUEUES || + ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES)) return -EINVAL; if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma)) -- cgit v1.2.3 From 36ca68bf9919299c95ff80e7cf3c217f4c250cbe Mon Sep 17 00:00:00 2001 From: Elad Raz Date: Mon, 9 Jan 2017 11:25:44 +0100 Subject: mlxsw: Fix mlxsw_i2c_write return value The "err" variable is been checked, return always 0. Signed-off-by: Elad Raz Acked-by: Ido Schimmel Acked-by: Vadim Pasternak Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index e50c8db2602a..12c3a4449120 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -338,7 +338,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num, return -EIO; } - return err > 0 ? 0 : err; + return 0; } /* Routine executes I2C command. */ -- cgit v1.2.3 From 169852718f217659811858562aaaa1de3abbcf58 Mon Sep 17 00:00:00 2001 From: Elad Raz Date: Mon, 9 Jan 2017 11:25:45 +0100 Subject: mlxsw: cmd: Fix API name comments for event-queues Probably some copy-paste error from "int_msix" that caused "int_" prefix to appear in the comments for all "eq_" APIs. Signed-off-by: Elad Raz Acked-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 56e19b0d2f8f..a1b48421648a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -1132,12 +1132,12 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1); -/* cmd_mbox_sw2hw_eq_int_oi +/* cmd_mbox_sw2hw_eq_oi * When set, overrun ignore is enabled. */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); -/* cmd_mbox_sw2hw_eq_int_st +/* cmd_mbox_sw2hw_eq_st * Event delivery state machine * 0x0 - FIRED * 0x1 - ARMED (Request for Notification) @@ -1146,19 +1146,19 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2); -/* cmd_mbox_sw2hw_eq_int_log_eq_size +/* cmd_mbox_sw2hw_eq_log_eq_size * Log (base 2) of the EQ size (in entries). 
*/ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4); -/* cmd_mbox_sw2hw_eq_int_producer_counter +/* cmd_mbox_sw2hw_eq_producer_counter * Producer Counter. The counter is incremented for each EQE that is written * by the HW to the EQ. * Maintained by HW (valid for the QUERY_EQ command only) */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16); -/* cmd_mbox_sw2hw_eq_int_pa +/* cmd_mbox_sw2hw_eq_pa * Physical Address. */ MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true); -- cgit v1.2.3 From 65acb5d0827c8a05022e77eced63cdc8616ba43a Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 9 Jan 2017 11:25:46 +0100 Subject: mlxsw: spectrum: Make the add_matchall_tc_entry symmetric Currently, the mlxsw spectrum driver only supports offloading the matchall classifier together with the mirred action. To allow more matchall tc offloads, make the code symmetric so that it can be easily extended later on for other actions. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 91 ++++++++++++++------------ 1 file changed, 48 insertions(+), 43 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 46c53a042e6b..f4b8ba21dd8f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1161,8 +1161,8 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, } static struct mlxsw_sp_port_mall_tc_entry * -mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, - unsigned long cookie) { +mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, + unsigned long cookie) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) @@ -1174,17 +1174,15 @@ mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, static int mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_matchall_offload *cls, + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, const struct tc_action *a, bool ingress) { - struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; struct net *net = dev_net(mlxsw_sp_port->dev); enum mlxsw_sp_span_type span_type; struct mlxsw_sp_port *to_port; struct net_device *to_dev; int ifindex; - int err; ifindex = tcf_mirred_ifindex(a); to_dev = __dev_get_by_index(net, ifindex); @@ -1199,26 +1197,24 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, } to_port = netdev_priv(to_dev); - mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); - if (!mall_tc_entry) - return -ENOMEM; - - mall_tc_entry->cookie = cls->cookie; - mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; - mall_tc_entry->mirror.to_local_port = to_port->local_port; - mall_tc_entry->mirror.ingress = ingress; - list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); - + mirror->to_local_port = to_port->local_port; + mirror->ingress = ingress; span_type = ingress ? 
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); - if (err) - goto err_mirror_add; - return 0; + return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); +} -err_mirror_add: - list_del(&mall_tc_entry->list); - kfree(mall_tc_entry); - return err; +static void +mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; + + to_port = mlxsw_sp->ports[mirror->to_local_port]; + span_type = mirror->ingress ? + MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); } static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, @@ -1226,6 +1222,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_cls_matchall_offload *cls, bool ingress) { + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; const struct tc_action *a; LIST_HEAD(actions); int err; @@ -1235,50 +1232,58 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOTSUPP; } + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + mall_tc_entry->cookie = cls->cookie; + tcf_exts_to_list(cls->exts, &actions); - list_for_each_entry(a, &actions, list) { - if (!is_tcf_mirred_egress_mirror(a) || - protocol != htons(ETH_P_ALL)) { - return -ENOTSUPP; - } + a = list_first_entry(&actions, struct tc_action, list); - err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls, - a, ingress); - if (err) - return err; + if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; + + mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; + mirror = &mall_tc_entry->mirror; + err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, + mirror, a, ingress); + } else { + err = -EOPNOTSUPP; } + if (err) + goto err_add_action; + + list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); return 0; + +err_add_action: + kfree(mall_tc_entry); + return err; } static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_cls_matchall_offload *cls) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; - enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; - mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port, - cls->cookie); + mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, + cls->cookie); if (!mall_tc_entry) { netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); return; } + list_del(&mall_tc_entry->list); switch (mall_tc_entry->type) { case MLXSW_SP_PORT_MALL_MIRROR: - to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port]; - span_type = mall_tc_entry->mirror.ingress ? 
- MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, + &mall_tc_entry->mirror); break; default: WARN_ON(1); } - list_del(&mall_tc_entry->list); kfree(mall_tc_entry); } -- cgit v1.2.3 From 136f1445780063273614077f38c0ceddeedd2494 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 9 Jan 2017 11:25:47 +0100 Subject: mlxsw: spectrum: Fix order of commands in port remove function Fix the order of the free directives to match the port init function Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f4b8ba21dd8f..1f4fdce7efa4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2431,8 +2431,8 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); - free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->hw_stats.cache); + free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); -- cgit v1.2.3 From e915ac68d9c543f8f51446853106c69056acd872 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 9 Jan 2017 11:25:48 +0100 Subject: mlxsw: spectrum: Change ENOTSUPP to EOPNOTSUPP As ENOTSUPP is specific to NFS, change the return error value to EOPNOTSUPP in various places in the mlxsw driver. Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1f4fdce7efa4..d0e803f4f775 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1193,7 +1193,7 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, if (!mlxsw_sp_port_dev_check(to_dev)) { netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); - return -ENOTSUPP; + return -EOPNOTSUPP; } to_port = netdev_priv(to_dev); @@ -1229,7 +1229,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, if (!tc_single_action(cls->exts)) { netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); @@ -1309,7 +1309,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, } } - return -ENOTSUPP; + return -EOPNOTSUPP; } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { @@ -1652,7 +1652,7 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, break; default: WARN_ON(1); - return -ENOTSUPP; + return -EOPNOTSUPP; } return 0; } -- cgit v1.2.3 From ab9d226e3e2a8aeca426701b735194bc35179c2d Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 9 Jan 2017 11:24:20 +0100 Subject: net/sched: Kconfig: select LIBCRC32C if NET_ACT_CSUM is selected LIBCRC32C is needed to compute crc32c on SCTP packets. 
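For reference, crc32c() as provided by LIBCRC32C is the primitive that
sctp_compute_cksum() builds on, and that is what the SCTP support added to
act_csum in the next patch calls. A minimal illustrative sketch (buf and
len are placeholders, not names from the patch):

  #include <linux/crc32c.h>

  /* CRC-32C over a linear buffer, seeded with all ones as SCTP does;
   * sctp_compute_cksum() wraps this (plus a final complement) for skbs.
   */
  u32 crc = crc32c(~0U, buf, len);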
Signed-off-by: Davide Caratti Signed-off-by: David S. Miller --- net/sched/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 87956a768d1b..a9aa38d43fa7 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -707,6 +707,7 @@ config NET_ACT_SKBEDIT config NET_ACT_CSUM tristate "Checksum Updating" depends on NET_CLS_ACT && INET + select LIBCRC32C ---help--- Say Y here to update some common checksum after some direct packet alterations. -- cgit v1.2.3 From c008b33f3ef0915dfb57432dba1fa0ce34fdcc29 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 9 Jan 2017 11:24:21 +0100 Subject: net/sched: act_csum: compute crc32c on SCTP packets modify act_csum to compute crc32c on IPv4/IPv6 packets having SCTP in their payload, and extend UAPI definitions accordingly. Signed-off-by: Davide Caratti Signed-off-by: David S. Miller --- include/uapi/linux/tc_act/tc_csum.h | 3 ++- net/sched/act_csum.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/tc_act/tc_csum.h b/include/uapi/linux/tc_act/tc_csum.h index 8ac8041ab5f1..a11bb355dbfb 100644 --- a/include/uapi/linux/tc_act/tc_csum.h +++ b/include/uapi/linux/tc_act/tc_csum.h @@ -21,7 +21,8 @@ enum { TCA_CSUM_UPDATE_FLAG_IGMP = 4, TCA_CSUM_UPDATE_FLAG_TCP = 8, TCA_CSUM_UPDATE_FLAG_UDP = 16, - TCA_CSUM_UPDATE_FLAG_UDPLITE = 32 + TCA_CSUM_UPDATE_FLAG_UDPLITE = 32, + TCA_CSUM_UPDATE_FLAG_SCTP = 64, }; struct tc_csum { diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index a0edd80a44db..e978ccd4402c 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -30,6 +30,7 @@ #include #include #include +#include #include @@ -322,6 +323,25 @@ ignore_obscure_skb: return 1; } +static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl, + unsigned int ipl) +{ + struct sctphdr *sctph; + + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) + return 1; + + sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph)); + if (!sctph) + return 0; + + sctph->checksum = sctp_compute_cksum(skb, + skb_network_offset(skb) + ihl); + skb->ip_summed = CHECKSUM_NONE; + + return 1; +} + static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags) { const struct iphdr *iph; @@ -365,6 +385,11 @@ static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags) ntohs(iph->tot_len), 1)) goto fail; break; + case IPPROTO_SCTP: + if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) && + !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len))) + goto fail; + break; } if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) { @@ -481,6 +506,11 @@ static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags) pl + sizeof(*ip6h), 1)) goto fail; goto done; + case IPPROTO_SCTP: + if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) && + !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h))) + goto fail; + goto done; default: goto ignore_skb; } -- cgit v1.2.3 From 3a7f75e532420edb375b8d0a1526c3c1b777c1f6 Mon Sep 17 00:00:00 2001 From: Tobias Regnery Date: Mon, 9 Jan 2017 13:15:55 +0100 Subject: alx: add feature flag for rx checksumming The code to handle rx checksumming was in the driver since its introduction but for reasons unknown the feature flag was left out. Now it is possible to enable this feature with ethtool. Tested on my AR8161 ethernet card, there are no regressions observed in netperf if this feature is enabled. Signed-off-by: Tobias Regnery Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/alx/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c66195d00ed4..4c80e0689db9 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1816,6 +1816,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6; -- cgit v1.2.3 From b4b7b772e8b018286482d8d1fba7804ceac56a64 Mon Sep 17 00:00:00 2001 From: jpinto Date: Mon, 9 Jan 2017 12:35:08 +0000 Subject: stmmac: adding DT parameter for LPI tx clock gating This patch adds a new parameter to the stmmac DT: snps,en-tx-lpi-clockgating. It was ported from synopsys/dwc_eth_qos.c and it is useful if lpi tx clock gating is needed by stmmac users also. Signed-off-by: Joao Pinto Tested-by: Niklas Cassel Reviewed-by: Lars Persson Acked-by: Alexandre TORGUE Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 2 ++ drivers/net/ethernet/stmicro/stmmac/common.h | 3 ++- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 5 ++++- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 6 +++++- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 ++- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 3 +++ include/linux/stmmac.h | 1 + 8 files changed, 20 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index c3d2fd480a1b..d3bfc2b30fb5 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -49,6 +49,8 @@ Optional properties: - snps,force_sf_dma_mode Force DMA to use the Store and Forward mode for both tx and rx. This flag is ignored if force_thresh_dma_mode is set. 
+- snps,en-tx-lpi-clockgating Enable gating of the MAC TX clock during + TX low-power mode - snps,multicast-filter-bins: Number of multicast filter hash bins supported by this device instance - snps,perfect-filter-entries: Number of perfect filter entries supported diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 6c9629138462..75e2666df940 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -476,7 +476,8 @@ struct stmmac_ops { unsigned int reg_n); void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, unsigned int reg_n); - void (*set_eee_mode)(struct mac_device_info *hw); + void (*set_eee_mode)(struct mac_device_info *hw, + bool en_tx_lpi_clockgating); void (*reset_eee_mode)(struct mac_device_info *hw); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index be3c91c7f211..a5ffca116edd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -343,11 +343,14 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, return ret; } -static void dwmac1000_set_eee_mode(struct mac_device_info *hw) +static void dwmac1000_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) { void __iomem *ioaddr = hw->pcsr; u32 value; + /*TODO - en_tx_lpi_clockgating treatment */ + /* Enable the link status receive on RGMII, SGMII ore SMII * receive path and instruct the transmit to enter in LPI * state. diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 73d1dabcdba3..db45134fddf0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -98,6 +98,7 @@ enum power_event { #define GMAC4_LPI_TIMER_CTRL 0xd4 /* LPI control and status defines */ +#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ #define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ #define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ #define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 02eab798050d..834f40f08208 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -137,7 +137,8 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw, GMAC_ADDR_LOW(reg_n)); } -static void dwmac4_set_eee_mode(struct mac_device_info *hw) +static void dwmac4_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) { void __iomem *ioaddr = hw->pcsr; u32 value; @@ -149,6 +150,9 @@ static void dwmac4_set_eee_mode(struct mac_device_info *hw) value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + if (en_tx_lpi_clockgating) + value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 92ac0064a52e..fa0b4de74c3e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -239,7 +239,8 @@ static void 
stmmac_enable_eee_mode(struct stmmac_priv *priv) /* Check and enter in LPI mode */ if ((priv->dirty_tx == priv->cur_tx) && (priv->tx_path_in_lpi_mode == false)) - priv->hw->mac->set_eee_mode(priv->hw); + priv->hw->mac->set_eee_mode(priv->hw, + priv->plat->en_tx_lpi_clockgating); } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 60ba8993c650..78ccb50cfa4f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -248,6 +248,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); + plat->en_tx_lpi_clockgating = + of_property_read_bool(np, "snps,en-tx-lpi-clockgating"); + /* Set the maxmtu to a default of JUMBO_LEN in case the * parameter is not present in the device tree. */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 889e0e9a3f1c..e3cd7588623d 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -142,5 +142,6 @@ struct plat_stmmacenet_data { int has_gmac4; bool tso_en; int mac_port_sel_speed; + bool en_tx_lpi_clockgating; }; #endif -- cgit v1.2.3 From f573c0b9c4e02691cf87736bd0824fd37ec02e65 Mon Sep 17 00:00:00 2001 From: jpinto Date: Mon, 9 Jan 2017 12:35:09 +0000 Subject: stmmac: move stmmac_clk, pclk, clk_ptp_ref and stmmac_rst to platform structure This patch moves stmmac_clk, pclk, clk_ptp_ref and stmmac_rst to the plat_stmmacenet_data structure. It also moves these platform variables initialization to stmmac_platform. This was done for two reasons: a) If PCI is used, platform related code is being executed in stmmac_main resulting in warnings that have no sense and conceptually was not right b) stmmac as a synopsys reference ethernet driver stack will be hosting more and more drivers to its structure like synopsys/dwc_eth_qos.c. These drivers have their own DT bindings that are not compatible with stmmac's. One of the most important are the clock names, and so they need to be parsed in the glue logic and initialized there, and that is the main reason why the clocks were passed to the platform structure. Signed-off-by: Joao Pinto Tested-by: Niklas Cassel Reviewed-by: Lars Persson Acked-by: Alexandre TORGUE Signed-off-by: David S. Miller --- .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 5 -- .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 4 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 82 ++++------------------ .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 47 +++++++++++++ include/linux/stmmac.h | 5 ++ 6 files changed, 70 insertions(+), 75 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 1f997027ae51..17d4bbaeb65c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -341,7 +341,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) * mode. Create a copy of the core reset handle so it can be used by * the driver later. 
*/ - dwmac->stmmac_rst = stpriv->stmmac_rst; + dwmac->stmmac_rst = stpriv->plat->stmmac_rst; ret = socfpga_dwmac_set_phy_mode(dwmac); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index eab04aeeeb95..bf8a83ef96f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -106,9 +106,6 @@ struct stmmac_priv { u32 msg_enable; int wolopts; int wol_irq; - struct clk *stmmac_clk; - struct clk *pclk; - struct reset_control *stmmac_rst; int clk_csr; struct timer_list eee_ctrl_timer; int lpi_irq; @@ -120,8 +117,6 @@ struct stmmac_priv { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; - struct clk *clk_ptp_ref; - unsigned int clk_ptp_rate; u32 adv_ts; int use_riwt; int irq_wake; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 699ee1d30426..322e5c6a0d4b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -712,7 +712,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { - unsigned long clk = clk_get_rate(priv->stmmac_clk); + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); if (!clk) return 0; @@ -722,7 +722,7 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { - unsigned long clk = clk_get_rate(priv->stmmac_clk); + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); if (!clk) return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fa0b4de74c3e..02808e827c93 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -158,7 +158,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { u32 clk_rate; - clk_rate = clk_get_rate(priv->stmmac_clk); + clk_rate = clk_get_rate(priv->plat->stmmac_clk); /* Platform provided default clk_csr would be assumed valid * for all other cases except for the below mentioned ones. 
@@ -607,7 +607,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* program Sub Second Increment reg */ sec_inc = priv->hw->ptp->config_sub_second_increment( - priv->ptpaddr, priv->clk_ptp_rate, + priv->ptpaddr, priv->plat->clk_ptp_rate, priv->plat->has_gmac4); temp = div_u64(1000000000ULL, sec_inc); @@ -617,7 +617,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) * where, freq_div_ratio = 1e9ns/sec_inc */ temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->clk_ptp_rate); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); priv->hw->ptp->config_addend(priv->ptpaddr, priv->default_addend); @@ -645,18 +645,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - /* Fall-back to main clock in case of no PTP ref is passed */ - priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); - if (IS_ERR(priv->clk_ptp_ref)) { - priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); - priv->clk_ptp_ref = NULL; - netdev_dbg(priv->dev, "PTP uses main clock\n"); - } else { - clk_prepare_enable(priv->clk_ptp_ref); - priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); - netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate); - } - priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x core */ if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) @@ -683,8 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) static void stmmac_release_ptp(struct stmmac_priv *priv) { - if (priv->clk_ptp_ref) - clk_disable_unprepare(priv->clk_ptp_ref); + if (priv->plat->clk_ptp_ref) + clk_disable_unprepare(priv->plat->clk_ptp_ref); stmmac_ptp_unregister(priv); } @@ -3278,44 +3266,8 @@ int stmmac_dvr_probe(struct device *device, if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; - priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); - if (IS_ERR(priv->stmmac_clk)) { - netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n", - __func__); - /* If failed to obtain stmmac_clk and specific clk_csr value - * is NOT passed from the platform, probe fail. 
- */ - if (!priv->plat->clk_csr) { - ret = PTR_ERR(priv->stmmac_clk); - goto error_clk_get; - } else { - priv->stmmac_clk = NULL; - } - } - clk_prepare_enable(priv->stmmac_clk); - - priv->pclk = devm_clk_get(priv->device, "pclk"); - if (IS_ERR(priv->pclk)) { - if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto error_pclk_get; - } - priv->pclk = NULL; - } - clk_prepare_enable(priv->pclk); - - priv->stmmac_rst = devm_reset_control_get(priv->device, - STMMAC_RESOURCE_NAME); - if (IS_ERR(priv->stmmac_rst)) { - if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto error_hw_init; - } - dev_info(priv->device, "no reset control found\n"); - priv->stmmac_rst = NULL; - } - if (priv->stmmac_rst) - reset_control_deassert(priv->stmmac_rst); + if (priv->plat->stmmac_rst) + reset_control_deassert(priv->plat->stmmac_rst); /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); @@ -3409,10 +3361,6 @@ error_netdev_register: error_mdio_register: netif_napi_del(&priv->napi); error_hw_init: - clk_disable_unprepare(priv->pclk); -error_pclk_get: - clk_disable_unprepare(priv->stmmac_clk); -error_clk_get: free_netdev(ndev); return ret; @@ -3438,10 +3386,10 @@ int stmmac_dvr_remove(struct device *dev) stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); - if (priv->stmmac_rst) - reset_control_assert(priv->stmmac_rst); - clk_disable_unprepare(priv->pclk); - clk_disable_unprepare(priv->stmmac_clk); + if (priv->plat->stmmac_rst) + reset_control_assert(priv->plat->stmmac_rst); + clk_disable_unprepare(priv->plat->pclk); + clk_disable_unprepare(priv->plat->stmmac_clk); if (priv->hw->pcs != STMMAC_PCS_RGMII && priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) @@ -3490,8 +3438,8 @@ int stmmac_suspend(struct device *dev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ - clk_disable(priv->pclk); - clk_disable(priv->stmmac_clk); + clk_disable(priv->plat->pclk); + clk_disable(priv->plat->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); @@ -3531,8 +3479,8 @@ int stmmac_resume(struct device *dev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk prevously disabled */ - clk_enable(priv->stmmac_clk); - clk_enable(priv->pclk); + clk_enable(priv->plat->stmmac_clk); + clk_enable(priv->plat->pclk); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 78ccb50cfa4f..b489ae47f195 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -335,7 +335,54 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->axi = stmmac_axi_setup(pdev); + /* clock setup */ + plat->stmmac_clk = devm_clk_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Cannot get CSR clock\n"); + plat->stmmac_clk = NULL; + } + clk_prepare_enable(plat->stmmac_clk); + + plat->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(plat->pclk)) { + if (PTR_ERR(plat->pclk) == -EPROBE_DEFER) + goto error_pclk_get; + + plat->pclk = NULL; + } + clk_prepare_enable(plat->pclk); + + /* Fall-back to main clock in case of no PTP ref is passed */ + plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); + if (IS_ERR(plat->clk_ptp_ref)) { + plat->clk_ptp_rate = 
clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + dev_warn(&pdev->dev, "PTP uses main clock\n"); + } else { + clk_prepare_enable(plat->clk_ptp_ref); + plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); + dev_info(&pdev->dev, "No reset control found\n"); + } + + plat->stmmac_rst = devm_reset_control_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_rst)) { + if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER) + goto error_hw_init; + + dev_info(&pdev->dev, "no reset control found\n"); + plat->stmmac_rst = NULL; + } + return plat; + +error_hw_init: + clk_disable_unprepare(plat->pclk); +error_pclk_get: + clk_disable_unprepare(plat->stmmac_clk); + + return ERR_PTR(-EPROBE_DEFER); } /** diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index e3cd7588623d..d76033d6726d 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -138,6 +138,11 @@ struct plat_stmmacenet_data { int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; + struct clk *stmmac_clk; + struct clk *pclk; + struct clk *clk_ptp_ref; + unsigned int clk_ptp_rate; + struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; bool tso_en; -- cgit v1.2.3 From d8256121a91a653682070092b9f105d0c45bd48b Mon Sep 17 00:00:00 2001 From: jpinto Date: Mon, 9 Jan 2017 12:35:10 +0000 Subject: stmmac: adding new glue driver dwmac-dwc-qos-eth This patch adds a new glue driver called dwmac-dwc-qos-eth which was based in the dwc_eth_qos as is. To assure retro-compatibility a slight tweak was also added to stmmac_platform. Signed-off-by: Joao Pinto Tested-by: Niklas Cassel Reviewed-by: Lars Persson Acked-by: Alexandre TORGUE Signed-off-by: David S. Miller --- .../bindings/net/snps,dwc-qos-ethernet.txt | 3 + drivers/net/ethernet/stmicro/stmmac/Kconfig | 9 + drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + .../ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 202 +++++++++++++++++++++ .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 15 +- 5 files changed, 227 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt index d93f71ce8346..21d27aa4c68c 100644 --- a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt +++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt @@ -1,5 +1,8 @@ * Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC) +This binding is deprecated, but it continues to be supported, but new +features should be preferably added to the stmmac binding document. + This binding supports the Synopsys Designware Ethernet QoS (Quality Of Service) IP block. The IP supports multiple options for bus type, clocking and reset structure, and feature list. Consequently, a number of properties and list diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index ab66248a4b78..99594e308db9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -29,6 +29,15 @@ config STMMAC_PLATFORM if STMMAC_PLATFORM +config DWMAC_DWC_QOS_ETH + tristate "Support for snps,dwc-qos-ethernet.txt DT binding." + select PHYLIB + select CRC32 + select MII + depends on OF && HAS_DMA + help + Support for chips using the snps,dwc-qos-ethernet.txt DT binding. 
+ config DWMAC_GENERIC tristate "Generic driver for DWMAC" default STMMAC_PLATFORM diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 8f83a86ba13c..700c60336674 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o +obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o stmmac-platform-objs:= stmmac_platform.o dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c new file mode 100644 index 000000000000..1a3fa3d9f855 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -0,0 +1,202 @@ +/* + * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver + * + * Copyright (C) 2016 Joao Pinto + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac_platform.h" + +static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat) +{ + struct device_node *np = pdev->dev.of_node; + u32 burst_map = 0; + u32 bit_index = 0; + u32 a_index = 0; + + if (!plat_dat->axi) { + plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL); + + if (!plat_dat->axi) + return -ENOMEM; + } + + plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi"); + if (of_property_read_u32(np, "snps,write-requests", + &plat_dat->axi->axi_wr_osr_lmt)) { + /** + * Since the register has a reset value of 1, if property + * is missing, default to 1. + */ + plat_dat->axi->axi_wr_osr_lmt = 1; + } else { + /** + * If property exists, to keep the behavior from dwc_eth_qos, + * subtract one after parsing. + */ + plat_dat->axi->axi_wr_osr_lmt--; + } + + if (of_property_read_u32(np, "read,read-requests", + &plat_dat->axi->axi_rd_osr_lmt)) { + /** + * Since the register has a reset value of 1, if property + * is missing, default to 1. + */ + plat_dat->axi->axi_rd_osr_lmt = 1; + } else { + /** + * If property exists, to keep the behavior from dwc_eth_qos, + * subtract one after parsing. 
+ */ + plat_dat->axi->axi_rd_osr_lmt--; + } + of_property_read_u32(np, "snps,burst-map", &burst_map); + + /* converts burst-map bitmask to burst array */ + for (bit_index = 0; bit_index < 7; bit_index++) { + if (burst_map & (1 << bit_index)) { + switch (bit_index) { + case 0: + plat_dat->axi->axi_blen[a_index] = 4; break; + case 1: + plat_dat->axi->axi_blen[a_index] = 8; break; + case 2: + plat_dat->axi->axi_blen[a_index] = 16; break; + case 3: + plat_dat->axi->axi_blen[a_index] = 32; break; + case 4: + plat_dat->axi->axi_blen[a_index] = 64; break; + case 5: + plat_dat->axi->axi_blen[a_index] = 128; break; + case 6: + plat_dat->axi->axi_blen[a_index] = 256; break; + default: + break; + } + a_index++; + } + } + + /* dwc-qos needs GMAC4, AAL, TSO and PMT */ + plat_dat->has_gmac4 = 1; + plat_dat->dma_cfg->aal = 1; + plat_dat->tso_en = 1; + plat_dat->pmt = 1; + + return 0; +} + +static int dwc_eth_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct resource *res; + int ret; + + memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); + + /** + * Since stmmac_platform supports name IRQ only, basic platform + * resource initialization is done in the glue logic. + */ + stmmac_res.irq = platform_get_irq(pdev, 0); + if (stmmac_res.irq < 0) { + if (stmmac_res.irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "IRQ configuration information not found\n"); + + return stmmac_res.irq; + } + stmmac_res.wol_irq = stmmac_res.irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(stmmac_res.addr)) + return PTR_ERR(stmmac_res.addr); + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(plat_dat->stmmac_clk)) { + dev_err(&pdev->dev, "apb_pclk clock not found.\n"); + ret = PTR_ERR(plat_dat->stmmac_clk); + plat_dat->stmmac_clk = NULL; + goto err_remove_config_dt; + } + clk_prepare_enable(plat_dat->stmmac_clk); + + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); + if (IS_ERR(plat_dat->pclk)) { + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); + ret = PTR_ERR(plat_dat->pclk); + plat_dat->pclk = NULL; + goto err_out_clk_dis_phy; + } + clk_prepare_enable(plat_dat->pclk); + + ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); + if (ret) + goto err_out_clk_dis_aper; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_out_clk_dis_aper; + + return 0; + +err_out_clk_dis_aper: + clk_disable_unprepare(plat_dat->pclk); +err_out_clk_dis_phy: + clk_disable_unprepare(plat_dat->stmmac_clk); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; +} + +static int dwc_eth_dwmac_remove(struct platform_device *pdev) +{ + return stmmac_pltfr_remove(pdev); +} + +static const struct of_device_id dwc_eth_dwmac_match[] = { + { .compatible = "snps,dwc-qos-ethernet-4.10", }, + { } +}; +MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); + +static struct platform_driver dwc_eth_dwmac_driver = { + .probe = dwc_eth_dwmac_probe, + .remove = dwc_eth_dwmac_remove, + .driver = { + .name = "dwc-eth-dwmac", + .of_match_table = dwc_eth_dwmac_match, + }, +}; +module_platform_driver(dwc_eth_dwmac_driver); + +MODULE_AUTHOR("Joao Pinto "); +MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index b489ae47f195..ac32f9ef7bed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -180,10 +180,19 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, mdio = false; } - /* If snps,dwmac-mdio is passed from DT, always register the MDIO */ - for_each_child_of_node(np, plat->mdio_node) { - if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio")) + /* exception for dwmac-dwc-qos-eth glue logic */ + if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { + plat->mdio_node = of_get_child_by_name(np, "mdio"); + } else { + /** + * If snps,dwmac-mdio is passed from DT, always register + * the MDIO + */ + for_each_child_of_node(np, plat->mdio_node) { + if (of_device_is_compatible(plat->mdio_node, + "snps,dwmac-mdio")) break; + } } if (plat->mdio_node) { -- cgit v1.2.3 From 2e653ff0758ae8e47499d588666eb77f6a0fc755 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 7 Jan 2017 00:02:52 +0300 Subject: sh_eth: get rid of 'sh_eth_cpu_data::shift_rd0' After checking all the available manuals, I have enough information to conclude that the 'shift_rd0' flag is only relevant for the Ether cores supporting so called "intelligent checksum" (and hence having CSMR) which is indicated by the 'hw_crc' flag. Since all the relevant SoCs now have both these flags set, we can at last get rid of the former flag... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 5 +---- drivers/net/ethernet/renesas/sh_eth.h | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 9a97cb13029a..6ea30bace685 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -537,7 +537,6 @@ static struct sh_eth_cpu_data r7s72100_data = { .no_ade = 1, .hw_crc = 1, .tsu = 1, - .shift_rd0 = 1, }; static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) @@ -577,7 +576,6 @@ static struct sh_eth_cpu_data r8a7740_data = { .hw_crc = 1, .tsu = 1, .select_mii = 1, - .shift_rd0 = 1, }; /* There is CPU dependent code */ @@ -816,7 +814,6 @@ static struct sh_eth_cpu_data sh7734_data = { .tsu = 1, .hw_crc = 1, .select_mii = 1, - .shift_rd0 = 1, }; /* SH7763 */ @@ -1416,7 +1413,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. 
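 * (For example, a raw descriptor word 0 of 0x01ab0000 becomes 0x01ab
 * after the shift, leaving the RFS bits in the least significant bits.)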
*/ - if (mdp->cd->shift_rd0) + if (mdp->cd->hw_crc) desc_status >>= 16; skb = mdp->rx_skbuff[entry]; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 9eec1e185adf..f09fa8b47f9a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -490,7 +490,6 @@ struct sh_eth_cpu_data { unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ unsigned hw_crc:1; /* E-DMAC have CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ - unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ }; -- cgit v1.2.3 From 62e04b7e0e3c2926bdcbcced9feb22478258d0c3 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 7 Jan 2017 00:03:37 +0300 Subject: sh_eth: rename 'sh_eth_cpu_data::hw_crc' The 'struct sh_eth_cpu_data' field indicating the "intelligent checksum" support was misnamed 'hw_crc' -- rename it to 'hw_checksum'. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 12 ++++++------ drivers/net/ethernet/renesas/sh_eth.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6ea30bace685..90fb0e9743b3 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -535,7 +535,7 @@ static struct sh_eth_cpu_data r7s72100_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, - .hw_crc = 1, + .hw_checksum = 1, .tsu = 1, }; @@ -573,7 +573,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, - .hw_crc = 1, + .hw_checksum = 1, .tsu = 1, .select_mii = 1, }; @@ -812,7 +812,7 @@ static struct sh_eth_cpu_data sh7734_data = { .no_trimd = 1, .no_ade = 1, .tsu = 1, - .hw_crc = 1, + .hw_checksum = 1, .select_mii = 1, }; @@ -928,7 +928,7 @@ static int sh_eth_reset(struct net_device *ndev) sh_eth_write(ndev, 0x0, RDFFR); /* Reset HW CRC register */ - if (mdp->cd->hw_crc) + if (mdp->cd->hw_checksum) sh_eth_write(ndev, 0x0, CSMR); /* Select MII mode */ @@ -1413,7 +1413,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. 
*/ - if (mdp->cd->hw_crc) + if (mdp->cd->hw_checksum) desc_status >>= 16; skb = mdp->rx_skbuff[entry]; @@ -1987,7 +1987,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) add_reg(MAFCR); if (cd->rtrate) add_reg(RTRATE); - if (cd->hw_crc) + if (cd->hw_checksum) add_reg(CSMR); if (cd->select_mii) add_reg(RMII_MII); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index f09fa8b47f9a..4ada0ff37712 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -488,7 +488,7 @@ struct sh_eth_cpu_data { unsigned rpadir:1; /* E-DMAC have RPADIR */ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ - unsigned hw_crc:1; /* E-DMAC have CSMR */ + unsigned hw_checksum:1; /* E-DMAC has CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ -- cgit v1.2.3 From 3117455dd6c16dae13ad7425384bed045e98e63d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 8 Jan 2017 14:52:05 -0800 Subject: net: dsa: b53: Export most operations to other drivers In preparation for making dsa_switch_ops const, export b53 operations utilized by other drivers such as bcm_sf2. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 79 +++++++++++++++++++++++----------------- drivers/net/dsa/b53/b53_priv.h | 33 +++++++++++++++++ 2 files changed, 79 insertions(+), 33 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index d5370c227043..a448661b55c6 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -712,7 +712,7 @@ static unsigned int b53_get_mib_size(struct b53_device *dev) return B53_MIBS_SIZE; } -static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) { struct b53_device *dev = ds->priv; const struct b53_mib_desc *mibs = b53_get_mib(dev); @@ -723,9 +723,9 @@ static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) memcpy(data + i * ETH_GSTRING_LEN, mibs[i].name, ETH_GSTRING_LEN); } +EXPORT_SYMBOL(b53_get_strings); -static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *data) +void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { struct b53_device *dev = ds->priv; const struct b53_mib_desc *mibs = b53_get_mib(dev); @@ -756,13 +756,15 @@ static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, mutex_unlock(&dev->stats_mutex); } +EXPORT_SYMBOL(b53_get_ethtool_stats); -static int b53_get_sset_count(struct dsa_switch *ds) +int b53_get_sset_count(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; return b53_get_mib_size(dev); } +EXPORT_SYMBOL(b53_get_sset_count); static int b53_setup(struct dsa_switch *ds) { @@ -921,15 +923,15 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, } } -static int b53_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering) +int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { return 0; } +EXPORT_SYMBOL(b53_vlan_filtering); -static int b53_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +int b53_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + 
struct switchdev_trans *trans) { struct b53_device *dev = ds->priv; @@ -943,10 +945,11 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_vlan_prepare); -static void b53_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) { struct b53_device *dev = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -977,9 +980,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, b53_fast_age_vlan(dev, vid); } } +EXPORT_SYMBOL(b53_vlan_add); -static int b53_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +int b53_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { struct b53_device *dev = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -1015,10 +1019,11 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_vlan_del); -static int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) +int b53_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) { struct b53_device *dev = ds->priv; u16 vid, vid_start = 0, pvid; @@ -1057,6 +1062,7 @@ static int b53_vlan_dump(struct dsa_switch *ds, int port, return err; } +EXPORT_SYMBOL(b53_vlan_dump); /* Address Resolution Logic routines */ static int b53_arl_op_wait(struct b53_device *dev) @@ -1175,9 +1181,9 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return b53_arl_rw_op(dev, 0); } -static int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +int b53_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { struct b53_device *priv = ds->priv; @@ -1189,24 +1195,27 @@ static int b53_fdb_prepare(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_fdb_prepare); -static void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +void b53_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { struct b53_device *priv = ds->priv; if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) pr_err("%s: failed to add MAC address\n", __func__); } +EXPORT_SYMBOL(b53_fdb_add); -static int b53_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) +int b53_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) { struct b53_device *priv = ds->priv; return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); } +EXPORT_SYMBOL(b53_fdb_del); static int b53_arl_search_wait(struct b53_device *dev) { @@ -1258,9 +1267,9 @@ static int b53_fdb_copy(struct net_device *dev, int port, return cb(&fdb->obj); } -static int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) +int b53_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) { struct b53_device *priv = ds->priv; struct net_device *dev = ds->ports[port].netdev; @@ -1297,9 +1306,9 @@ static int 
b53_fdb_dump(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_fdb_dump); -static int b53_br_join(struct dsa_switch *ds, int port, - struct net_device *bridge) +int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) { struct b53_device *dev = ds->priv; s8 cpu_port = ds->dst->cpu_port; @@ -1343,8 +1352,9 @@ static int b53_br_join(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_br_join); -static void b53_br_leave(struct dsa_switch *ds, int port) +void b53_br_leave(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; struct net_device *bridge = dev->ports[port].bridge_dev; @@ -1393,8 +1403,9 @@ static void b53_br_leave(struct dsa_switch *ds, int port) b53_set_vlan_entry(dev, pvid, vl); } } +EXPORT_SYMBOL(b53_br_leave); -static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) +void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) { struct b53_device *dev = ds->priv; u8 hw_state; @@ -1426,14 +1437,16 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) reg |= hw_state; b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); } +EXPORT_SYMBOL(b53_br_set_stp_state); -static void b53_br_fast_age(struct dsa_switch *ds, int port) +void b53_br_fast_age(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; if (b53_fast_age_port(dev, port)) dev_err(ds->dev, "fast ageing failed\n"); } +EXPORT_SYMBOL(b53_br_fast_age); static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) { diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 1f4b07b77de2..86f125d55aaf 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -374,4 +374,37 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev) return -ENOENT; } #endif + +/* Exported functions towards other drivers */ +void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); +void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); +int b53_get_sset_count(struct dsa_switch *ds); +int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); +void b53_br_leave(struct dsa_switch *ds, int port); +void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); +void b53_br_fast_age(struct dsa_switch *ds, int port); +int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); +int b53_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); +void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); +int b53_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +int b53_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)); +int b53_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); +void b53_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); +int b53_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb); +int b53_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)); + #endif -- cgit v1.2.3 From 73095cb1881e6da77e2ed711deaf954bdfdde71e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 8 Jan 
2017 14:52:06 -0800 Subject: net: dsa: bcm_sf2: Declare our own dsa_switch_ops Utilize the b53 exported functions to fill our bcm_sf2_ops structure, also making it clear what we utilize and what we specifically override. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 55 ++++++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 2ce7ae97ac91..52027718d06f 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -977,6 +977,38 @@ static struct b53_io_ops bcm_sf2_io_ops = { .write64 = bcm_sf2_core_write64, }; +static struct dsa_switch_ops bcm_sf2_ops = { + .get_tag_protocol = bcm_sf2_sw_get_tag_protocol, + .setup = bcm_sf2_sw_setup, + .get_strings = b53_get_strings, + .get_ethtool_stats = b53_get_ethtool_stats, + .get_sset_count = b53_get_sset_count, + .get_phy_flags = bcm_sf2_sw_get_phy_flags, + .adjust_link = bcm_sf2_sw_adjust_link, + .fixed_link_update = bcm_sf2_sw_fixed_link_update, + .suspend = bcm_sf2_sw_suspend, + .resume = bcm_sf2_sw_resume, + .get_wol = bcm_sf2_sw_get_wol, + .set_wol = bcm_sf2_sw_set_wol, + .port_enable = bcm_sf2_port_setup, + .port_disable = bcm_sf2_port_disable, + .get_eee = bcm_sf2_sw_get_eee, + .set_eee = bcm_sf2_sw_set_eee, + .port_bridge_join = b53_br_join, + .port_bridge_leave = b53_br_leave, + .port_stp_state_set = b53_br_set_stp_state, + .port_fast_age = b53_br_fast_age, + .port_vlan_filtering = b53_vlan_filtering, + .port_vlan_prepare = b53_vlan_prepare, + .port_vlan_add = b53_vlan_add, + .port_vlan_del = b53_vlan_del, + .port_vlan_dump = b53_vlan_dump, + .port_fdb_prepare = b53_fdb_prepare, + .port_fdb_dump = b53_fdb_dump, + .port_fdb_add = b53_fdb_add, + .port_fdb_del = b53_fdb_del, +}; + static int bcm_sf2_sw_probe(struct platform_device *pdev) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; @@ -1017,28 +1049,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->dev = dev; ds = dev->ds; - - /* Override the parts that are non-standard wrt. normal b53 devices */ - memcpy(ops, ds->ops, sizeof(*ops)); - ds->ops = ops; - ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol; - ds->ops->setup = bcm_sf2_sw_setup; - ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags; - ds->ops->adjust_link = bcm_sf2_sw_adjust_link; - ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update; - ds->ops->suspend = bcm_sf2_sw_suspend; - ds->ops->resume = bcm_sf2_sw_resume; - ds->ops->get_wol = bcm_sf2_sw_get_wol; - ds->ops->set_wol = bcm_sf2_sw_set_wol; - ds->ops->port_enable = bcm_sf2_port_setup; - ds->ops->port_disable = bcm_sf2_port_disable; - ds->ops->get_eee = bcm_sf2_sw_get_eee; - ds->ops->set_eee = bcm_sf2_sw_set_eee; - - /* Avoid having DSA free our slave MDIO bus (checking for - * ds->slave_mii_bus and ds->ops->phy_read being non-NULL) - */ - ds->ops->phy_read = NULL; + ds->ops = &bcm_sf2_ops; dev_set_drvdata(&pdev->dev, priv); -- cgit v1.2.3 From ab3d408d3f40f939d46a32b1c24aa2833a13b846 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 8 Jan 2017 14:52:07 -0800 Subject: net: dsa: Encapsulate legacy switch drivers into dsa_switch_driver In preparation for making struct dsa_switch_ops const, encapsulate it within a dsa_switch_driver which has a list pointer and a pointer to dsa_switch_ops. This allows us to take the list_head pointer out of dsa_switch_ops, which is written to by {un,}register_switch_driver. 
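Reduced to a minimal user-space sketch (all names below are illustrative, not the kernel API), the split looks like this: the operations table stays const, shareable data, while the registry only ever writes to the small wrapper that carries the list linkage.

#include <stdio.h>

struct sketch_ops {			/* immutable: can live in rodata */
	const char *name;
	int (*probe)(void);
};

struct sketch_driver {			/* mutable: owns the registry linkage */
	struct sketch_driver *next;
	const struct sketch_ops *ops;
};

static struct sketch_driver *registry;

static void sketch_register(struct sketch_driver *drv)
{
	drv->next = registry;		/* only the wrapper is written to */
	registry = drv;
}

static int demo_probe(void)
{
	return 42;
}

static const struct sketch_ops demo_ops = {
	.name	= "demo",
	.probe	= demo_probe,
};

static struct sketch_driver demo_drv = { .ops = &demo_ops };

int main(void)
{
	struct sketch_driver *d;

	sketch_register(&demo_drv);

	for (d = registry; d; d = d->next)
		printf("%s: probe() = %d\n", d->ops->name, d->ops->probe());

	return 0;
}

The patch below does the same with a kernel list_head, so the later change that makes dsa_switch_ops const does not have to touch the registration path.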
Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6060.c | 8 ++++++-- drivers/net/dsa/mv88e6xxx/chip.c | 8 ++++++-- include/net/dsa.h | 11 +++++++---- net/dsa/dsa.c | 12 +++++++----- 4 files changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 7ce36dbd9b62..bcbd6dcbd8e8 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -261,16 +261,20 @@ static struct dsa_switch_ops mv88e6060_switch_ops = { .phy_write = mv88e6060_phy_write, }; +static struct dsa_switch_driver mv88e6060_switch_drv = { + .ops = &mv88e6060_switch_ops, +}; + static int __init mv88e6060_init(void) { - register_switch_driver(&mv88e6060_switch_ops); + register_switch_driver(&mv88e6060_switch_drv); return 0; } module_init(mv88e6060_init); static void __exit mv88e6060_cleanup(void) { - unregister_switch_driver(&mv88e6060_switch_ops); + unregister_switch_driver(&mv88e6060_switch_drv); } module_exit(mv88e6060_cleanup); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 676b0e2ad221..d43d12c281b3 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4403,6 +4403,10 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_mdb_dump = mv88e6xxx_port_mdb_dump, }; +static struct dsa_switch_driver mv88e6xxx_switch_drv = { + .ops = &mv88e6xxx_switch_ops, +}; + static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, struct device_node *np) { @@ -4565,7 +4569,7 @@ static struct mdio_driver mv88e6xxx_driver = { static int __init mv88e6xxx_init(void) { - register_switch_driver(&mv88e6xxx_switch_ops); + register_switch_driver(&mv88e6xxx_switch_drv); return mdio_driver_register(&mv88e6xxx_driver); } module_init(mv88e6xxx_init); @@ -4573,7 +4577,7 @@ module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { mdio_driver_unregister(&mv88e6xxx_driver); - unregister_switch_driver(&mv88e6xxx_switch_ops); + unregister_switch_driver(&mv88e6xxx_switch_drv); } module_exit(mv88e6xxx_cleanup); diff --git a/include/net/dsa.h b/include/net/dsa.h index b122196d5a1f..edfa9b130953 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -240,8 +240,6 @@ struct switchdev_obj_port_mdb; struct switchdev_obj_port_vlan; struct dsa_switch_ops { - struct list_head list; - /* * Probing and setup. 
*/ @@ -390,8 +388,13 @@ struct dsa_switch_ops { int (*cb)(struct switchdev_obj *obj)); }; -void register_switch_driver(struct dsa_switch_ops *type); -void unregister_switch_driver(struct dsa_switch_ops *type); +struct dsa_switch_driver { + struct list_head list; + struct dsa_switch_ops *ops; +}; + +void register_switch_driver(struct dsa_switch_driver *type); +void unregister_switch_driver(struct dsa_switch_driver *type); struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index cda787ebad15..4e7bc57cdae5 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -60,18 +60,18 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { static DEFINE_MUTEX(dsa_switch_drivers_mutex); static LIST_HEAD(dsa_switch_drivers); -void register_switch_driver(struct dsa_switch_ops *ops) +void register_switch_driver(struct dsa_switch_driver *drv) { mutex_lock(&dsa_switch_drivers_mutex); - list_add_tail(&ops->list, &dsa_switch_drivers); + list_add_tail(&drv->list, &dsa_switch_drivers); mutex_unlock(&dsa_switch_drivers_mutex); } EXPORT_SYMBOL_GPL(register_switch_driver); -void unregister_switch_driver(struct dsa_switch_ops *ops) +void unregister_switch_driver(struct dsa_switch_driver *drv) { mutex_lock(&dsa_switch_drivers_mutex); - list_del_init(&ops->list); + list_del_init(&drv->list); mutex_unlock(&dsa_switch_drivers_mutex); } EXPORT_SYMBOL_GPL(unregister_switch_driver); @@ -90,8 +90,10 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, mutex_lock(&dsa_switch_drivers_mutex); list_for_each(list, &dsa_switch_drivers) { struct dsa_switch_ops *ops; + struct dsa_switch_driver *drv; - ops = list_entry(list, struct dsa_switch_ops, list); + drv = list_entry(list, struct dsa_switch_driver, list); + ops = drv->ops; name = ops->probe(parent, host_dev, sw_addr, priv); if (name != NULL) { -- cgit v1.2.3 From a82f67afe8e297834bedafa529941d9d0808caf8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 8 Jan 2017 14:52:08 -0800 Subject: net: dsa: Make dsa_switch_ops const Now that we have properly encapsulated and made drivers utilize exported functions, we can switch dsa_switch_ops to be a annotated with const. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 2 +- drivers/net/dsa/bcm_sf2.c | 2 +- drivers/net/dsa/mv88e6060.c | 2 +- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- drivers/net/dsa/qca8k.c | 2 +- include/net/dsa.h | 4 ++-- net/dsa/dsa.c | 10 +++++----- net/dsa/hwmon.c | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index a448661b55c6..5102a3701a1a 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1453,7 +1453,7 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) return DSA_TAG_PROTO_NONE; } -static struct dsa_switch_ops b53_switch_ops = { +static const struct dsa_switch_ops b53_switch_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, .get_strings = b53_get_strings, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 52027718d06f..31d017086f8b 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -977,7 +977,7 @@ static struct b53_io_ops bcm_sf2_io_ops = { .write64 = bcm_sf2_core_write64, }; -static struct dsa_switch_ops bcm_sf2_ops = { +static const struct dsa_switch_ops bcm_sf2_ops = { .get_tag_protocol = bcm_sf2_sw_get_tag_protocol, .setup = bcm_sf2_sw_setup, .get_strings = b53_get_strings, diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index bcbd6dcbd8e8..5934b7a4c448 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -252,7 +252,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return reg_write(ds, addr, regnum, val); } -static struct dsa_switch_ops mv88e6060_switch_ops = { +static const struct dsa_switch_ops mv88e6060_switch_ops = { .get_tag_protocol = mv88e6060_get_tag_protocol, .probe = mv88e6060_drv_probe, .setup = mv88e6060_setup, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d43d12c281b3..eea8e0176e33 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4361,7 +4361,7 @@ static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port, return err; } -static struct dsa_switch_ops mv88e6xxx_switch_ops = { +static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .probe = mv88e6xxx_drv_probe, .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index b3df70d07ff6..54d270d59eb0 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -911,7 +911,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds) return DSA_TAG_PROTO_QCA; } -static struct dsa_switch_ops qca8k_switch_ops = { +static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, .get_strings = qca8k_get_strings, diff --git a/include/net/dsa.h b/include/net/dsa.h index edfa9b130953..b94d1f2ef912 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -169,7 +169,7 @@ struct dsa_switch { /* * The switch operations. 
*/ - struct dsa_switch_ops *ops; + const struct dsa_switch_ops *ops; /* * An array of which element [a] indicates which port on this @@ -390,7 +390,7 @@ struct dsa_switch_ops { struct dsa_switch_driver { struct list_head list; - struct dsa_switch_ops *ops; + const struct dsa_switch_ops *ops; }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 4e7bc57cdae5..fd532487dfdf 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -76,11 +76,11 @@ void unregister_switch_driver(struct dsa_switch_driver *drv) } EXPORT_SYMBOL_GPL(unregister_switch_driver); -static struct dsa_switch_ops * +static const struct dsa_switch_ops * dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, const char **_name, void **priv) { - struct dsa_switch_ops *ret; + const struct dsa_switch_ops *ret; struct list_head *list; const char *name; @@ -89,7 +89,7 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, mutex_lock(&dsa_switch_drivers_mutex); list_for_each(list, &dsa_switch_drivers) { - struct dsa_switch_ops *ops; + const struct dsa_switch_ops *ops; struct dsa_switch_driver *drv; drv = list_entry(list, struct dsa_switch_driver, list); @@ -207,7 +207,7 @@ void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds) static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) { - struct dsa_switch_ops *ops = ds->ops; + const struct dsa_switch_ops *ops = ds->ops; struct dsa_switch_tree *dst = ds->dst; struct dsa_chip_data *cd = ds->cd; bool valid_name_found = false; @@ -326,7 +326,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, struct device *parent, struct device *host_dev) { struct dsa_chip_data *cd = dst->pd->chip + index; - struct dsa_switch_ops *ops; + const struct dsa_switch_ops *ops; struct dsa_switch *ds; int ret; const char *name; diff --git a/net/dsa/hwmon.c b/net/dsa/hwmon.c index 3a9cdf0b22b8..08831a811278 100644 --- a/net/dsa/hwmon.c +++ b/net/dsa/hwmon.c @@ -86,7 +86,7 @@ static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, { struct device *dev = container_of(kobj, struct device, kobj); struct dsa_switch *ds = dev_get_drvdata(dev); - struct dsa_switch_ops *ops = ds->ops; + const struct dsa_switch_ops *ops = ds->ops; umode_t mode = attr->mode; if (index == 1) { -- cgit v1.2.3 From 8d9ba388f35b3c681975a6b3f6ba60bb42c98f8d Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 9 Jan 2017 16:04:04 +0100 Subject: Revert "icmp: avoid allocating large struct on stack" This reverts commit 9a99d4a50cb8 ("icmp: avoid allocating large struct on stack"), because struct icmp_bxm no really a large struct, and allocating and free of this small 112 bytes hurts performance. Fixes: 9a99d4a50cb8 ("icmp: avoid allocating large struct on stack") Signed-off-by: Jesper Dangaard Brouer Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/icmp.c | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 0777ea949223..b4b9807329a7 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -571,7 +571,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; - struct icmp_bxm *icmp_param; + struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; struct flowi4 fl4; @@ -648,13 +648,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) } } - icmp_param = kmalloc(sizeof(*icmp_param), GFP_ATOMIC); - if (!icmp_param) - return; - sk = icmp_xmit_lock(net); if (!sk) - goto out_free; + return; /* * Construct source address and options. @@ -681,7 +677,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) iph->tos; mark = IP4_REPLY_MARK(net, skb_in->mark); - if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in)) + if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in)) goto out_unlock; @@ -689,22 +685,22 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) * Prepare data for ICMP header. */ - icmp_param->data.icmph.type = type; - icmp_param->data.icmph.code = code; - icmp_param->data.icmph.un.gateway = info; - icmp_param->data.icmph.checksum = 0; - icmp_param->skb = skb_in; - icmp_param->offset = skb_network_offset(skb_in); + icmp_param.data.icmph.type = type; + icmp_param.data.icmph.code = code; + icmp_param.data.icmph.un.gateway = info; + icmp_param.data.icmph.checksum = 0; + icmp_param.skb = skb_in; + icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; sk->sk_mark = mark; ipc.addr = iph->saddr; - ipc.opt = &icmp_param->replyopts.opt; + ipc.opt = &icmp_param.replyopts.opt; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, - type, code, icmp_param); + type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; @@ -716,21 +712,19 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) room = dst_mtu(&rt->dst); if (room > 576) room = 576; - room -= sizeof(struct iphdr) + icmp_param->replyopts.opt.opt.optlen; + room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen; room -= sizeof(struct icmphdr); - icmp_param->data_len = skb_in->len - icmp_param->offset; - if (icmp_param->data_len > room) - icmp_param->data_len = room; - icmp_param->head_len = sizeof(struct icmphdr); + icmp_param.data_len = skb_in->len - icmp_param.offset; + if (icmp_param.data_len > room) + icmp_param.data_len = room; + icmp_param.head_len = sizeof(struct icmphdr); - icmp_push_reply(icmp_param, &fl4, &ipc, &rt); + icmp_push_reply(&icmp_param, &fl4, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); -out_free: - kfree(icmp_param); out:; } EXPORT_SYMBOL(icmp_send); -- cgit v1.2.3 From c0303efeab7391ec51c337e0ac5740860ad01fe7 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 9 Jan 2017 16:04:09 +0100 Subject: net: reduce cycles spend on ICMP replies that gets rate limited This patch split the global and per (inet)peer ICMP-reply limiter code, and moves the global limit check to earlier in the packet processing path. Thus, avoid spending cycles on ICMP replies that gets limited/suppressed anyhow. The global ICMP rate limiter icmp_global_allow() is a good solution, it just happens too late in the process. 
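The ordering the patch moves to, as a rough user-space sketch of the token-bucket scheme described in the next paragraph (constants and names are illustrative, not the kernel's exact icmp_global_allow()): the cheap global check runs first, so a suppressed reply never reaches the expensive per-packet work.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MSGS_PER_SEC	1000	/* illustrative, cf. sysctl_icmp_msgs_per_sec */
#define MSGS_BURST	50
#define REFILL_MS	20	/* only touch the shared state every 20 ms */

static struct {
	long long last_ms;
	unsigned int credit;
} bucket = { .credit = MSGS_BURST };

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static bool global_allow(void)
{
	long long now = now_ms();
	long long delta = now - bucket.last_ms;

	if (delta >= REFILL_MS) {
		unsigned long long incr = (unsigned long long)delta * MSGS_PER_SEC / 1000;

		if (incr + bucket.credit > MSGS_BURST)
			bucket.credit = MSGS_BURST;
		else
			bucket.credit += incr;
		bucket.last_ms = now;
	}
	if (bucket.credit) {
		bucket.credit--;
		return true;	/* reply may be sent */
	}
	return false;		/* suppressed: skip route lookup and transmit */
}

static void send_reply(int i)
{
	printf("reply %d sent\n", i);	/* stands in for route lookup + xmit */
}

int main(void)
{
	for (int i = 0; i < 100; i++) {
		if (!global_allow())
			continue;	/* dropped before any expensive work */
		send_reply(i);
	}
	return 0;
}
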
The kernel goes through the full route lookup (return path) for the ICMP message, before taking the rate limit decision of not sending the ICMP reply. Details: The kernels global rate limiter for ICMP messages got added in commit 4cdf507d5452 ("icmp: add a global rate limitation"). It is a token bucket limiter with a global lock. It brilliantly avoids locking congestion by only updating when 20ms (HZ/50) were elapsed. It can then avoids taking lock when credit is exhausted (when under pressure) and time constraint for refill is not yet meet. Signed-off-by: Jesper Dangaard Brouer Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/icmp.c | 71 ++++++++++++++++++++++++++++++++++++++------------------- net/ipv6/icmp.c | 49 +++++++++++++++++++++++++++------------ 2 files changed, 82 insertions(+), 38 deletions(-) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index b4b9807329a7..58d75ca58b83 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -282,6 +282,33 @@ bool icmp_global_allow(void) } EXPORT_SYMBOL(icmp_global_allow); +static bool icmpv4_mask_allow(struct net *net, int type, int code) +{ + if (type > NR_ICMP_TYPES) + return true; + + /* Don't limit PMTU discovery. */ + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) + return true; + + /* Limit if icmp type is enabled in ratemask. */ + if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask)) + return true; + + return false; +} + +static bool icmpv4_global_allow(struct net *net, int type, int code) +{ + if (icmpv4_mask_allow(net, type, code)) + return true; + + if (icmp_global_allow()) + return true; + + return false; +} + /* * Send an ICMP frame. */ @@ -290,34 +317,22 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, struct flowi4 *fl4, int type, int code) { struct dst_entry *dst = &rt->dst; + struct inet_peer *peer; bool rc = true; + int vif; - if (type > NR_ICMP_TYPES) - goto out; - - /* Don't limit PMTU discovery. */ - if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) + if (icmpv4_mask_allow(net, type, code)) goto out; /* No rate limit on loopback */ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) goto out; - /* Limit if icmp type is enabled in ratemask. 
*/ - if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask)) - goto out; - - rc = false; - if (icmp_global_allow()) { - int vif = l3mdev_master_ifindex(dst->dev); - struct inet_peer *peer; - - peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1); - rc = inet_peer_xrlim_allow(peer, - net->ipv4.sysctl_icmp_ratelimit); - if (peer) - inet_putpeer(peer); - } + vif = l3mdev_master_ifindex(dst->dev); + peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1); + rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit); + if (peer) + inet_putpeer(peer); out: return rc; } @@ -396,6 +411,8 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) struct inet_sock *inet; __be32 daddr, saddr; u32 mark = IP4_REPLY_MARK(net, skb->mark); + int type = icmp_param->data.icmph.type; + int code = icmp_param->data.icmph.code; if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb)) return; @@ -405,6 +422,10 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) return; inet = inet_sk(sk); + /* global icmp_msgs_per_sec */ + if (!icmpv4_global_allow(net, type, code)) + goto out_unlock; + icmp_param->data.icmph.checksum = 0; inet->tos = ip_hdr(skb)->tos; @@ -433,8 +454,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; - if (icmpv4_xrlim_allow(net, rt, &fl4, icmp_param->data.icmph.type, - icmp_param->data.icmph.code)) + if (icmpv4_xrlim_allow(net, rt, &fl4, type, code)) icmp_push_reply(icmp_param, &fl4, &ipc, &rt); ip_rt_put(rt); out_unlock: @@ -650,7 +670,11 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) sk = icmp_xmit_lock(net); if (!sk) - return; + goto out; + + /* Check global sysctl_icmp_msgs_per_sec ratelimit */ + if (!icmpv4_global_allow(net, type, code)) + goto out_unlock; /* * Construct source address and options. @@ -704,6 +728,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) if (IS_ERR(rt)) goto out_unlock; + /* peer icmp_ratelimit */ if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code)) goto ende; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 3036f665e6c8..b26ae8b5c1ce 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -168,6 +168,30 @@ static bool is_ineligible(const struct sk_buff *skb) return false; } +static bool icmpv6_mask_allow(int type) +{ + /* Informational messages are not limited. */ + if (type & ICMPV6_INFOMSG_MASK) + return true; + + /* Do not limit pmtu discovery, it would break it. */ + if (type == ICMPV6_PKT_TOOBIG) + return true; + + return false; +} + +static bool icmpv6_global_allow(int type) +{ + if (icmpv6_mask_allow(type)) + return true; + + if (icmp_global_allow()) + return true; + + return false; +} + /* * Check the ICMP output rate limit */ @@ -178,12 +202,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, struct dst_entry *dst; bool res = false; - /* Informational messages are not limited. */ - if (type & ICMPV6_INFOMSG_MASK) - return true; - - /* Do not limit pmtu discovery, it would break it. */ - if (type == ICMPV6_PKT_TOOBIG) + if (icmpv6_mask_allow(type)) return true; /* @@ -200,20 +219,16 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, } else { struct rt6_info *rt = (struct rt6_info *)dst; int tmo = net->ipv6.sysctl.icmpv6_time; + struct inet_peer *peer; /* Give more bandwidth to wider prefixes. 
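 * The shift below halves the per-peer rate-limit interval for every
 * 32 bits that the destination prefix falls short of a full /128: a
 * /128 keeps the whole icmpv6_time interval, a /64 gets a quarter of
 * it ((128 - 64) >> 5 = 2) and a /32 an eighth ((128 - 32) >> 5 = 3).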
*/ if (rt->rt6i_dst.plen < 128) tmo >>= ((128 - rt->rt6i_dst.plen)>>5); - if (icmp_global_allow()) { - struct inet_peer *peer; - - peer = inet_getpeer_v6(net->ipv6.peers, - &fl6->daddr, 1); - res = inet_peer_xrlim_allow(peer, tmo); - if (peer) - inet_putpeer(peer); - } + peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1); + res = inet_peer_xrlim_allow(peer, tmo); + if (peer) + inet_putpeer(peer); } dst_release(dst); return res; @@ -493,6 +508,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, sk = icmpv6_xmit_lock(net); if (!sk) return; + + if (!icmpv6_global_allow(type)) + goto out; + sk->sk_mark = mark; np = inet6_sk(sk); -- cgit v1.2.3 From 7ba91ecb16824f74ba4fcbc4e88cd4d24a839b25 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 9 Jan 2017 16:04:14 +0100 Subject: net: for rate-limited ICMP replies save one atomic operation It is possible to avoid the atomic operation in icmp{v6,}_xmit_lock, by checking the sysctl_icmp_msgs_per_sec ratelimit before these calls, as pointed out by Eric Dumazet, but the BH disabled state must be correct. The icmp_global_allow() call states it must be called with BH disabled. This protection was given by the calls icmp_xmit_lock and icmpv6_xmit_lock. Thus, split out local_bh_disable/enable from these functions and maintain it explicitly at callers. Suggested-by: Eric Dumazet Signed-off-by: Jesper Dangaard Brouer Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/icmp.c | 34 +++++++++++++++++++++------------- net/ipv6/icmp.c | 25 ++++++++++++++++--------- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 58d75ca58b83..fc310db2708b 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -209,19 +209,17 @@ static struct sock *icmp_sk(struct net *net) return *this_cpu_ptr(net->ipv4.icmp_sk); } +/* Called with BH disabled */ static inline struct sock *icmp_xmit_lock(struct net *net) { struct sock *sk; - local_bh_disable(); - sk = icmp_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path signals a * dst_link_failure() for an outgoing ICMP packet. 
*/ - local_bh_enable(); return NULL; } return sk; @@ -229,7 +227,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net) static inline void icmp_xmit_unlock(struct sock *sk) { - spin_unlock_bh(&sk->sk_lock.slock); + spin_unlock(&sk->sk_lock.slock); } int sysctl_icmp_msgs_per_sec __read_mostly = 1000; @@ -417,14 +415,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb)) return; - sk = icmp_xmit_lock(net); - if (!sk) - return; - inet = inet_sk(sk); + /* Needed by both icmp_global_allow and icmp_xmit_lock */ + local_bh_disable(); /* global icmp_msgs_per_sec */ if (!icmpv4_global_allow(net, type, code)) - goto out_unlock; + goto out_bh_enable; + + sk = icmp_xmit_lock(net); + if (!sk) + goto out_bh_enable; + inet = inet_sk(sk); icmp_param->data.icmph.checksum = 0; @@ -459,6 +460,8 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); +out_bh_enable: + local_bh_enable(); } #ifdef CONFIG_IP_ROUTE_MULTIPATH @@ -668,13 +671,16 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) } } - sk = icmp_xmit_lock(net); - if (!sk) - goto out; + /* Needed by both icmp_global_allow and icmp_xmit_lock */ + local_bh_disable(); /* Check global sysctl_icmp_msgs_per_sec ratelimit */ if (!icmpv4_global_allow(net, type, code)) - goto out_unlock; + goto out_bh_enable; + + sk = icmp_xmit_lock(net); + if (!sk) + goto out_bh_enable; /* * Construct source address and options. @@ -750,6 +756,8 @@ ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); +out_bh_enable: + local_bh_enable(); out:; } EXPORT_SYMBOL(icmp_send); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index b26ae8b5c1ce..230b5aac9f03 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -110,19 +110,17 @@ static const struct inet6_protocol icmpv6_protocol = { .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; +/* Called with BH disabled */ static __inline__ struct sock *icmpv6_xmit_lock(struct net *net) { struct sock *sk; - local_bh_disable(); - sk = icmpv6_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path (f.e. SIT or * ip6ip6 tunnel) signals dst_link_failure() for an * outgoing ICMP6 packet. */ - local_bh_enable(); return NULL; } return sk; @@ -130,7 +128,7 @@ static __inline__ struct sock *icmpv6_xmit_lock(struct net *net) static __inline__ void icmpv6_xmit_unlock(struct sock *sk) { - spin_unlock_bh(&sk->sk_lock.slock); + spin_unlock(&sk->sk_lock.slock); } /* @@ -489,6 +487,13 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, return; } + /* Needed by both icmp_global_allow and icmpv6_xmit_lock */ + local_bh_disable(); + + /* Check global sysctl_icmp_msgs_per_sec ratelimit */ + if (!icmpv6_global_allow(type)) + goto out_bh_enable; + mip6_addr_swap(skb); memset(&fl6, 0, sizeof(fl6)); @@ -507,10 +512,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, sk = icmpv6_xmit_lock(net); if (!sk) - return; - - if (!icmpv6_global_allow(type)) - goto out; + goto out_bh_enable; sk->sk_mark = mark; np = inet6_sk(sk); @@ -571,6 +573,8 @@ out_dst_release: dst_release(dst); out: icmpv6_xmit_unlock(sk); +out_bh_enable: + local_bh_enable(); } /* Slightly more convenient version of icmp6_send. 
@@ -684,9 +688,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb) fl6.flowi6_uid = sock_net_uid(net, NULL); security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); + local_bh_disable(); sk = icmpv6_xmit_lock(net); if (!sk) - return; + goto out_bh_enable; sk->sk_mark = mark; np = inet6_sk(sk); @@ -728,6 +733,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb) dst_release(dst); out: icmpv6_xmit_unlock(sk); +out_bh_enable: + local_bh_enable(); } void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) -- cgit v1.2.3 From 6dcf45e514974a1ff10755015b5e06746a033e5f Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Mon, 9 Jan 2017 16:34:04 +0100 Subject: sh_eth: use correct name for ECMR_MPDE bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This bit was wrongly named due to a typo, Sergei checked the SH7734/63 manuals and this bit should be named MPDE. Suggested-by: Sergei Shtylyov Signed-off-by: Niklas Söderlund Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 4ada0ff37712..90b6943e502e 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -339,7 +339,7 @@ enum FELIC_MODE_BIT { ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000, ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000, - ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, + ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001, }; -- cgit v1.2.3 From d8981d029da9d230955dabe596dbb30e7971b7b9 Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Mon, 9 Jan 2017 16:34:05 +0100 Subject: sh_eth: add generic wake-on-lan support via magic packet MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add generic functionality to support Wake-on-LAN using MagicPacket which are supported by at least a few versions of sh_eth. Only add functionality for WoL, no specific sh_eth versions are marked to support WoL yet. WoL is enabled in the suspend callback by setting MagicPacket detection and disabling all interrupts expect MagicPacket. In the resume path the driver needs to reset the hardware to rearm the WoL logic, this prevents the driver from simply restoring the registers and to take advantage of that sh_eth was not suspended to reduce resume time. To reset the hardware the driver closes and reopens the device just like it would do in a normal suspend/resume scenario without WoL enabled, but it both closes and opens the device in the resume callback since the device needs to be open for WoL to work. One quirk needed for WoL is that the module clock needs to be prevented from being switched off by Runtime PM. To keep the clock alive the suspend callback need to call clk_enable() directly to increase the usage count of the clock. Then when Runtime PM decreases the clock usage count it won't reach 0 and be switched off. Signed-off-by: Niklas Söderlund Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 114 +++++++++++++++++++++++++++++++--- drivers/net/ethernet/renesas/sh_eth.h | 3 + 2 files changed, 109 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 90fb0e9743b3..4eae834eea8d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1550,6 +1550,8 @@ static void sh_eth_emac_interrupt(struct net_device *ndev) sh_eth_rcv_snd_enable(ndev); } } + if (felic_stat & ECSR_MPD) + pm_wakeup_event(&mdp->pdev->dev, 0); } /* error control function */ @@ -2199,6 +2201,33 @@ static int sh_eth_set_ringparam(struct net_device *ndev, return 0; } +static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (mdp->cd->magic && mdp->clk) { + wol->supported = WAKE_MAGIC; + wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); + + return 0; +} + static const struct ethtool_ops sh_eth_ethtool_ops = { .get_regs_len = sh_eth_get_regs_len, .get_regs = sh_eth_get_regs, @@ -2213,6 +2242,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { .set_ringparam = sh_eth_set_ringparam, .get_link_ksettings = sh_eth_get_link_ksettings, .set_link_ksettings = sh_eth_set_link_ksettings, + .get_wol = sh_eth_get_wol, + .set_wol = sh_eth_set_wol, }; /* network device open function */ @@ -3015,6 +3046,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } + /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ + mdp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mdp->clk)) + mdp->clk = NULL; + ndev->base_addr = res->start; spin_lock_init(&mdp->lock); @@ -3109,6 +3145,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (ret) goto out_napi_del; + if (mdp->cd->magic && mdp->clk) + device_set_wakeup_capable(&pdev->dev, 1); + /* print device information */ netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); @@ -3148,15 +3187,67 @@ static int sh_eth_drv_remove(struct platform_device *pdev) #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP +static int sh_eth_wol_setup(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* Only allow ECI interrupts */ + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); + sh_eth_write(ndev, DMAC_M_ECI, EESIPR); + + /* Enable MagicPacket */ + sh_eth_modify(ndev, ECMR, 0, ECMR_MPDE); + + /* Increased clock usage so device won't be suspended */ + clk_enable(mdp->clk); + + return enable_irq_wake(ndev->irq); +} + +static int sh_eth_wol_restore(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + napi_enable(&mdp->napi); + + /* Disable MagicPacket */ + sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0); + + /* The device needs to be reset to restore MagicPacket logic + * for next wakeup. If we close and open the device it will + * both be reset and all registers restored. This is what + * happens during suspend and resume without WoL enabled. 
+ */ + ret = sh_eth_close(ndev); + if (ret < 0) + return ret; + ret = sh_eth_open(ndev); + if (ret < 0) + return ret; + + /* Restore clock usage count */ + clk_disable(mdp->clk); + + return disable_irq_wake(ndev->irq); +} + static int sh_eth_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); int ret = 0; - if (netif_running(ndev)) { - netif_device_detach(ndev); + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (mdp->wol_enabled) + ret = sh_eth_wol_setup(ndev); + else ret = sh_eth_close(ndev); - } return ret; } @@ -3164,14 +3255,21 @@ static int sh_eth_suspend(struct device *dev) static int sh_eth_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); int ret = 0; - if (netif_running(ndev)) { + if (!netif_running(ndev)) + return 0; + + if (mdp->wol_enabled) + ret = sh_eth_wol_restore(ndev); + else ret = sh_eth_open(ndev); - if (ret < 0) - return ret; - netif_device_attach(ndev); - } + + if (ret < 0) + return ret; + + netif_device_attach(ndev); return ret; } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 90b6943e502e..a1bb8cc413dc 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -492,6 +492,7 @@ struct sh_eth_cpu_data { unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ + unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ }; struct sh_eth_private { @@ -500,6 +501,7 @@ struct sh_eth_private { const u16 *reg_offset; void __iomem *addr; void __iomem *tsu_addr; + struct clk *clk; u32 num_rx_ring; u32 num_tx_ring; dma_addr_t rx_desc_dma; @@ -528,6 +530,7 @@ struct sh_eth_private { unsigned no_ether_link:1; unsigned ether_link_active_low:1; unsigned is_opened:1; + unsigned wol_enabled:1; }; static inline void sh_eth_soft_swap(char *src, int len) -- cgit v1.2.3 From e410d86d4aa79a1a37231af6aacd93b2c4395c46 Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Mon, 9 Jan 2017 16:34:06 +0100 Subject: sh_eth: enable wake-on-lan for R-Car Gen2 devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tested on Gen2 r8a7791/Koelsch. Signed-off-by: Niklas Söderlund Tested-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4eae834eea8d..55274398b957 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -622,8 +622,9 @@ static struct sh_eth_cpu_data r8a779x_data = { .register_type = SH_ETH_REG_FAST_RCAR, - .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, - .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, .eesipr_value = 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, @@ -638,6 +639,7 @@ static struct sh_eth_cpu_data r8a779x_data = { .tpauser = 1, .hw_swap = 1, .rmiimode = 1, + .magic = 1, }; #endif /* CONFIG_OF */ -- cgit v1.2.3 From 33017e240f489b6353e33f2630f2c5cbd2ad1d13 Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Mon, 9 Jan 2017 16:34:07 +0100 Subject: sh_eth: enable wake-on-lan for r8a7740/armadillo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Geert Uytterhoeven reported WoL worked on his Armadillo board. Signed-off-by: Niklas Söderlund Tested-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 55274398b957..5700d0b70039 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -576,6 +576,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .hw_checksum = 1, .tsu = 1, .select_mii = 1, + .magic = 1, }; /* There is CPU dependent code */ -- cgit v1.2.3 From 159c2a90442c6d5ad0b3d085e348979cd9a0ac1b Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Mon, 9 Jan 2017 16:34:08 +0100 Subject: sh_eth: enable wake-on-lan for sh7734 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is based on public datasheet for sh7734 which shows it has the same behavior and registers for WoL as other versions of sh_eth. Signed-off-by: Niklas Söderlund Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5700d0b70039..4fdaa0400e4a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -817,6 +817,7 @@ static struct sh_eth_cpu_data sh7734_data = { .tsu = 1, .hw_checksum = 1, .select_mii = 1, + .magic = 1, }; /* SH7763 */ -- cgit v1.2.3 From 267e1d5c7473cdb264c3153bf2adeb9d0c4bcae3 Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Mon, 9 Jan 2017 16:34:09 +0100 Subject: sh_eth: enable wake-on-lan for sh7763 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is based on public datasheet for sh7763 which shows it has the same behavior and registers for WoL as other versions of sh_eth. Signed-off-by: Niklas Söderlund Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4fdaa0400e4a..45a7a6ba7644 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -845,6 +845,7 @@ static struct sh_eth_cpu_data sh7763_data = { .no_ade = 1, .tsu = 1, .irq_flags = IRQF_SHARED, + .magic = 1, }; static struct sh_eth_cpu_data sh7619_data = { -- cgit v1.2.3 From 4b9d07a44015a0e940448fa3885b894349e8b162 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:12 +0100 Subject: net: introduce keepalive function in struct proto Direct call of tcp_set_keepalive() function from protocol-agnostic sock_setsockopt() function in net/core/sock.c violates network layering. And newly introduced protocol (SMC-R) will need its own keepalive function. Therefore, add "keepalive" function pointer to "struct proto", and call it from sock_setsockopt() via this pointer. Signed-off-by: Ursula Braun Reviewed-by: Utz Bacher Signed-off-by: David S. Miller --- include/net/sock.h | 1 + net/core/sock.c | 7 ++----- net/ipv4/tcp_ipv4.c | 1 + net/ipv4/tcp_timer.c | 1 + net/ipv6/tcp_ipv6.c | 1 + 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index f0e867f58722..99deda67eba0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1024,6 +1024,7 @@ struct proto { int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *option); + void (*keepalive)(struct sock *sk, int valbool); #ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct sock *sk, int level, diff --git a/net/core/sock.c b/net/core/sock.c index f560e0826009..5018703ee2c2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -762,11 +762,8 @@ set_rcvbuf: goto set_rcvbuf; case SO_KEEPALIVE: -#ifdef CONFIG_INET - if (sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) - tcp_set_keepalive(sk, valbool); -#endif + if (sk->sk_prot->keepalive) + sk->sk_prot->keepalive(sk, valbool); sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); break; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 7e4be4f361f3..56d756ecfb59 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2376,6 +2376,7 @@ struct proto tcp_prot = { .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, + .keepalive = tcp_set_keepalive, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .sendpage = tcp_sendpage, diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 3705075f42c3..29a9bd5f1225 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -617,6 +617,7 @@ void tcp_set_keepalive(struct sock *sk, int val) else if (!val) inet_csk_delete_keepalive_timer(sk); } +EXPORT_SYMBOL_GPL(tcp_set_keepalive); static void tcp_keepalive_timer (unsigned long data) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index a4cdf6a34c30..228965dca3c5 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1889,6 +1889,7 @@ struct proto tcpv6_prot = { .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, + .keepalive = tcp_set_keepalive, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .sendpage = tcp_sendpage, -- cgit v1.2.3 From ac7138746e14137a451f8539614cdd349153e0c0 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:13 +0100 Subject: smc: establish new socket family * enable smc module loading and unloading * register new socket family * basic 
smc socket creation and deletion * use backing TCP socket to run CLC (Connection Layer Control) handshake of SMC protocol * Setup for infiniband traffic is implemented in follow-on patches. For now fallback to TCP socket is always used. Signed-off-by: Ursula Braun Reviewed-by: Utz Bacher Signed-off-by: David S. Miller --- MAINTAINERS | 7 + include/linux/socket.h | 7 +- net/Kconfig | 1 + net/Makefile | 1 + net/core/sock.c | 6 +- net/smc/Kconfig | 11 + net/smc/Makefile | 2 + net/smc/af_smc.c | 620 +++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc.h | 37 +++ 9 files changed, 688 insertions(+), 4 deletions(-) create mode 100644 net/smc/Kconfig create mode 100644 net/smc/Makefile create mode 100644 net/smc/af_smc.c create mode 100644 net/smc/smc.h diff --git a/MAINTAINERS b/MAINTAINERS index d42a37a351f8..c8df0e1ef833 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10850,6 +10850,13 @@ S: Maintained F: drivers/staging/media/st-cec/ F: Documentation/devicetree/bindings/media/stih-cec.txt +SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS +M: Ursula Braun +L: linux-s390@vger.kernel.org +W: http://www.ibm.com/developerworks/linux/linux390/ +S: Supported +F: net/smc/ + SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar M: Andy Shevchenko diff --git a/include/linux/socket.h b/include/linux/socket.h index c06438023d79..082027457825 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -202,8 +202,12 @@ struct ucred { #define AF_VSOCK 40 /* vSockets */ #define AF_KCM 41 /* Kernel Connection Multiplexor*/ #define AF_QIPCRTR 42 /* Qualcomm IPC Router */ +#define AF_SMC 43 /* smc sockets: reserve number for + * PF_SMC protocol family that + * reuses AF_INET address family + */ -#define AF_MAX 43 /* For now.. */ +#define AF_MAX 44 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -251,6 +255,7 @@ struct ucred { #define PF_VSOCK AF_VSOCK #define PF_KCM AF_KCM #define PF_QIPCRTR AF_QIPCRTR +#define PF_SMC AF_SMC #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
*/ diff --git a/net/Kconfig b/net/Kconfig index a1005007224c..2e9ee61822e2 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -57,6 +57,7 @@ source "net/packet/Kconfig" source "net/unix/Kconfig" source "net/xfrm/Kconfig" source "net/iucv/Kconfig" +source "net/smc/Kconfig" config INET bool "TCP/IP networking" diff --git a/net/Makefile b/net/Makefile index 4cafaa2b4667..5d6e0e5ff7f8 100644 --- a/net/Makefile +++ b/net/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_MAC80211) += mac80211/ obj-$(CONFIG_TIPC) += tipc/ obj-$(CONFIG_NETLABEL) += netlabel/ obj-$(CONFIG_IUCV) += iucv/ +obj-$(CONFIG_SMC) += smc/ obj-$(CONFIG_RFKILL) += rfkill/ obj-$(CONFIG_NET_9P) += 9p/ obj-$(CONFIG_CAIF) += caif/ diff --git a/net/core/sock.c b/net/core/sock.c index 5018703ee2c2..31f72f3a3559 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" , - "sk_lock-AF_MAX" + "sk_lock-AF_SMC" , "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , @@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" , - "slock-AF_MAX" + "slock-AF_SMC" , "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , @@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" , - "clock-AF_MAX" + "closck-AF_smc" , "clock-AF_MAX" }; /* diff --git a/net/smc/Kconfig b/net/smc/Kconfig new file mode 100644 index 000000000000..bc029803e728 --- /dev/null +++ b/net/smc/Kconfig @@ -0,0 +1,11 @@ +config SMC + tristate "SMC socket protocol family" + depends on INET && INFINIBAND + ---help--- + SMC-R provides a "sockets over RDMA" solution making use of + RDMA over Converged Ethernet (RoCE) technology to upgrade + AF_INET TCP connections transparently. + The Linux implementation of the SMC-R solution is designed as + a separate socket family SMC. + + Select this option if you want to run SMC socket applications diff --git a/net/smc/Makefile b/net/smc/Makefile new file mode 100644 index 000000000000..c285c868dd82 --- /dev/null +++ b/net/smc/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_SMC) += smc.o +smc-y := af_smc.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c new file mode 100644 index 000000000000..7fd773fc6238 --- /dev/null +++ b/net/smc/af_smc.c @@ -0,0 +1,620 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * AF_SMC protocol family socket handler keeping the AF_INET sock address type + * applies to SOCK_STREAM sockets only + * offers an alternative communication option for TCP-protocol sockets + * applicable with RoCE-cards only + * + * Copyright IBM Corp. 
2016 + * + * Author(s): Ursula Braun + * based on prototype from Frank Blaschka + */ + +#define KMSG_COMPONENT "smc" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include + +#include "smc.h" + +static void smc_set_keepalive(struct sock *sk, int val) +{ + struct smc_sock *smc = smc_sk(sk); + + smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val); +} + +static struct proto smc_proto = { + .name = "SMC", + .owner = THIS_MODULE, + .keepalive = smc_set_keepalive, + .obj_size = sizeof(struct smc_sock), + .slab_flags = SLAB_DESTROY_BY_RCU, +}; + +static int smc_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + + if (!sk) + goto out; + + smc = smc_sk(sk); + lock_sock(sk); + + sk->sk_state = SMC_CLOSED; + if (smc->clcsock) { + sock_release(smc->clcsock); + smc->clcsock = NULL; + } + + /* detach socket */ + sock_orphan(sk); + sock->sk = NULL; + release_sock(sk); + + sock_put(sk); +out: + return 0; +} + +static void smc_destruct(struct sock *sk) +{ + if (sk->sk_state != SMC_CLOSED) + return; + if (!sock_flag(sk, SOCK_DEAD)) + return; + + sk_refcnt_debug_dec(sk); +} + +static struct sock *smc_sock_alloc(struct net *net, struct socket *sock) +{ + struct smc_sock *smc; + struct sock *sk; + + sk = sk_alloc(net, PF_SMC, GFP_KERNEL, &smc_proto, 0); + if (!sk) + return NULL; + + sock_init_data(sock, sk); /* sets sk_refcnt to 1 */ + sk->sk_state = SMC_INIT; + sk->sk_destruct = smc_destruct; + sk->sk_protocol = SMCPROTO_SMC; + sk_refcnt_debug_inc(sk); + + smc = smc_sk(sk); + + return sk; +} + +static int smc_bind(struct socket *sock, struct sockaddr *uaddr, + int addr_len) +{ + struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc; + + smc = smc_sk(sk); + + /* replicate tests from inet_bind(), to be safe wrt. 
future changes */ + rc = -EINVAL; + if (addr_len < sizeof(struct sockaddr_in)) + goto out; + + rc = -EAFNOSUPPORT; + /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */ + if ((addr->sin_family != AF_INET) && + ((addr->sin_family != AF_UNSPEC) || + (addr->sin_addr.s_addr != htonl(INADDR_ANY)))) + goto out; + + lock_sock(sk); + + /* Check if socket is already active */ + rc = -EINVAL; + if (sk->sk_state != SMC_INIT) + goto out_rel; + + smc->clcsock->sk->sk_reuse = sk->sk_reuse; + rc = kernel_bind(smc->clcsock, uaddr, addr_len); + +out_rel: + release_sock(sk); +out: + return rc; +} + +static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk, + unsigned long mask) +{ + /* options we don't get control via setsockopt for */ + nsk->sk_type = osk->sk_type; + nsk->sk_sndbuf = osk->sk_sndbuf; + nsk->sk_rcvbuf = osk->sk_rcvbuf; + nsk->sk_sndtimeo = osk->sk_sndtimeo; + nsk->sk_rcvtimeo = osk->sk_rcvtimeo; + nsk->sk_mark = osk->sk_mark; + nsk->sk_priority = osk->sk_priority; + nsk->sk_rcvlowat = osk->sk_rcvlowat; + nsk->sk_bound_dev_if = osk->sk_bound_dev_if; + nsk->sk_err = osk->sk_err; + + nsk->sk_flags &= ~mask; + nsk->sk_flags |= osk->sk_flags & mask; +} + +#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \ + (1UL << SOCK_KEEPOPEN) | \ + (1UL << SOCK_LINGER) | \ + (1UL << SOCK_BROADCAST) | \ + (1UL << SOCK_TIMESTAMP) | \ + (1UL << SOCK_DBG) | \ + (1UL << SOCK_RCVTSTAMP) | \ + (1UL << SOCK_RCVTSTAMPNS) | \ + (1UL << SOCK_LOCALROUTE) | \ + (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ + (1UL << SOCK_RXQ_OVFL) | \ + (1UL << SOCK_WIFI_STATUS) | \ + (1UL << SOCK_NOFCS) | \ + (1UL << SOCK_FILTER_LOCKED)) +/* copy only relevant settings and flags of SOL_SOCKET level from smc to + * clc socket (since smc is not called for these options from net/core) + */ +static void smc_copy_sock_settings_to_clc(struct smc_sock *smc) +{ + smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC); +} + +#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \ + (1UL << SOCK_KEEPOPEN) | \ + (1UL << SOCK_LINGER) | \ + (1UL << SOCK_DBG)) +/* copy only settings and flags relevant for smc from clc to smc socket */ +static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) +{ + smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); +} + +static int smc_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc = -EINVAL; + + smc = smc_sk(sk); + + /* separate smc parameter checking to be safe */ + if (alen < sizeof(addr->sa_family)) + goto out_err; + if (addr->sa_family != AF_INET) + goto out_err; + + lock_sock(sk); + switch (sk->sk_state) { + default: + goto out; + case SMC_ACTIVE: + rc = -EISCONN; + goto out; + case SMC_INIT: + rc = 0; + break; + } + + smc_copy_sock_settings_to_clc(smc); + rc = kernel_connect(smc->clcsock, addr, alen, flags); + if (rc) + goto out; + + sk->sk_state = SMC_ACTIVE; + + /* always use TCP fallback as transport mechanism for now; + * This will change once RDMA transport is implemented + */ + smc->use_fallback = true; + +out: + release_sock(sk); +out_err: + return rc; +} + +static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) +{ + struct sock *sk = &lsmc->sk; + struct socket *new_clcsock; + struct sock *new_sk; + int rc; + + new_sk = smc_sock_alloc(sock_net(sk), NULL); + if (!new_sk) { + rc = -ENOMEM; + lsmc->sk.sk_err = ENOMEM; + *new_smc = NULL; + goto out; + } + *new_smc = smc_sk(new_sk); + + rc = 
kernel_accept(lsmc->clcsock, &new_clcsock, 0); + if (rc) { + sock_put(new_sk); + *new_smc = NULL; + goto out; + } + + (*new_smc)->clcsock = new_clcsock; +out: + return rc; +} + +static int smc_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc; + + smc = smc_sk(sk); + lock_sock(sk); + + rc = -EINVAL; + if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN)) + goto out; + + rc = 0; + if (sk->sk_state == SMC_LISTEN) { + sk->sk_max_ack_backlog = backlog; + goto out; + } + /* some socket options are handled in core, so we could not apply + * them to the clc socket -- copy smc socket options to clc socket + */ + smc_copy_sock_settings_to_clc(smc); + + rc = kernel_listen(smc->clcsock, backlog); + if (rc) + goto out; + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + sk->sk_state = SMC_LISTEN; + +out: + release_sock(sk); + return rc; +} + +static int smc_accept(struct socket *sock, struct socket *new_sock, + int flags) +{ + struct smc_sock *new_smc; + struct sock *sk = sock->sk; + struct smc_sock *lsmc; + int rc; + + lsmc = smc_sk(sk); + lock_sock(sk); + + if (lsmc->sk.sk_state != SMC_LISTEN) { + rc = -EINVAL; + goto out; + } + + rc = smc_clcsock_accept(lsmc, &new_smc); + if (rc) + goto out; + sock_graft(&new_smc->sk, new_sock); + new_smc->sk.sk_state = SMC_ACTIVE; + + smc_copy_sock_settings_to_smc(new_smc); + + /* always use TCP fallback as transport mechanism for now; + * This will change once RDMA transport is implemented + */ + new_smc->use_fallback = true; + +out: + release_sock(sk); + return rc; +} + +static int smc_getname(struct socket *sock, struct sockaddr *addr, + int *len, int peer) +{ + struct smc_sock *smc; + + if (peer && (sock->sk->sk_state != SMC_ACTIVE)) + return -ENOTCONN; + + smc = smc_sk(sock->sk); + + return smc->clcsock->ops->getname(smc->clcsock, addr, len, peer); +} + +static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc = -EPIPE; + + smc = smc_sk(sk); + lock_sock(sk); + if (sk->sk_state != SMC_ACTIVE) + goto out; + if (smc->use_fallback) + rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); + else + rc = sock_no_sendmsg(sock, msg, len); +out: + release_sock(sk); + return rc; +} + +static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc = -ENOTCONN; + + smc = smc_sk(sk); + lock_sock(sk); + if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED)) + goto out; + + if (smc->use_fallback) + rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); + else + rc = sock_no_recvmsg(sock, msg, len, flags); +out: + release_sock(sk); + return rc; +} + +static unsigned int smc_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + unsigned int mask = 0; + struct smc_sock *smc; + + smc = smc_sk(sock->sk); + if ((sk->sk_state == SMC_INIT) || (sk->sk_state == SMC_LISTEN) || + smc->use_fallback) { + mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); + /* if non-blocking connect finished ... 
*/ + lock_sock(sk); + if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) { + sk->sk_state = SMC_ACTIVE; + /* always use TCP fallback as transport mechanism; + * This will change once RDMA transport is implemented + */ + smc->use_fallback = true; + } + release_sock(sk); + } else { + mask = sock_no_poll(file, sock, wait); + } + + return mask; +} + +static int smc_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc = -EINVAL; + + smc = smc_sk(sk); + + if ((how < SHUT_RD) || (how > SHUT_RDWR)) + goto out_err; + + lock_sock(sk); + + rc = -ENOTCONN; + if (sk->sk_state == SMC_CLOSED) + goto out; + if (smc->use_fallback) { + rc = kernel_sock_shutdown(smc->clcsock, how); + sk->sk_shutdown = smc->clcsock->sk->sk_shutdown; + if (sk->sk_shutdown == SHUTDOWN_MASK) + sk->sk_state = SMC_CLOSED; + } else { + rc = sock_no_shutdown(sock, how); + } + +out: + release_sock(sk); + +out_err: + return rc; +} + +static int smc_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + + smc = smc_sk(sk); + + /* generic setsockopts reaching us here always apply to the + * CLC socket + */ + return smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, + optval, optlen); +} + +static int smc_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct smc_sock *smc; + + smc = smc_sk(sock->sk); + /* socket options apply to the CLC socket */ + return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, + optval, optlen); +} + +static int smc_ioctl(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + struct smc_sock *smc; + + smc = smc_sk(sock->sk); + if (smc->use_fallback) + return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); + else + return sock_no_ioctl(sock, cmd, arg); +} + +static ssize_t smc_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc = -EPIPE; + + smc = smc_sk(sk); + lock_sock(sk); + if (sk->sk_state != SMC_ACTIVE) + goto out; + if (smc->use_fallback) + rc = kernel_sendpage(smc->clcsock, page, offset, + size, flags); + else + rc = sock_no_sendpage(sock, page, offset, size, flags); + +out: + release_sock(sk); + return rc; +} + +static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int rc = -ENOTCONN; + + smc = smc_sk(sk); + lock_sock(sk); + if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED)) + goto out; + if (smc->use_fallback) { + rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos, + pipe, len, flags); + } else { + rc = -EOPNOTSUPP; + } +out: + release_sock(sk); + return rc; +} + +/* must look like tcp */ +static const struct proto_ops smc_sock_ops = { + .family = PF_SMC, + .owner = THIS_MODULE, + .release = smc_release, + .bind = smc_bind, + .connect = smc_connect, + .socketpair = sock_no_socketpair, + .accept = smc_accept, + .getname = smc_getname, + .poll = smc_poll, + .ioctl = smc_ioctl, + .listen = smc_listen, + .shutdown = smc_shutdown, + .setsockopt = smc_setsockopt, + .getsockopt = smc_getsockopt, + .sendmsg = smc_sendmsg, + .recvmsg = smc_recvmsg, + .mmap = sock_no_mmap, + .sendpage = smc_sendpage, + .splice_read = smc_splice_read, +}; + +static int smc_create(struct net *net, struct socket *sock, int protocol, + int 
kern) +{ + struct smc_sock *smc; + struct sock *sk; + int rc; + + rc = -ESOCKTNOSUPPORT; + if (sock->type != SOCK_STREAM) + goto out; + + rc = -EPROTONOSUPPORT; + if ((protocol != IPPROTO_IP) && (protocol != IPPROTO_TCP)) + goto out; + + rc = -ENOBUFS; + sock->ops = &smc_sock_ops; + sk = smc_sock_alloc(net, sock); + if (!sk) + goto out; + + /* create internal TCP socket for CLC handshake and fallback */ + smc = smc_sk(sk); + rc = sock_create_kern(net, PF_INET, SOCK_STREAM, + IPPROTO_TCP, &smc->clcsock); + if (rc) + sk_common_release(sk); + +out: + return rc; +} + +static const struct net_proto_family smc_sock_family_ops = { + .family = PF_SMC, + .owner = THIS_MODULE, + .create = smc_create, +}; + +static int __init smc_init(void) +{ + int rc; + + rc = proto_register(&smc_proto, 1); + if (rc) { + pr_err("%s: proto_register fails with %d\n", __func__, rc); + goto out; + } + + rc = sock_register(&smc_sock_family_ops); + if (rc) { + pr_err("%s: sock_register fails with %d\n", __func__, rc); + goto out_proto; + } + + return 0; + +out_proto: + proto_unregister(&smc_proto); +out: + return rc; +} + +static void __exit smc_exit(void) +{ + sock_unregister(PF_SMC); + proto_unregister(&smc_proto); +} + +module_init(smc_init); +module_exit(smc_exit); + +MODULE_AUTHOR("Ursula Braun "); +MODULE_DESCRIPTION("smc socket address family"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(PF_SMC); diff --git a/net/smc/smc.h b/net/smc/smc.h new file mode 100644 index 000000000000..fcea7399e2eb --- /dev/null +++ b/net/smc/smc.h @@ -0,0 +1,37 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for the SMC module (socket related) + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ +#ifndef __SMC_H +#define __SMC_H + +#include +#include +#include + +#define SMCPROTO_SMC 0 /* SMC protocol */ + +enum smc_state { /* possible states of an SMC socket */ + SMC_ACTIVE = 1, + SMC_INIT = 2, + SMC_CLOSED = 7, + SMC_LISTEN = 10, +}; + +struct smc_sock { /* smc sock container */ + struct sock sk; + struct socket *clcsock; /* internal tcp socket */ + bool use_fallback; /* fallback to tcp */ +}; + +static inline struct smc_sock *smc_sk(const struct sock *sk) +{ + return (struct smc_sock *)sk; +} + +#endif /* __SMC_H */ -- cgit v1.2.3 From a4cf0443c4143b19e42389a1674b5b65224544ce Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:14 +0100 Subject: smc: introduce SMC as an IB-client * create a list of SMC IB-devices Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/Makefile | 2 +- net/smc/af_smc.c | 10 ++++ net/smc/smc.h | 6 +++ net/smc/smc_ib.c | 143 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_ib.h | 40 ++++++++++++++++ 5 files changed, 200 insertions(+), 1 deletion(-) create mode 100644 net/smc/smc_ib.c create mode 100644 net/smc/smc_ib.h diff --git a/net/smc/Makefile b/net/smc/Makefile index c285c868dd82..56f2170e6f64 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_SMC) += smc.o -smc-y := af_smc.o +smc-y := af_smc.o smc_ib.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 7fd773fc6238..50492ee495ce 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -20,6 +20,7 @@ #include #include "smc.h" +#include "smc_ib.h" static void smc_set_keepalive(struct sock *sk, int val) { @@ -597,8 +598,16 @@ static int __init smc_init(void) goto out_proto; } + rc = smc_ib_register_client(); + if (rc) { + pr_err("%s: ib_register fails with %d\n", __func__, rc); + goto out_sock; + } + return 0; +out_sock: + sock_unregister(PF_SMC); out_proto: proto_unregister(&smc_proto); out: @@ -607,6 +616,7 @@ out: static void __exit smc_exit(void) { + smc_ib_unregister_client(); sock_unregister(PF_SMC); proto_unregister(&smc_proto); } diff --git a/net/smc/smc.h b/net/smc/smc.h index fcea7399e2eb..e87d79867099 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -16,6 +16,8 @@ #define SMCPROTO_SMC 0 /* SMC protocol */ +#define SMC_MAX_PORTS 2 /* Max # of ports */ + enum smc_state { /* possible states of an SMC socket */ SMC_ACTIVE = 1, SMC_INIT = 2, @@ -34,4 +36,8 @@ static inline struct smc_sock *smc_sk(const struct sock *sk) return (struct smc_sock *)sk; } +#define SMC_SYSTEMID_LEN 8 + +extern u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */ + #endif /* __SMC_H */ diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c new file mode 100644 index 000000000000..7222b7ede900 --- /dev/null +++ b/net/smc/smc_ib.c @@ -0,0 +1,143 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * IB infrastructure: + * Establish SMC-R as an Infiniband Client to be notified about added and + * removed IB devices of type RDMA. + * Determine device and port characteristics for these IB devices. + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include +#include + +#include "smc_ib.h" +#include "smc.h" + +struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ + .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock), + .list = LIST_HEAD_INIT(smc_ib_devices.list), +}; + +#define SMC_LOCAL_SYSTEMID_RESET "%%%%%%%" + +u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system + * identifier + */ + +static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) +{ + struct net_device *ndev; + int rc; + + rc = ib_query_gid(smcibdev->ibdev, ibport, 0, + &smcibdev->gid[ibport - 1], NULL); + /* the SMC protocol requires specification of the roce MAC address; + * if net_device cannot be determined, it can be derived from gid 0 + */ + ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport); + if (ndev) { + memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN); + } else if (!rc) { + memcpy(&smcibdev->mac[ibport - 1][0], + &smcibdev->gid[ibport - 1].raw[8], 3); + memcpy(&smcibdev->mac[ibport - 1][3], + &smcibdev->gid[ibport - 1].raw[13], 3); + smcibdev->mac[ibport - 1][0] &= ~0x02; + } + return rc; +} + +/* Create an identifier unique for this instance of SMC-R. 
+ * The MAC-address of the first active registered IB device + * plus a random 2-byte number is used to create this identifier. + * This name is delivered to the peer during connection initialization. + */ +static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, + u8 ibport) +{ + memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], + sizeof(smcibdev->mac[ibport - 1])); + get_random_bytes(&local_systemid[0], 2); +} + +bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) +{ + return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; +} + +int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) +{ + int rc; + + memset(&smcibdev->pattr[ibport - 1], 0, + sizeof(smcibdev->pattr[ibport - 1])); + rc = ib_query_port(smcibdev->ibdev, ibport, + &smcibdev->pattr[ibport - 1]); + if (rc) + goto out; + rc = smc_ib_fill_gid_and_mac(smcibdev, ibport); + if (rc) + goto out; + if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, + sizeof(local_systemid)) && + smc_ib_port_active(smcibdev, ibport)) + /* create unique system identifier */ + smc_ib_define_local_systemid(smcibdev, ibport); +out: + return rc; +} + +static struct ib_client smc_ib_client; + +/* callback function for ib_register_client() */ +static void smc_ib_add_dev(struct ib_device *ibdev) +{ + struct smc_ib_device *smcibdev; + + if (ibdev->node_type != RDMA_NODE_IB_CA) + return; + + smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL); + if (!smcibdev) + return; + + smcibdev->ibdev = ibdev; + + spin_lock(&smc_ib_devices.lock); + list_add_tail(&smcibdev->list, &smc_ib_devices.list); + spin_unlock(&smc_ib_devices.lock); + ib_set_client_data(ibdev, &smc_ib_client, smcibdev); +} + +/* callback function for ib_register_client() */ +static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) +{ + struct smc_ib_device *smcibdev; + + smcibdev = ib_get_client_data(ibdev, &smc_ib_client); + ib_set_client_data(ibdev, &smc_ib_client, NULL); + spin_lock(&smc_ib_devices.lock); + list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ + spin_unlock(&smc_ib_devices.lock); + kfree(smcibdev); +} + +static struct ib_client smc_ib_client = { + .name = "smc_ib", + .add = smc_ib_add_dev, + .remove = smc_ib_remove_dev, +}; + +int __init smc_ib_register_client(void) +{ + return ib_register_client(&smc_ib_client); +} + +void smc_ib_unregister_client(void) +{ + ib_unregister_client(&smc_ib_client); +} diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h new file mode 100644 index 000000000000..63613e715f4f --- /dev/null +++ b/net/smc/smc_ib.h @@ -0,0 +1,40 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for IB environment + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#ifndef _SMC_IB_H +#define _SMC_IB_H + +#include + +#define SMC_MAX_PORTS 2 /* Max # of ports */ +#define SMC_GID_SIZE sizeof(union ib_gid) + +struct smc_ib_devices { /* list of smc ib devices definition */ + struct list_head list; + spinlock_t lock; /* protects list of smc ib devices */ +}; + +extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */ + +struct smc_ib_device { /* ib-device infos for smc */ + struct list_head list; + struct ib_device *ibdev; + struct ib_port_attr pattr[SMC_MAX_PORTS]; /* ib dev. 
port attrs */ + char mac[SMC_MAX_PORTS][6]; /* mac address per port*/ + union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */ + u8 initialized : 1; /* ib dev CQ, evthdl done */ +}; + +int smc_ib_register_client(void) __init; +void smc_ib_unregister_client(void); +bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport); +int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport); + +#endif -- cgit v1.2.3 From 6812baabf24d5c299c13223366a23c269408f4d0 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Mon, 9 Jan 2017 16:55:15 +0100 Subject: smc: establish pnet table management Connection creation with SMC-R starts through an internal TCP-connection. The Ethernet interface for this TCP-connection is not restricted to the Ethernet interface of a RoCE device. Any existing Ethernet interface belonging to the same physical net can be used, as long as there is a defined relation between the Ethernet interface and some RoCE devices. This relation is defined with the help of an identification string called "Physical Net ID" or short "pnet ID". Information about defined pnet IDs and their related Ethernet interfaces and RoCE devices is stored in the SMC-R pnet table. A pnet table entry consists of the identifying pnet ID and the associated network and IB device. This patch adds pnet table configuration support using the generic netlink message interface referring to network and IB device by their names. Commands exist to add, delete, and display pnet table entries, and to flush or display the entire pnet table. There are cross-checks to verify whether the ethernet interfaces or infiniband devices really exist in the system. If either device is not available, the pnet ID entry is not created. Loss of network devices and IB devices is also monitored; a pnet ID entry is removed when an associated network or IB device is removed. Signed-off-by: Thomas Richter Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- include/uapi/linux/smc.h | 35 ++++ net/smc/Makefile | 2 +- net/smc/af_smc.c | 11 +- net/smc/smc_ib.c | 2 + net/smc/smc_pnet.c | 534 +++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_pnet.h | 23 ++ 6 files changed, 604 insertions(+), 3 deletions(-) create mode 100644 include/uapi/linux/smc.h create mode 100644 net/smc/smc_pnet.c create mode 100644 net/smc/smc_pnet.h diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h new file mode 100644 index 000000000000..ab1dea8e53ee --- /dev/null +++ b/include/uapi/linux/smc.h @@ -0,0 +1,35 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for generic netlink based configuration of an SMC-R PNET table + * + * Copyright IBM Corp. 
2016 + * + * Author(s): Thomas Richter + */ + +#ifndef _UAPI_LINUX_SMC_H_ +#define _UAPI_LINUX_SMC_H_ + +/* Netlink SMC_PNETID attributes */ +enum { + SMC_PNETID_UNSPEC, + SMC_PNETID_NAME, + SMC_PNETID_ETHNAME, + SMC_PNETID_IBNAME, + SMC_PNETID_IBPORT, + __SMC_PNETID_MAX, + SMC_PNETID_MAX = __SMC_PNETID_MAX - 1 +}; + +enum { /* SMC PNET Table commands */ + SMC_PNETID_GET = 1, + SMC_PNETID_ADD, + SMC_PNETID_DEL, + SMC_PNETID_FLUSH +}; + +#define SMCR_GENL_FAMILY_NAME "SMC_PNETID" +#define SMCR_GENL_FAMILY_VERSION 1 + +#endif /* _UAPI_LINUX_SMC_H */ diff --git a/net/smc/Makefile b/net/smc/Makefile index 56f2170e6f64..50f39ffec8c9 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_SMC) += smc.o -smc-y := af_smc.o smc_ib.o +smc-y := af_smc.o smc_pnet.o smc_ib.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 50492ee495ce..8b059b2fc34d 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -21,6 +21,7 @@ #include "smc.h" #include "smc_ib.h" +#include "smc_pnet.h" static void smc_set_keepalive(struct sock *sk, int val) { @@ -586,10 +587,14 @@ static int __init smc_init(void) { int rc; + rc = smc_pnet_init(); + if (rc) + return rc; + rc = proto_register(&smc_proto, 1); if (rc) { pr_err("%s: proto_register fails with %d\n", __func__, rc); - goto out; + goto out_pnet; } rc = sock_register(&smc_sock_family_ops); @@ -610,7 +615,8 @@ out_sock: sock_unregister(PF_SMC); out_proto: proto_unregister(&smc_proto); -out: +out_pnet: + smc_pnet_exit(); return rc; } @@ -619,6 +625,7 @@ static void __exit smc_exit(void) smc_ib_unregister_client(); sock_unregister(PF_SMC); proto_unregister(&smc_proto); + smc_pnet_exit(); } module_init(smc_init); diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 7222b7ede900..5b037f435bc1 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -14,6 +14,7 @@ #include #include +#include "smc_pnet.h" #include "smc_ib.h" #include "smc.h" @@ -123,6 +124,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) spin_lock(&smc_ib_devices.lock); list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ spin_unlock(&smc_ib_devices.lock); + smc_pnet_remove_by_ibdev(smcibdev); kfree(smcibdev); } diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c new file mode 100644 index 000000000000..9d3e7fb8348d --- /dev/null +++ b/net/smc/smc_pnet.c @@ -0,0 +1,534 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Generic netlink support functions to configure an SMC-R PNET table + * + * Copyright IBM Corp. 2016 + * + * Author(s): Thomas Richter + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "smc_pnet.h" +#include "smc_ib.h" + +#define SMC_MAX_PNET_ID_LEN 16 /* Max. 
length of PNET id */ + +static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { + [SMC_PNETID_NAME] = { + .type = NLA_NUL_STRING, + .len = SMC_MAX_PNET_ID_LEN - 1 + }, + [SMC_PNETID_ETHNAME] = { + .type = NLA_NUL_STRING, + .len = IFNAMSIZ - 1 + }, + [SMC_PNETID_IBNAME] = { + .type = NLA_NUL_STRING, + .len = IB_DEVICE_NAME_MAX - 1 + }, + [SMC_PNETID_IBPORT] = { .type = NLA_U8 } +}; + +static struct genl_family smc_pnet_nl_family; + +/** + * struct smc_pnettable - SMC PNET table anchor + * @lock: Lock for list action + * @pnetlist: List of PNETIDs + */ +static struct smc_pnettable { + rwlock_t lock; + struct list_head pnetlist; +} smc_pnettable = { + .pnetlist = LIST_HEAD_INIT(smc_pnettable.pnetlist), + .lock = __RW_LOCK_UNLOCKED(smc_pnettable.lock) +}; + +/** + * struct smc_pnetentry - pnet identifier name entry + * @list: List node. + * @pnet_name: Pnet identifier name + * @ndev: pointer to network device. + * @smcibdev: Pointer to IB device. + */ +struct smc_pnetentry { + struct list_head list; + char pnet_name[SMC_MAX_PNET_ID_LEN + 1]; + struct net_device *ndev; + struct smc_ib_device *smcibdev; + u8 ib_port; +}; + +/* Check if two RDMA device entries are identical. Use device name and port + * number for comparison. + */ +static bool smc_pnet_same_ibname(struct smc_pnetentry *pnetelem, char *ibname, + u8 ibport) +{ + return pnetelem->ib_port == ibport && + !strncmp(pnetelem->smcibdev->ibdev->name, ibname, + sizeof(pnetelem->smcibdev->ibdev->name)); +} + +/* Find a pnetid in the pnet table. + */ +static struct smc_pnetentry *smc_pnet_find_pnetid(char *pnet_name) +{ + struct smc_pnetentry *pnetelem, *found_pnetelem = NULL; + + read_lock(&smc_pnettable.lock); + list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { + if (!strncmp(pnetelem->pnet_name, pnet_name, + sizeof(pnetelem->pnet_name))) { + found_pnetelem = pnetelem; + break; + } + } + read_unlock(&smc_pnettable.lock); + return found_pnetelem; +} + +/* Remove a pnetid from the pnet table. + */ +static int smc_pnet_remove_by_pnetid(char *pnet_name) +{ + struct smc_pnetentry *pnetelem, *tmp_pe; + int rc = -ENOENT; + + write_lock(&smc_pnettable.lock); + list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist, + list) { + if (!strncmp(pnetelem->pnet_name, pnet_name, + sizeof(pnetelem->pnet_name))) { + list_del(&pnetelem->list); + dev_put(pnetelem->ndev); + kfree(pnetelem); + rc = 0; + break; + } + } + write_unlock(&smc_pnettable.lock); + return rc; +} + +/* Remove a pnet entry mentioning a given network device from the pnet table. + */ +static int smc_pnet_remove_by_ndev(struct net_device *ndev) +{ + struct smc_pnetentry *pnetelem, *tmp_pe; + int rc = -ENOENT; + + write_lock(&smc_pnettable.lock); + list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist, + list) { + if (pnetelem->ndev == ndev) { + list_del(&pnetelem->list); + dev_put(pnetelem->ndev); + kfree(pnetelem); + rc = 0; + break; + } + } + write_unlock(&smc_pnettable.lock); + return rc; +} + +/* Remove a pnet entry mentioning a given ib device from the pnet table. 
+ */ +int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev) +{ + struct smc_pnetentry *pnetelem, *tmp_pe; + int rc = -ENOENT; + + write_lock(&smc_pnettable.lock); + list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist, + list) { + if (pnetelem->smcibdev == ibdev) { + list_del(&pnetelem->list); + dev_put(pnetelem->ndev); + kfree(pnetelem); + rc = 0; + break; + } + } + write_unlock(&smc_pnettable.lock); + return rc; +} + +/* Append a pnetid to the end of the pnet table if not already on this list. + */ +static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem) +{ + struct smc_pnetentry *pnetelem; + int rc = -EEXIST; + + write_lock(&smc_pnettable.lock); + list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { + if (!strncmp(pnetelem->pnet_name, new_pnetelem->pnet_name, + sizeof(new_pnetelem->pnet_name)) || + !strncmp(pnetelem->ndev->name, new_pnetelem->ndev->name, + sizeof(new_pnetelem->ndev->name)) || + smc_pnet_same_ibname(pnetelem, + new_pnetelem->smcibdev->ibdev->name, + new_pnetelem->ib_port)) + goto found; + } + list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist); + rc = 0; +found: + write_unlock(&smc_pnettable.lock); + return rc; +} + +/* The limit for pnetid is 16 characters. + * Valid characters should be (single-byte character set) a-z, A-Z, 0-9. + * Lower case letters are converted to upper case. + * Interior blanks should not be used. + */ +static bool smc_pnetid_valid(const char *pnet_name, char *pnetid) +{ + char *bf = skip_spaces(pnet_name); + size_t len = strlen(bf); + char *end = bf + len; + + if (!len) + return false; + while (--end >= bf && isspace(*end)) + ; + if (end - bf >= SMC_MAX_PNET_ID_LEN) + return false; + while (bf <= end) { + if (!isalnum(*bf)) + return false; + *pnetid++ = islower(*bf) ? toupper(*bf) : *bf; + bf++; + } + *pnetid = '\0'; + return true; +} + +/* Find an infiniband device by a given name. The device might not exist. */ +struct smc_ib_device *smc_pnet_find_ib(char *ib_name) +{ + struct smc_ib_device *ibdev; + + spin_lock(&smc_ib_devices.lock); + list_for_each_entry(ibdev, &smc_ib_devices.list, list) { + if (!strncmp(ibdev->ibdev->name, ib_name, + sizeof(ibdev->ibdev->name))) { + goto out; + } + } + ibdev = NULL; +out: + spin_unlock(&smc_ib_devices.lock); + return ibdev; +} + +/* Parse the supplied netlink attributes and fill a pnetentry structure. + * For ethernet and infiniband device names verify that the devices exist. 
+ */ +static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem, + struct nlattr *tb[]) +{ + char *string, *ibname = NULL; + int rc = 0; + + memset(pnetelem, 0, sizeof(*pnetelem)); + INIT_LIST_HEAD(&pnetelem->list); + if (tb[SMC_PNETID_NAME]) { + string = (char *)nla_data(tb[SMC_PNETID_NAME]); + if (!smc_pnetid_valid(string, pnetelem->pnet_name)) { + rc = -EINVAL; + goto error; + } + } + if (tb[SMC_PNETID_ETHNAME]) { + string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); + pnetelem->ndev = dev_get_by_name(net, string); + if (!pnetelem->ndev) + return -ENOENT; + } + if (tb[SMC_PNETID_IBNAME]) { + ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]); + ibname = strim(ibname); + pnetelem->smcibdev = smc_pnet_find_ib(ibname); + if (!pnetelem->smcibdev) { + rc = -ENOENT; + goto error; + } + } + if (tb[SMC_PNETID_IBPORT]) { + pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]); + if (pnetelem->ib_port > SMC_MAX_PORTS) { + rc = -EINVAL; + goto error; + } + } + return 0; + +error: + if (pnetelem->ndev) + dev_put(pnetelem->ndev); + return rc; +} + +/* Convert an smc_pnetentry to a netlink attribute sequence */ +static int smc_pnet_set_nla(struct sk_buff *msg, struct smc_pnetentry *pnetelem) +{ + if (nla_put_string(msg, SMC_PNETID_NAME, pnetelem->pnet_name) || + nla_put_string(msg, SMC_PNETID_ETHNAME, pnetelem->ndev->name) || + nla_put_string(msg, SMC_PNETID_IBNAME, + pnetelem->smcibdev->ibdev->name) || + nla_put_u8(msg, SMC_PNETID_IBPORT, pnetelem->ib_port)) + return -1; + return 0; +} + +/* Retrieve one PNETID entry */ +static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info) +{ + struct smc_pnetentry *pnetelem; + struct sk_buff *msg; + void *hdr; + int rc; + + pnetelem = smc_pnet_find_pnetid( + (char *)nla_data(info->attrs[SMC_PNETID_NAME])); + if (!pnetelem) + return -ENOENT; + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, + &smc_pnet_nl_family, 0, SMC_PNETID_GET); + if (!hdr) { + rc = -EMSGSIZE; + goto err_out; + } + + if (smc_pnet_set_nla(msg, pnetelem)) { + rc = -ENOBUFS; + goto err_out; + } + + genlmsg_end(msg, hdr); + return genlmsg_reply(msg, info); + +err_out: + nlmsg_free(msg); + return rc; +} + +static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) +{ + struct net *net = genl_info_net(info); + struct smc_pnetentry *pnetelem; + int rc; + + pnetelem = kzalloc(sizeof(*pnetelem), GFP_KERNEL); + if (!pnetelem) + return -ENOMEM; + rc = smc_pnet_fill_entry(net, pnetelem, info->attrs); + if (!rc) + rc = smc_pnet_enter(pnetelem); + if (rc) { + kfree(pnetelem); + return rc; + } + rc = smc_ib_remember_port_attr(pnetelem->smcibdev, pnetelem->ib_port); + if (rc) + smc_pnet_remove_by_pnetid(pnetelem->pnet_name); + return rc; +} + +static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) +{ + return smc_pnet_remove_by_pnetid( + (char *)nla_data(info->attrs[SMC_PNETID_NAME])); +} + +static int smc_pnet_dump_start(struct netlink_callback *cb) +{ + cb->args[0] = 0; + return 0; +} + +static int smc_pnet_dumpinfo(struct sk_buff *skb, + u32 portid, u32 seq, u32 flags, + struct smc_pnetentry *pnetelem) +{ + void *hdr; + + hdr = genlmsg_put(skb, portid, seq, &smc_pnet_nl_family, + flags, SMC_PNETID_GET); + if (!hdr) + return -ENOMEM; + if (smc_pnet_set_nla(skb, pnetelem) < 0) { + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; + } + genlmsg_end(skb, hdr); + return 0; +} + +static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct 
smc_pnetentry *pnetelem; + int idx = 0; + + read_lock(&smc_pnettable.lock); + list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { + if (idx++ < cb->args[0]) + continue; + if (smc_pnet_dumpinfo(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + pnetelem)) { + --idx; + break; + } + } + cb->args[0] = idx; + read_unlock(&smc_pnettable.lock); + return skb->len; +} + +/* Remove and delete all pnetids from pnet table. + */ +static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info) +{ + struct smc_pnetentry *pnetelem, *tmp_pe; + + write_lock(&smc_pnettable.lock); + list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist, + list) { + list_del(&pnetelem->list); + dev_put(pnetelem->ndev); + kfree(pnetelem); + } + write_unlock(&smc_pnettable.lock); + return 0; +} + +/* SMC_PNETID generic netlink operation definition */ +static const struct genl_ops smc_pnet_ops[] = { + { + .cmd = SMC_PNETID_GET, + .flags = GENL_ADMIN_PERM, + .policy = smc_pnet_policy, + .doit = smc_pnet_get, + .dumpit = smc_pnet_dump, + .start = smc_pnet_dump_start + }, + { + .cmd = SMC_PNETID_ADD, + .flags = GENL_ADMIN_PERM, + .policy = smc_pnet_policy, + .doit = smc_pnet_add + }, + { + .cmd = SMC_PNETID_DEL, + .flags = GENL_ADMIN_PERM, + .policy = smc_pnet_policy, + .doit = smc_pnet_del + }, + { + .cmd = SMC_PNETID_FLUSH, + .flags = GENL_ADMIN_PERM, + .policy = smc_pnet_policy, + .doit = smc_pnet_flush + } +}; + +/* SMC_PNETID family definition */ +static struct genl_family smc_pnet_nl_family = { + .hdrsize = 0, + .name = SMCR_GENL_FAMILY_NAME, + .version = SMCR_GENL_FAMILY_VERSION, + .maxattr = SMC_PNETID_MAX, + .netnsok = true, + .module = THIS_MODULE, + .ops = smc_pnet_ops, + .n_ops = ARRAY_SIZE(smc_pnet_ops) +}; + +static int smc_pnet_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_REBOOT: + case NETDEV_UNREGISTER: + smc_pnet_remove_by_ndev(event_dev); + default: + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block smc_netdev_notifier = { + .notifier_call = smc_pnet_netdev_event +}; + +int __init smc_pnet_init(void) +{ + int rc; + + rc = genl_register_family(&smc_pnet_nl_family); + if (rc) + return rc; + rc = register_netdevice_notifier(&smc_netdev_notifier); + if (rc) + genl_unregister_family(&smc_pnet_nl_family); + return rc; +} + +void smc_pnet_exit(void) +{ + smc_pnet_flush(NULL, NULL); + unregister_netdevice_notifier(&smc_netdev_notifier); + genl_unregister_family(&smc_pnet_nl_family); +} + +/* PNET table analysis for a given sock: + * determine ib_device and port belonging to used internal TCP socket + * ethernet interface. 
+ */ +void smc_pnet_find_roce_resource(struct sock *sk, + struct smc_ib_device **smcibdev, u8 *ibport) +{ + struct dst_entry *dst = sk_dst_get(sk); + struct smc_pnetentry *pnetelem; + + *smcibdev = NULL; + *ibport = 0; + + if (!dst) + return; + if (!dst->dev) + goto out_rel; + read_lock(&smc_pnettable.lock); + list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { + if (dst->dev == pnetelem->ndev) { + *smcibdev = pnetelem->smcibdev; + *ibport = pnetelem->ib_port; + break; + } + } + read_unlock(&smc_pnettable.lock); +out_rel: + dst_release(dst); +} diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h new file mode 100644 index 000000000000..32ab3df928ca --- /dev/null +++ b/net/smc/smc_pnet.h @@ -0,0 +1,23 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * PNET table queries + * + * Copyright IBM Corp. 2016 + * + * Author(s): Thomas Richter + */ + +#ifndef _SMC_PNET_H +#define _SMC_PNET_H + +struct smc_ib_device; + +int smc_pnet_init(void) __init; +void smc_pnet_exit(void); +int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev); +struct smc_ib_device *smc_pnet_find_ib(char *ib_name); +void smc_pnet_find_roce_resource(struct sock *sk, + struct smc_ib_device **smcibdev, u8 *ibport); + +#endif -- cgit v1.2.3 From a046d57da19f812216f393e7c535f5858f793ac3 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:16 +0100 Subject: smc: CLC handshake (incl. preparation steps) * CLC (Connection Layer Control) handshake Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/Makefile | 2 +- net/smc/af_smc.c | 464 ++++++++++++++++++++++++++++++++++++++++++++++++++---- net/smc/smc.h | 22 +++ net/smc/smc_clc.c | 252 +++++++++++++++++++++++++++++ net/smc/smc_clc.h | 114 ++++++++++++++ 5 files changed, 822 insertions(+), 32 deletions(-) create mode 100644 net/smc/smc_clc.c create mode 100644 net/smc/smc_clc.h diff --git a/net/smc/Makefile b/net/smc/Makefile index 50f39ffec8c9..c0ad5883e888 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_SMC) += smc.o -smc-y := af_smc.o smc_pnet.o smc_ib.o +smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 8b059b2fc34d..05c705a688e5 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -6,6 +6,13 @@ * offers an alternative communication option for TCP-protocol sockets * applicable with RoCE-cards only * + * Initial restrictions: + * - non-blocking connect postponed + * - IPv6 support postponed + * - support for alternate links postponed + * - partial support for non-blocking sockets only + * - support for urgent data postponed + * * Copyright IBM Corp. 
2016 * * Author(s): Ursula Braun @@ -17,12 +24,18 @@ #include #include +#include +#include #include +#include #include "smc.h" +#include "smc_clc.h" #include "smc_ib.h" #include "smc_pnet.h" +static void smc_tcp_listen_work(struct work_struct *); + static void smc_set_keepalive(struct sock *sk, int val) { struct smc_sock *smc = smc_sk(sk); @@ -88,9 +101,11 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock) sk->sk_state = SMC_INIT; sk->sk_destruct = smc_destruct; sk->sk_protocol = SMCPROTO_SMC; - sk_refcnt_debug_inc(sk); - smc = smc_sk(sk); + INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); + INIT_LIST_HEAD(&smc->accept_q); + spin_lock_init(&smc->accept_q_lock); + sk_refcnt_debug_inc(sk); return sk; } @@ -184,6 +199,119 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); } +/* determine subnet and mask of internal TCP socket */ +int smc_netinfo_by_tcpsk(struct socket *clcsock, + __be32 *subnet, u8 *prefix_len) +{ + struct dst_entry *dst = sk_dst_get(clcsock->sk); + struct sockaddr_in addr; + int rc = -ENOENT; + int len; + + if (!dst) { + rc = -ENOTCONN; + goto out; + } + if (!dst->dev) { + rc = -ENODEV; + goto out_rel; + } + + /* get address to which the internal TCP socket is bound */ + kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len); + /* analyze IPv4 specific data of net_device belonging to TCP socket */ + for_ifa(dst->dev->ip_ptr) { + if (ifa->ifa_address != addr.sin_addr.s_addr) + continue; + *prefix_len = inet_mask_len(ifa->ifa_mask); + *subnet = ifa->ifa_address & ifa->ifa_mask; + rc = 0; + break; + } endfor_ifa(dst->dev->ip_ptr); + +out_rel: + dst_release(dst); +out: + return rc; +} + +/* setup for RDMA connection of client */ +static int smc_connect_rdma(struct smc_sock *smc) +{ + struct smc_clc_msg_accept_confirm aclc; + struct smc_ib_device *smcibdev; + int reason_code = 0; + int rc = 0; + u8 ibport; + + /* IPSec connections opt out of SMC-R optimizations */ + if (using_ipsec(smc)) { + reason_code = SMC_CLC_DECL_IPSEC; + goto decline_rdma; + } + + /* PNET table look up: search active ib_device and port + * within same PNETID that also contains the ethernet device + * used for the internal TCP socket + */ + smc_pnet_find_roce_resource(smc->clcsock->sk, &smcibdev, &ibport); + if (!smcibdev) { + reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */ + goto decline_rdma; + } + + /* do inband token exchange */ + reason_code = smc_clc_send_proposal(smc, smcibdev, ibport); + if (reason_code < 0) { + rc = reason_code; + goto out_err; + } + if (reason_code > 0) /* configuration error */ + goto decline_rdma; + /* receive SMC Accept CLC message */ + reason_code = smc_clc_wait_msg(smc, &aclc, sizeof(aclc), + SMC_CLC_ACCEPT); + if (reason_code < 0) { + rc = reason_code; + goto out_err; + } + if (reason_code > 0) + goto decline_rdma; + + /* tbd in follow-on patch: more steps to setup RDMA communcication, + * create connection, link group, link + */ + + /* tbd in follow-on patch: more steps to setup RDMA communcication, + * create rmbs, map rmbs, rtoken_handling, modify_qp + */ + + rc = smc_clc_send_confirm(smc); + if (rc) + goto out_err; + + /* tbd in follow-on patch: llc_confirm */ + +out_connected: + smc_copy_sock_settings_to_clc(smc); + smc->sk.sk_state = SMC_ACTIVE; + + return rc; + +decline_rdma: + /* RDMA setup failed, switch back to TCP */ + smc->use_fallback = true; + if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) { + rc = 
smc_clc_send_decline(smc, reason_code, 0); + if (rc < sizeof(struct smc_clc_msg_decline)) + goto out_err; + } + goto out_connected; + +out_err: + return rc; +} + static int smc_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -198,6 +326,7 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, goto out_err; if (addr->sa_family != AF_INET) goto out_err; + smc->addr = addr; /* needed for nonblocking connect */ lock_sock(sk); switch (sk->sk_state) { @@ -216,12 +345,12 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, if (rc) goto out; - sk->sk_state = SMC_ACTIVE; - - /* always use TCP fallback as transport mechanism for now; - * This will change once RDMA transport is implemented - */ - smc->use_fallback = true; + /* setup RDMA connection */ + rc = smc_connect_rdma(smc); + if (rc < 0) + goto out; + else + rc = 0; /* success cases including fallback */ out: release_sock(sk); @@ -236,17 +365,32 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) struct sock *new_sk; int rc; + release_sock(&lsmc->sk); new_sk = smc_sock_alloc(sock_net(sk), NULL); if (!new_sk) { rc = -ENOMEM; lsmc->sk.sk_err = ENOMEM; *new_smc = NULL; + lock_sock(&lsmc->sk); goto out; } *new_smc = smc_sk(new_sk); rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0); - if (rc) { + lock_sock(&lsmc->sk); + if (rc < 0) { + lsmc->sk.sk_err = -rc; + new_sk->sk_state = SMC_CLOSED; + sock_set_flag(new_sk, SOCK_DEAD); + sock_put(new_sk); + *new_smc = NULL; + goto out; + } + if (lsmc->sk.sk_state == SMC_CLOSED) { + if (new_clcsock) + sock_release(new_clcsock); + new_sk->sk_state = SMC_CLOSED; + sock_set_flag(new_sk, SOCK_DEAD); sock_put(new_sk); *new_smc = NULL; goto out; @@ -257,6 +401,216 @@ out: return rc; } +/* add a just created sock to the accept queue of the listen sock as + * candidate for a following socket accept call from user space + */ +static void smc_accept_enqueue(struct sock *parent, struct sock *sk) +{ + struct smc_sock *par = smc_sk(parent); + + sock_hold(sk); + spin_lock(&par->accept_q_lock); + list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q); + spin_unlock(&par->accept_q_lock); + sk_acceptq_added(parent); +} + +/* remove a socket from the accept queue of its parental listening socket */ +static void smc_accept_unlink(struct sock *sk) +{ + struct smc_sock *par = smc_sk(sk)->listen_smc; + + spin_lock(&par->accept_q_lock); + list_del_init(&smc_sk(sk)->accept_q); + spin_unlock(&par->accept_q_lock); + sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk); + sock_put(sk); +} + +/* remove a sock from the accept queue to bind it to a new socket created + * for a socket accept call from user space + */ +static struct sock *smc_accept_dequeue(struct sock *parent, + struct socket *new_sock) +{ + struct smc_sock *isk, *n; + struct sock *new_sk; + + list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) { + new_sk = (struct sock *)isk; + + smc_accept_unlink(new_sk); + if (new_sk->sk_state == SMC_CLOSED) { + /* tbd in follow-on patch: close this sock */ + continue; + } + if (new_sock) + sock_graft(new_sk, new_sock); + return new_sk; + } + return NULL; +} + +/* clean up for a created but never accepted sock */ +static void smc_close_non_accepted(struct sock *sk) +{ + struct smc_sock *smc = smc_sk(sk); + + sock_hold(sk); + if (smc->clcsock) { + struct socket *tcp; + + tcp = smc->clcsock; + smc->clcsock = NULL; + sock_release(tcp); + } + /* more closing stuff to be added with socket closing patch */ + sock_put(sk); +} + +/* setup 
for RDMA connection of server */ +static void smc_listen_work(struct work_struct *work) +{ + struct smc_sock *new_smc = container_of(work, struct smc_sock, + smc_listen_work); + struct socket *newclcsock = new_smc->clcsock; + struct smc_sock *lsmc = new_smc->listen_smc; + struct smc_clc_msg_accept_confirm cclc; + struct sock *newsmcsk = &new_smc->sk; + struct smc_clc_msg_proposal pclc; + struct smc_ib_device *smcibdev; + struct sockaddr_in peeraddr; + int reason_code = 0; + int rc = 0, len; + __be32 subnet; + u8 prefix_len; + u8 ibport; + + /* do inband token exchange - + *wait for and receive SMC Proposal CLC message + */ + reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc), + SMC_CLC_PROPOSAL); + if (reason_code < 0) + goto out_err; + if (reason_code > 0) + goto decline_rdma; + + /* IPSec connections opt out of SMC-R optimizations */ + if (using_ipsec(new_smc)) { + reason_code = SMC_CLC_DECL_IPSEC; + goto decline_rdma; + } + + /* PNET table look up: search active ib_device and port + * within same PNETID that also contains the ethernet device + * used for the internal TCP socket + */ + smc_pnet_find_roce_resource(newclcsock->sk, &smcibdev, &ibport); + if (!smcibdev) { + reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */ + goto decline_rdma; + } + + /* determine subnet and mask from internal TCP socket */ + rc = smc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len); + if (rc) { + reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */ + goto decline_rdma; + } + if ((pclc.outgoing_subnet != subnet) || + (pclc.prefix_len != prefix_len)) { + reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */ + goto decline_rdma; + } + + /* get address of the peer connected to the internal TCP socket */ + kernel_getpeername(newclcsock, (struct sockaddr *)&peeraddr, &len); + + /* tbd in follow-on patch: more steps to setup RDMA communcication, + * create connection, link_group, link + */ + + /* tbd in follow-on patch: more steps to setup RDMA communcication, + * create rmbs, map rmbs + */ + + rc = smc_clc_send_accept(new_smc); + if (rc) + goto out_err; + + /* receive SMC Confirm CLC message */ + reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc), + SMC_CLC_CONFIRM); + if (reason_code < 0) + goto out_err; + if (reason_code > 0) + goto decline_rdma; + + /* tbd in follow-on patch: more steps to setup RDMA communcication, + * rtoken_handling, modify_qp + */ + +out_connected: + sk_refcnt_debug_inc(newsmcsk); + newsmcsk->sk_state = SMC_ACTIVE; +enqueue: + lock_sock(&lsmc->sk); + if (lsmc->sk.sk_state == SMC_LISTEN) { + smc_accept_enqueue(&lsmc->sk, newsmcsk); + } else { /* no longer listening */ + smc_close_non_accepted(newsmcsk); + } + release_sock(&lsmc->sk); + + /* Wake up accept */ + lsmc->sk.sk_data_ready(&lsmc->sk); + sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */ + return; + +decline_rdma: + /* RDMA setup failed, switch back to TCP */ + new_smc->use_fallback = true; + if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) { + rc = smc_clc_send_decline(new_smc, reason_code, 0); + if (rc < sizeof(struct smc_clc_msg_decline)) + goto out_err; + } + goto out_connected; + +out_err: + newsmcsk->sk_state = SMC_CLOSED; + goto enqueue; /* queue new sock with sk_err set */ +} + +static void smc_tcp_listen_work(struct work_struct *work) +{ + struct smc_sock *lsmc = container_of(work, struct smc_sock, + tcp_listen_work); + struct smc_sock *new_smc; + int rc = 0; + + lock_sock(&lsmc->sk); + while (lsmc->sk.sk_state == SMC_LISTEN) { + rc = smc_clcsock_accept(lsmc, 
&new_smc); + if (rc) + goto out; + if (!new_smc) + continue; + + new_smc->listen_smc = lsmc; + new_smc->use_fallback = false; /* assume rdma capability first*/ + sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */ + INIT_WORK(&new_smc->smc_listen_work, smc_listen_work); + smc_copy_sock_settings_to_smc(new_smc); + schedule_work(&new_smc->smc_listen_work); + } + +out: + release_sock(&lsmc->sk); + lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */ +} + static int smc_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; @@ -286,6 +640,8 @@ static int smc_listen(struct socket *sock, int backlog) sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = SMC_LISTEN; + INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); + schedule_work(&smc->tcp_listen_work); out: release_sock(sk); @@ -295,10 +651,11 @@ out: static int smc_accept(struct socket *sock, struct socket *new_sock, int flags) { - struct smc_sock *new_smc; - struct sock *sk = sock->sk; + struct sock *sk = sock->sk, *nsk; + DECLARE_WAITQUEUE(wait, current); struct smc_sock *lsmc; - int rc; + long timeo; + int rc = 0; lsmc = smc_sk(sk); lock_sock(sk); @@ -308,18 +665,30 @@ static int smc_accept(struct socket *sock, struct socket *new_sock, goto out; } - rc = smc_clcsock_accept(lsmc, &new_smc); - if (rc) - goto out; - sock_graft(&new_smc->sk, new_sock); - new_smc->sk.sk_state = SMC_ACTIVE; - - smc_copy_sock_settings_to_smc(new_smc); + /* Wait for an incoming connection */ + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (!(nsk = smc_accept_dequeue(sk, new_sock))) { + set_current_state(TASK_INTERRUPTIBLE); + if (!timeo) { + rc = -EAGAIN; + break; + } + release_sock(sk); + timeo = schedule_timeout(timeo); + /* wakeup by sk_data_ready in smc_listen_work() */ + sched_annotate_sleep(); + lock_sock(sk); + if (signal_pending(current)) { + rc = sock_intr_errno(timeo); + break; + } + } + set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); - /* always use TCP fallback as transport mechanism for now; - * This will change once RDMA transport is implemented - */ - new_smc->use_fallback = true; + if (!rc) + rc = sock_error(nsk); out: release_sock(sk); @@ -379,29 +748,61 @@ out: return rc; } +static unsigned int smc_accept_poll(struct sock *parent) +{ + struct smc_sock *isk; + struct sock *sk; + + lock_sock(parent); + list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) { + sk = (struct sock *)isk; + + if (sk->sk_state == SMC_ACTIVE) { + release_sock(parent); + return POLLIN | POLLRDNORM; + } + } + release_sock(parent); + + return 0; +} + static unsigned int smc_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; unsigned int mask = 0; struct smc_sock *smc; + int rc; smc = smc_sk(sock->sk); - if ((sk->sk_state == SMC_INIT) || (sk->sk_state == SMC_LISTEN) || - smc->use_fallback) { + if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { + /* delegate to CLC child sock */ mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); /* if non-blocking connect finished ... 
*/ lock_sock(sk); if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) { - sk->sk_state = SMC_ACTIVE; - /* always use TCP fallback as transport mechanism; - * This will change once RDMA transport is implemented - */ - smc->use_fallback = true; + sk->sk_err = smc->clcsock->sk->sk_err; + if (sk->sk_err) { + mask |= POLLERR; + } else { + rc = smc_connect_rdma(smc); + if (rc < 0) + mask |= POLLERR; + else + /* success cases including fallback */ + mask |= POLLOUT | POLLWRNORM; + } } release_sock(sk); } else { - mask = sock_no_poll(file, sock, wait); + sock_poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == SMC_LISTEN) + /* woken up by sk_data_ready in smc_listen_work() */ + mask |= smc_accept_poll(sk); + if (sk->sk_err) + mask |= POLLERR; + /* for now - to be enhanced in follow-on patch */ } return mask; @@ -568,6 +969,7 @@ static int smc_create(struct net *net, struct socket *sock, int protocol, /* create internal TCP socket for CLC handshake and fallback */ smc = smc_sk(sk); + smc->use_fallback = false; /* assume rdma capability first */ rc = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &smc->clcsock); if (rc) diff --git a/net/smc/smc.h b/net/smc/smc.h index e87d79867099..5946aaecdebf 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -28,6 +28,12 @@ enum smc_state { /* possible states of an SMC socket */ struct smc_sock { /* smc sock container */ struct sock sk; struct socket *clcsock; /* internal tcp socket */ + struct sockaddr *addr; /* inet connect address */ + struct smc_sock *listen_smc; /* listen parent */ + struct work_struct tcp_listen_work;/* handle tcp socket accepts */ + struct work_struct smc_listen_work;/* prepare new accept socket */ + struct list_head accept_q; /* sockets to be accepted */ + spinlock_t accept_q_lock; /* protects accept_q */ bool use_fallback; /* fallback to tcp */ }; @@ -40,4 +46,20 @@ static inline struct smc_sock *smc_sk(const struct sock *sk) extern u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */ +#ifdef CONFIG_XFRM +static inline bool using_ipsec(struct smc_sock *smc) +{ + return (smc->clcsock->sk->sk_policy[0] || + smc->clcsock->sk->sk_policy[1]) ? 1 : 0; +} +#else +static inline bool using_ipsec(struct smc_sock *smc) +{ + return 0; +} +#endif + +int smc_netinfo_by_tcpsk(struct socket *clcsock, __be32 *subnet, + u8 *prefix_len); + #endif /* __SMC_H */ diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c new file mode 100644 index 000000000000..b613b866a2ef --- /dev/null +++ b/net/smc/smc_clc.c @@ -0,0 +1,252 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * CLC (connection layer control) handshake over initial TCP socket to + * prepare for RDMA traffic + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include +#include +#include + +#include "smc.h" +#include "smc_clc.h" +#include "smc_ib.h" + +/* Wait for data on the tcp-socket, analyze received data + * Returns: + * 0 if success and it was not a decline that we received. + * SMC_CLC_DECL_REPLY if decline received for fallback w/o another decl send. + * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise. 
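/*
 * The sign of smc_clc_wait_msg()'s return value drives every caller in this
 * series: a negative value is a hard transport or protocol error that aborts
 * the handshake, a positive value is an SMC_CLC_DECL_* reason code that makes
 * the socket fall back to plain TCP, and 0 means the expected CLC message
 * arrived.  Condensed caller pattern, mirroring smc_connect_rdma() from this
 * same patch (illustration only, not an addition to the patch):
 */
	reason_code = smc_clc_wait_msg(smc, &aclc, sizeof(aclc), SMC_CLC_ACCEPT);
	if (reason_code < 0) {			/* hard error: give up */
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0)			/* declined: fall back to TCP */
		goto decline_rdma;
	/* reason_code == 0: SMC_CLC_ACCEPT received, continue the SMC setup */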
+ */ +int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, + u8 expected_type) +{ + struct sock *clc_sk = smc->clcsock->sk; + struct smc_clc_msg_hdr *clcm = buf; + struct msghdr msg = {NULL, 0}; + int reason_code = 0; + struct kvec vec; + int len, datlen; + int krflags; + + /* peek the first few bytes to determine length of data to receive + * so we don't consume any subsequent CLC message or payload data + * in the TCP byte stream + */ + vec.iov_base = buf; + vec.iov_len = buflen; + krflags = MSG_PEEK | MSG_WAITALL; + smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; + len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, + sizeof(struct smc_clc_msg_hdr), krflags); + if (signal_pending(current)) { + reason_code = -EINTR; + clc_sk->sk_err = EINTR; + smc->sk.sk_err = EINTR; + goto out; + } + if (clc_sk->sk_err) { + reason_code = -clc_sk->sk_err; + smc->sk.sk_err = clc_sk->sk_err; + goto out; + } + if (!len) { /* peer has performed orderly shutdown */ + smc->sk.sk_err = ECONNRESET; + reason_code = -ECONNRESET; + goto out; + } + if (len < 0) { + smc->sk.sk_err = -len; + reason_code = len; + goto out; + } + datlen = ntohs(clcm->length); + if ((len < sizeof(struct smc_clc_msg_hdr)) || + (datlen < sizeof(struct smc_clc_msg_decline)) || + (datlen > sizeof(struct smc_clc_msg_accept_confirm)) || + memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) || + ((clcm->type != SMC_CLC_DECLINE) && + (clcm->type != expected_type))) { + smc->sk.sk_err = EPROTO; + reason_code = -EPROTO; + goto out; + } + + /* receive the complete CLC message */ + vec.iov_base = buf; + vec.iov_len = buflen; + memset(&msg, 0, sizeof(struct msghdr)); + krflags = MSG_WAITALL; + smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; + len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, datlen, krflags); + if (len < datlen) { + smc->sk.sk_err = EPROTO; + reason_code = -EPROTO; + goto out; + } + if (clcm->type == SMC_CLC_DECLINE) + reason_code = SMC_CLC_DECL_REPLY; +out: + return reason_code; +} + +/* send CLC DECLINE message across internal TCP socket */ +int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, + u8 out_of_sync) +{ + struct smc_clc_msg_decline dclc; + struct msghdr msg; + struct kvec vec; + int len; + + memset(&dclc, 0, sizeof(dclc)); + memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + dclc.hdr.type = SMC_CLC_DECLINE; + dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline)); + dclc.hdr.version = SMC_CLC_V1; + dclc.hdr.flag = out_of_sync ? 
1 : 0; + memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid)); + dclc.peer_diagnosis = htonl(peer_diag_info); + memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + + memset(&msg, 0, sizeof(msg)); + vec.iov_base = &dclc; + vec.iov_len = sizeof(struct smc_clc_msg_decline); + len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, + sizeof(struct smc_clc_msg_decline)); + if (len < sizeof(struct smc_clc_msg_decline)) + smc->sk.sk_err = EPROTO; + if (len < 0) + smc->sk.sk_err = -len; + return len; +} + +/* send CLC PROPOSAL message across internal TCP socket */ +int smc_clc_send_proposal(struct smc_sock *smc, + struct smc_ib_device *smcibdev, + u8 ibport) +{ + struct smc_clc_msg_proposal pclc; + int reason_code = 0; + struct msghdr msg; + struct kvec vec; + int len, rc; + + /* send SMC Proposal CLC message */ + memset(&pclc, 0, sizeof(pclc)); + memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + pclc.hdr.type = SMC_CLC_PROPOSAL; + pclc.hdr.length = htons(sizeof(pclc)); + pclc.hdr.version = SMC_CLC_V1; /* SMC version */ + memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); + memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE); + memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], + sizeof(smcibdev->mac[ibport - 1])); + + /* determine subnet and mask from internal TCP socket */ + rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet, + &pclc.prefix_len); + if (rc) + return SMC_CLC_DECL_CNFERR; /* configuration error */ + memcpy(pclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + memset(&msg, 0, sizeof(msg)); + vec.iov_base = &pclc; + vec.iov_len = sizeof(pclc); + /* due to the few bytes needed for clc-handshake this cannot block */ + len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(pclc)); + if (len < sizeof(pclc)) { + if (len >= 0) { + reason_code = -ENETUNREACH; + smc->sk.sk_err = -reason_code; + } else { + smc->sk.sk_err = smc->clcsock->sk->sk_err; + reason_code = -smc->sk.sk_err; + } + } + + return reason_code; +} + +/* send CLC CONFIRM message across internal TCP socket */ +int smc_clc_send_confirm(struct smc_sock *smc) +{ + struct smc_clc_msg_accept_confirm cclc; + int reason_code = 0; + struct msghdr msg; + struct kvec vec; + int len; + + /* send SMC Confirm CLC msg */ + memset(&cclc, 0, sizeof(cclc)); + memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + cclc.hdr.type = SMC_CLC_CONFIRM; + cclc.hdr.length = htons(sizeof(cclc)); + cclc.hdr.version = SMC_CLC_V1; /* SMC version */ + memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); + + /* tbd in follow-on patch: fill in link-related values */ + + /* tbd in follow-on patch: fill in rmb-related values */ + + cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ + + memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + + memset(&msg, 0, sizeof(msg)); + vec.iov_base = &cclc; + vec.iov_len = sizeof(cclc); + len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc)); + if (len < sizeof(cclc)) { + if (len >= 0) { + reason_code = -ENETUNREACH; + smc->sk.sk_err = -reason_code; + } else { + smc->sk.sk_err = smc->clcsock->sk->sk_err; + reason_code = -smc->sk.sk_err; + } + } + return reason_code; +} + +/* send CLC ACCEPT message across internal TCP socket */ +int smc_clc_send_accept(struct smc_sock *new_smc) +{ + struct smc_clc_msg_accept_confirm aclc; + struct msghdr msg; + struct kvec vec; + int rc = 0; + int len; + + memset(&aclc, 0, sizeof(aclc)); + memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, 
sizeof(SMC_EYECATCHER)); + aclc.hdr.type = SMC_CLC_ACCEPT; + aclc.hdr.length = htons(sizeof(aclc)); + aclc.hdr.version = SMC_CLC_V1; /* SMC version */ + memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); + + /* tbd in follow-on patch: fill in link-related values */ + + /* tbd in follow-on patch: fill in rmb-related values */ + + aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ + memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + + memset(&msg, 0, sizeof(msg)); + vec.iov_base = &aclc; + vec.iov_len = sizeof(aclc); + len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc)); + if (len < sizeof(aclc)) { + if (len >= 0) + new_smc->sk.sk_err = EPROTO; + else + new_smc->sk.sk_err = new_smc->clcsock->sk->sk_err; + rc = sock_error(&new_smc->sk); + } + + return rc; +} diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h new file mode 100644 index 000000000000..60bfc8bd8d89 --- /dev/null +++ b/net/smc/smc_clc.h @@ -0,0 +1,114 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * CLC (connection layer control) handshake over initial TCP socket to + * prepare for RDMA traffic + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#ifndef _SMC_CLC_H +#define _SMC_CLC_H + +#include + +#include "smc.h" + +#define SMC_CLC_PROPOSAL 0x01 +#define SMC_CLC_ACCEPT 0x02 +#define SMC_CLC_CONFIRM 0x03 +#define SMC_CLC_DECLINE 0x04 + +/* eye catcher "SMCR" EBCDIC for CLC messages */ +static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; + +#define SMC_CLC_V1 0x1 /* SMC version */ +#define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */ +#define SMC_CLC_DECL_MEM 0x01010000 /* insufficient memory resources */ +#define SMC_CLC_DECL_TIMEOUT 0x02000000 /* timeout */ +#define SMC_CLC_DECL_CNFERR 0x03000000 /* configuration error */ +#define SMC_CLC_DECL_IPSEC 0x03030000 /* IPsec usage */ +#define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */ +#define SMC_CLC_DECL_REPLY 0x06000000 /* reply to a received decline */ +#define SMC_CLC_DECL_INTERR 0x99990000 /* internal error */ + +struct smc_clc_msg_hdr { /* header1 of clc messages */ + u8 eyecatcher[4]; /* eye catcher */ + u8 type; /* proposal / accept / confirm / decline */ + __be16 length; +#if defined(__BIG_ENDIAN_BITFIELD) + u8 version : 4, + flag : 1, + rsvd : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd : 3, + flag : 1, + version : 4; +#endif +} __packed; /* format defined in RFC7609 */ + +struct smc_clc_msg_trail { /* trailer of clc messages */ + u8 eyecatcher[4]; +}; + +struct smc_clc_msg_local { /* header2 of clc messages */ + u8 id_for_peer[SMC_SYSTEMID_LEN]; /* unique system id */ + u8 gid[16]; /* gid of ib_device port */ + u8 mac[6]; /* mac of ib_device port */ +}; + +struct smc_clc_msg_proposal { /* clc proposal message */ + struct smc_clc_msg_hdr hdr; + struct smc_clc_msg_local lcl; + __be16 iparea_offset; /* offset to IP address information area */ + __be32 outgoing_subnet; /* subnet mask */ + u8 prefix_len; /* number of significant bits in mask */ + u8 reserved[2]; + u8 ipv6_prefixes_cnt; /* number of IPv6 prefixes in prefix array */ + struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */ +} __aligned(4); + +struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */ + struct smc_clc_msg_hdr hdr; + struct smc_clc_msg_local lcl; + u8 qpn[3]; /* QP number */ + __be32 rmb_rkey; /* RMB rkey */ + u8 conn_idx; /* Connection index, which RMBE in RMB */ + __be32 rmbe_alert_token;/* unique connection id */ +#if 
defined(__BIG_ENDIAN_BITFIELD) + u8 rmbe_size : 4, /* RMBE buf size (compressed notation) */ + qp_mtu : 4; /* QP mtu */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 qp_mtu : 4, + rmbe_size : 4; +#endif + u8 reserved; + __be64 rmb_dma_addr; /* RMB virtual address */ + u8 reserved2; + u8 psn[3]; /* initial packet sequence number */ + struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */ +} __packed; /* format defined in RFC7609 */ + +struct smc_clc_msg_decline { /* clc decline message */ + struct smc_clc_msg_hdr hdr; + u8 id_for_peer[SMC_SYSTEMID_LEN]; /* sender peer_id */ + __be32 peer_diagnosis; /* diagnosis information */ + u8 reserved2[4]; + struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */ +} __aligned(4); + +struct smc_sock; +struct smc_ib_device; + +int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, + u8 expected_type); +int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, + u8 out_of_sync); +int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev, + u8 ibport); +int smc_clc_send_confirm(struct smc_sock *smc); +int smc_clc_send_accept(struct smc_sock *smc); + +#endif -- cgit v1.2.3 From 0cfdd8f92cac01afbb12e4500514036a2b78756b Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:17 +0100 Subject: smc: connection and link group creation * create smc_connection for SMC-sockets * determine suitable link group for a connection * create a new link group if necessary Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/Makefile | 2 +- net/smc/af_smc.c | 102 ++++++++++++++-- net/smc/smc.h | 36 ++++++ net/smc/smc_clc.c | 38 +++++- net/smc/smc_clc.h | 2 +- net/smc/smc_core.c | 336 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_core.h | 106 +++++++++++++++++ 7 files changed, 605 insertions(+), 17 deletions(-) create mode 100644 net/smc/smc_core.c create mode 100644 net/smc/smc_core.h diff --git a/net/smc/Makefile b/net/smc/Makefile index c0ad5883e888..cb8bcd9df53e 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_SMC) += smc.o -smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o +smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 05c705a688e5..5fda37decc55 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -31,9 +31,19 @@ #include "smc.h" #include "smc_clc.h" +#include "smc_core.h" #include "smc_ib.h" #include "smc_pnet.h" +static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group + * creation + */ + +struct smc_lgr_list smc_lgr_list = { /* established link groups */ + .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock), + .list = LIST_HEAD_INIT(smc_lgr_list.list), +}; + static void smc_tcp_listen_work(struct work_struct *); static void smc_set_keepalive(struct sock *sk, int val) @@ -235,11 +245,31 @@ out: return rc; } +static void smc_conn_save_peer_info(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) +{ + smc->conn.peer_conn_idx = clc->conn_idx; +} + +static void smc_link_save_peer_info(struct smc_link *link, + struct smc_clc_msg_accept_confirm *clc) +{ + link->peer_qpn = ntoh24(clc->qpn); + memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE); + memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac)); + link->peer_psn = ntoh24(clc->psn); + link->peer_mtu = clc->qp_mtu; +} + /* setup for RDMA connection of client */ static int smc_connect_rdma(struct smc_sock *smc) { + struct sockaddr_in *inaddr = (struct sockaddr_in *)smc->addr; struct 
smc_clc_msg_accept_confirm aclc; + int local_contact = SMC_FIRST_CONTACT; struct smc_ib_device *smcibdev; + struct smc_link *link; + u8 srv_first_contact; int reason_code = 0; int rc = 0; u8 ibport; @@ -278,26 +308,43 @@ static int smc_connect_rdma(struct smc_sock *smc) if (reason_code > 0) goto decline_rdma; - /* tbd in follow-on patch: more steps to setup RDMA communcication, - * create connection, link group, link - */ + srv_first_contact = aclc.hdr.flag; + mutex_lock(&smc_create_lgr_pending); + local_contact = smc_conn_create(smc, inaddr->sin_addr.s_addr, smcibdev, + ibport, &aclc.lcl, srv_first_contact); + if (local_contact < 0) { + rc = local_contact; + if (rc == -ENOMEM) + reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ + else if (rc == -ENOLINK) + reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */ + goto decline_rdma_unlock; + } + link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK]; + smc_conn_save_peer_info(smc, &aclc); + if (local_contact == SMC_FIRST_CONTACT) + smc_link_save_peer_info(link, &aclc); /* tbd in follow-on patch: more steps to setup RDMA communcication, * create rmbs, map rmbs, rtoken_handling, modify_qp */ rc = smc_clc_send_confirm(smc); if (rc) - goto out_err; + goto out_err_unlock; /* tbd in follow-on patch: llc_confirm */ + mutex_unlock(&smc_create_lgr_pending); out_connected: smc_copy_sock_settings_to_clc(smc); smc->sk.sk_state = SMC_ACTIVE; - return rc; + return rc ? rc : local_contact; +decline_rdma_unlock: + mutex_unlock(&smc_create_lgr_pending); + smc_conn_free(&smc->conn); decline_rdma: /* RDMA setup failed, switch back to TCP */ smc->use_fallback = true; @@ -308,6 +355,9 @@ decline_rdma: } goto out_connected; +out_err_unlock: + mutex_unlock(&smc_create_lgr_pending); + smc_conn_free(&smc->conn); out_err: return rc; } @@ -476,10 +526,12 @@ static void smc_listen_work(struct work_struct *work) struct socket *newclcsock = new_smc->clcsock; struct smc_sock *lsmc = new_smc->listen_smc; struct smc_clc_msg_accept_confirm cclc; + int local_contact = SMC_REUSE_CONTACT; struct sock *newsmcsk = &new_smc->sk; struct smc_clc_msg_proposal pclc; struct smc_ib_device *smcibdev; struct sockaddr_in peeraddr; + struct smc_link *link; int reason_code = 0; int rc = 0, len; __be32 subnet; @@ -527,15 +579,30 @@ static void smc_listen_work(struct work_struct *work) /* get address of the peer connected to the internal TCP socket */ kernel_getpeername(newclcsock, (struct sockaddr *)&peeraddr, &len); - /* tbd in follow-on patch: more steps to setup RDMA communcication, - * create connection, link_group, link - */ + /* allocate connection / link group */ + mutex_lock(&smc_create_lgr_pending); + local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr, + smcibdev, ibport, &pclc.lcl, 0); + if (local_contact == SMC_REUSE_CONTACT) + /* lock no longer needed, free it due to following + * smc_clc_wait_msg() call + */ + mutex_unlock(&smc_create_lgr_pending); + if (local_contact < 0) { + rc = local_contact; + if (rc == -ENOMEM) + reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ + else if (rc == -ENOLINK) + reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. 
error */ + goto decline_rdma; + } + link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; /* tbd in follow-on patch: more steps to setup RDMA communcication, * create rmbs, map rmbs */ - rc = smc_clc_send_accept(new_smc); + rc = smc_clc_send_accept(new_smc, local_contact); if (rc) goto out_err; @@ -546,6 +613,9 @@ static void smc_listen_work(struct work_struct *work) goto out_err; if (reason_code > 0) goto decline_rdma; + smc_conn_save_peer_info(new_smc, &cclc); + if (local_contact == SMC_FIRST_CONTACT) + smc_link_save_peer_info(link, &cclc); /* tbd in follow-on patch: more steps to setup RDMA communcication, * rtoken_handling, modify_qp @@ -555,6 +625,8 @@ out_connected: sk_refcnt_debug_inc(newsmcsk); newsmcsk->sk_state = SMC_ACTIVE; enqueue: + if (local_contact == SMC_FIRST_CONTACT) + mutex_unlock(&smc_create_lgr_pending); lock_sock(&lsmc->sk); if (lsmc->sk.sk_state == SMC_LISTEN) { smc_accept_enqueue(&lsmc->sk, newsmcsk); @@ -570,6 +642,7 @@ enqueue: decline_rdma: /* RDMA setup failed, switch back to TCP */ + smc_conn_free(&new_smc->conn); new_smc->use_fallback = true; if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) { rc = smc_clc_send_decline(new_smc, reason_code, 0); @@ -1024,6 +1097,17 @@ out_pnet: static void __exit smc_exit(void) { + struct smc_link_group *lgr, *lg; + LIST_HEAD(lgr_freeing_list); + + spin_lock_bh(&smc_lgr_list.lock); + if (!list_empty(&smc_lgr_list.list)) + list_splice_init(&smc_lgr_list.list, &lgr_freeing_list); + spin_unlock_bh(&smc_lgr_list.lock); + list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) { + list_del_init(&lgr->list); + smc_lgr_free(lgr); /* free link group */ + } smc_ib_unregister_client(); sock_unregister(PF_SMC); proto_unregister(&smc_proto); diff --git a/net/smc/smc.h b/net/smc/smc.h index 5946aaecdebf..11265bde4655 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -14,6 +14,8 @@ #include #include +#include "smc_ib.h" + #define SMCPROTO_SMC 0 /* SMC protocol */ #define SMC_MAX_PORTS 2 /* Max # of ports */ @@ -25,9 +27,19 @@ enum smc_state { /* possible states of an SMC socket */ SMC_LISTEN = 10, }; +struct smc_link_group; + +struct smc_connection { + struct rb_node alert_node; + struct smc_link_group *lgr; /* link group of connection */ + u32 alert_token_local; /* unique conn. 
id */ + u8 peer_conn_idx; /* from tcp handshake */ +}; + struct smc_sock { /* smc sock container */ struct sock sk; struct socket *clcsock; /* internal tcp socket */ + struct smc_connection conn; /* smc connection */ struct sockaddr *addr; /* inet connect address */ struct smc_sock *listen_smc; /* listen parent */ struct work_struct tcp_listen_work;/* handle tcp socket accepts */ @@ -46,6 +58,24 @@ static inline struct smc_sock *smc_sk(const struct sock *sk) extern u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */ +/* convert an u32 value into network byte order, store it into a 3 byte field */ +static inline void hton24(u8 *net, u32 host) +{ + __be32 t; + + t = cpu_to_be32(host); + memcpy(net, ((u8 *)&t) + 1, 3); +} + +/* convert a received 3 byte field into host byte order*/ +static inline u32 ntoh24(u8 *net) +{ + __be32 t = 0; + + memcpy(((u8 *)&t) + 1, net, 3); + return be32_to_cpu(t); +} + #ifdef CONFIG_XFRM static inline bool using_ipsec(struct smc_sock *smc) { @@ -59,7 +89,13 @@ static inline bool using_ipsec(struct smc_sock *smc) } #endif +struct smc_clc_msg_local; + int smc_netinfo_by_tcpsk(struct socket *clcsock, __be32 *subnet, u8 *prefix_len); +void smc_conn_free(struct smc_connection *conn); +int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr, + struct smc_ib_device *smcibdev, u8 ibport, + struct smc_clc_msg_local *lcl, int srv_first_contact); #endif /* __SMC_H */ diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index b613b866a2ef..f8e47c06d2ed 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -14,6 +14,7 @@ #include #include "smc.h" +#include "smc_core.h" #include "smc_clc.h" #include "smc_ib.h" @@ -89,8 +90,13 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, reason_code = -EPROTO; goto out; } - if (clcm->type == SMC_CLC_DECLINE) + if (clcm->type == SMC_CLC_DECLINE) { reason_code = SMC_CLC_DECL_REPLY; + if (ntohl(((struct smc_clc_msg_decline *)buf)->peer_diagnosis) + == SMC_CLC_DECL_SYNCERR) + smc->conn.lgr->sync_err = true; + } + out: return reason_code; } @@ -175,12 +181,15 @@ int smc_clc_send_proposal(struct smc_sock *smc, /* send CLC CONFIRM message across internal TCP socket */ int smc_clc_send_confirm(struct smc_sock *smc) { + struct smc_connection *conn = &smc->conn; struct smc_clc_msg_accept_confirm cclc; + struct smc_link *link; int reason_code = 0; struct msghdr msg; struct kvec vec; int len; + link = &conn->lgr->lnk[SMC_SINGLE_LINK]; /* send SMC Confirm CLC msg */ memset(&cclc, 0, sizeof(cclc)); memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); @@ -188,12 +197,18 @@ int smc_clc_send_confirm(struct smc_sock *smc) cclc.hdr.length = htons(sizeof(cclc)); cclc.hdr.version = SMC_CLC_V1; /* SMC version */ memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - - /* tbd in follow-on patch: fill in link-related values */ + memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], + SMC_GID_SIZE); + memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], + sizeof(link->smcibdev->mac)); /* tbd in follow-on patch: fill in rmb-related values */ + hton24(cclc.qpn, link->roce_qp->qp_num); cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ + cclc.rmbe_alert_token = htonl(conn->alert_token_local); + cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); + hton24(cclc.psn, link->psn_initial); memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); @@ -214,26 +229,37 @@ int smc_clc_send_confirm(struct smc_sock *smc) } /* send CLC ACCEPT message across internal TCP socket */ 
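/*
 * Illustrative sketch, not part of the patch: the CLC accept/confirm messages
 * carry the QP number and initial packet sequence number as 3-byte fields
 * (qpn[3], psn[3]), which is why smc.h above adds hton24()/ntoh24() instead of
 * relying on the usual 16/32-bit byte-order helpers.  The user-space round
 * trip below reproduces the same packing logic; htonl()/ntohl() stand in for
 * cpu_to_be32()/be32_to_cpu(), and the QP number is a made-up value.
 */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* pack a host u32 into a 3-byte network-order field (logic as in smc.h) */
static void hton24(uint8_t *net, uint32_t host)
{
	uint32_t t = htonl(host);

	memcpy(net, (uint8_t *)&t + 1, 3);	/* drop the most significant byte */
}

/* unpack a received 3-byte field back into host byte order */
static uint32_t ntoh24(const uint8_t *net)
{
	uint32_t t = 0;

	memcpy((uint8_t *)&t + 1, net, 3);
	return ntohl(t);
}

int main(void)
{
	uint8_t qpn[3];
	uint32_t val = 0x123456;	/* hypothetical QP number, fits in 24 bits */

	hton24(qpn, val);
	assert(qpn[0] == 0x12 && qpn[1] == 0x34 && qpn[2] == 0x56);
	assert(ntoh24(qpn) == val);
	return 0;
}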
-int smc_clc_send_accept(struct smc_sock *new_smc) +int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) { + struct smc_connection *conn = &new_smc->conn; struct smc_clc_msg_accept_confirm aclc; + struct smc_link *link; struct msghdr msg; struct kvec vec; int rc = 0; int len; + link = &conn->lgr->lnk[SMC_SINGLE_LINK]; memset(&aclc, 0, sizeof(aclc)); memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); aclc.hdr.type = SMC_CLC_ACCEPT; aclc.hdr.length = htons(sizeof(aclc)); aclc.hdr.version = SMC_CLC_V1; /* SMC version */ + if (srv_first_contact) + aclc.hdr.flag = 1; memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - - /* tbd in follow-on patch: fill in link-related values */ + memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], + SMC_GID_SIZE); + memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], + sizeof(link->smcibdev->mac[link->ibport - 1])); /* tbd in follow-on patch: fill in rmb-related values */ + hton24(aclc.qpn, link->roce_qp->qp_num); aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ + aclc.rmbe_alert_token = htonl(conn->alert_token_local); + aclc.qp_mtu = link->path_mtu; + hton24(aclc.psn, link->psn_initial); memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); memset(&msg, 0, sizeof(msg)); diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 60bfc8bd8d89..5924d998b5ca 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -109,6 +109,6 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev, u8 ibport); int smc_clc_send_confirm(struct smc_sock *smc); -int smc_clc_send_accept(struct smc_sock *smc); +int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact); #endif diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c new file mode 100644 index 000000000000..b88a82918c82 --- /dev/null +++ b/net/smc/smc_core.c @@ -0,0 +1,336 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Basic Transport Functions exploiting Infiniband API + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "smc.h" +#include "smc_clc.h" +#include "smc_core.h" +#include "smc_ib.h" + +#define SMC_LGR_FREE_DELAY (600 * HZ) + +/* Register connection's alert token in our lookup structure. + * To use rbtrees we have to implement our own insert core. + * Requires @conns_lock + * @smc connection to register + * Returns 0 on success, != otherwise. + */ +static void smc_lgr_add_alert_token(struct smc_connection *conn) +{ + struct rb_node **link, *parent = NULL; + u32 token = conn->alert_token_local; + + link = &conn->lgr->conns_all.rb_node; + while (*link) { + struct smc_connection *cur = rb_entry(*link, + struct smc_connection, alert_node); + + parent = *link; + if (cur->alert_token_local > token) + link = &parent->rb_left; + else + link = &parent->rb_right; + } + /* Put the new node there */ + rb_link_node(&conn->alert_node, parent, link); + rb_insert_color(&conn->alert_node, &conn->lgr->conns_all); +} + +/* Register connection in link group by assigning an alert token + * registered in a search tree. + * Requires @conns_lock + * Note that '0' is a reserved value and not assigned. 
+ */ +static void smc_lgr_register_conn(struct smc_connection *conn) +{ + struct smc_sock *smc = container_of(conn, struct smc_sock, conn); + static atomic_t nexttoken = ATOMIC_INIT(0); + + /* find a new alert_token_local value not yet used by some connection + * in this link group + */ + sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */ + while (!conn->alert_token_local) { + conn->alert_token_local = atomic_inc_return(&nexttoken); + if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr)) + conn->alert_token_local = 0; + } + smc_lgr_add_alert_token(conn); + conn->lgr->conns_num++; +} + +/* Unregister connection and reset the alert token of the given connection< + */ +static void __smc_lgr_unregister_conn(struct smc_connection *conn) +{ + struct smc_sock *smc = container_of(conn, struct smc_sock, conn); + struct smc_link_group *lgr = conn->lgr; + + rb_erase(&conn->alert_node, &lgr->conns_all); + lgr->conns_num--; + conn->alert_token_local = 0; + conn->lgr = NULL; + sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */ +} + +/* Unregister connection and trigger lgr freeing if applicable + */ +static void smc_lgr_unregister_conn(struct smc_connection *conn) +{ + struct smc_link_group *lgr = conn->lgr; + int reduced = 0; + + write_lock_bh(&lgr->conns_lock); + if (conn->alert_token_local) { + reduced = 1; + __smc_lgr_unregister_conn(conn); + } + write_unlock_bh(&lgr->conns_lock); + if (reduced && !lgr->conns_num) + schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY); +} + +static void smc_lgr_free_work(struct work_struct *work) +{ + struct smc_link_group *lgr = container_of(to_delayed_work(work), + struct smc_link_group, + free_work); + bool conns; + + spin_lock_bh(&smc_lgr_list.lock); + read_lock_bh(&lgr->conns_lock); + conns = RB_EMPTY_ROOT(&lgr->conns_all); + read_unlock_bh(&lgr->conns_lock); + if (!conns) { /* number of lgr connections is no longer zero */ + spin_unlock_bh(&smc_lgr_list.lock); + return; + } + list_del_init(&lgr->list); /* remove from smc_lgr_list */ + spin_unlock_bh(&smc_lgr_list.lock); + smc_lgr_free(lgr); +} + +/* create a new SMC link group */ +static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, + struct smc_ib_device *smcibdev, u8 ibport, + char *peer_systemid, unsigned short vlan_id) +{ + struct smc_link_group *lgr; + struct smc_link *lnk; + u8 rndvec[3]; + int rc = 0; + + lgr = kzalloc(sizeof(*lgr), GFP_KERNEL); + if (!lgr) { + rc = -ENOMEM; + goto out; + } + lgr->role = smc->listen_smc ? 
SMC_SERV : SMC_CLNT; + lgr->sync_err = false; + lgr->daddr = peer_in_addr; + memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN); + lgr->vlan_id = vlan_id; + INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work); + lgr->conns_all = RB_ROOT; + + lnk = &lgr->lnk[SMC_SINGLE_LINK]; + /* initialize link */ + lnk->smcibdev = smcibdev; + lnk->ibport = ibport; + lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; + get_random_bytes(rndvec, sizeof(rndvec)); + lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); + + smc->conn.lgr = lgr; + rwlock_init(&lgr->conns_lock); + spin_lock_bh(&smc_lgr_list.lock); + list_add(&lgr->list, &smc_lgr_list.list); + spin_unlock_bh(&smc_lgr_list.lock); +out: + return rc; +} + +/* remove a finished connection from its link group */ +void smc_conn_free(struct smc_connection *conn) +{ + struct smc_link_group *lgr = conn->lgr; + + if (!lgr) + return; + smc_lgr_unregister_conn(conn); +} + +static void smc_link_clear(struct smc_link *lnk) +{ + lnk->peer_qpn = 0; +} + +/* remove a link group */ +void smc_lgr_free(struct smc_link_group *lgr) +{ + smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); + kfree(lgr); +} + +/* terminate linkgroup abnormally */ +void smc_lgr_terminate(struct smc_link_group *lgr) +{ + struct smc_connection *conn; + struct rb_node *node; + + spin_lock_bh(&smc_lgr_list.lock); + if (list_empty(&lgr->list)) { + /* termination already triggered */ + spin_unlock_bh(&smc_lgr_list.lock); + return; + } + /* do not use this link group for new connections */ + list_del_init(&lgr->list); + spin_unlock_bh(&smc_lgr_list.lock); + + write_lock_bh(&lgr->conns_lock); + node = rb_first(&lgr->conns_all); + while (node) { + conn = rb_entry(node, struct smc_connection, alert_node); + __smc_lgr_unregister_conn(conn); + node = rb_first(&lgr->conns_all); + } + write_unlock_bh(&lgr->conns_lock); + schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY); +} + +/* Determine vlan of internal TCP socket. + * @vlan_id: address to store the determined vlan id into + */ +static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id) +{ + struct dst_entry *dst = sk_dst_get(clcsock->sk); + int rc = 0; + + *vlan_id = 0; + if (!dst) { + rc = -ENOTCONN; + goto out; + } + if (!dst->dev) { + rc = -ENODEV; + goto out_rel; + } + + if (is_vlan_dev(dst->dev)) + *vlan_id = vlan_dev_vlan_id(dst->dev); + +out_rel: + dst_release(dst); +out: + return rc; +} + +/* determine the link gid matching the vlan id of the link group */ +static int smc_link_determine_gid(struct smc_link_group *lgr) +{ + struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; + struct ib_gid_attr gattr; + union ib_gid gid; + int i; + + if (!lgr->vlan_id) { + lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1]; + return 0; + } + + for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len; + i++) { + if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid, + &gattr)) + continue; + if (gattr.ndev && + (vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id)) { + lnk->gid = gid; + return 0; + } + } + return -ENODEV; +} + +/* create a new SMC connection (and a new link group if necessary) */ +int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr, + struct smc_ib_device *smcibdev, u8 ibport, + struct smc_clc_msg_local *lcl, int srv_first_contact) +{ + struct smc_connection *conn = &smc->conn; + struct smc_link_group *lgr; + unsigned short vlan_id; + enum smc_lgr_role role; + int local_contact = SMC_FIRST_CONTACT; + int rc = 0; + + role = smc->listen_smc ? 
SMC_SERV : SMC_CLNT; + rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id); + if (rc) + return rc; + + if ((role == SMC_CLNT) && srv_first_contact) + /* create new link group as well */ + goto create; + + /* determine if an existing link group can be reused */ + spin_lock_bh(&smc_lgr_list.lock); + list_for_each_entry(lgr, &smc_lgr_list.list, list) { + write_lock_bh(&lgr->conns_lock); + if (!memcmp(lgr->peer_systemid, lcl->id_for_peer, + SMC_SYSTEMID_LEN) && + !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid, + SMC_GID_SIZE) && + !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, + sizeof(lcl->mac)) && + !lgr->sync_err && + (lgr->role == role) && + (lgr->vlan_id == vlan_id)) { + /* link group found */ + local_contact = SMC_REUSE_CONTACT; + conn->lgr = lgr; + smc_lgr_register_conn(conn); /* add smc conn to lgr */ + write_unlock_bh(&lgr->conns_lock); + break; + } + write_unlock_bh(&lgr->conns_lock); + } + spin_unlock_bh(&smc_lgr_list.lock); + + if (role == SMC_CLNT && !srv_first_contact && + (local_contact == SMC_FIRST_CONTACT)) { + /* Server reuses a link group, but Client wants to start + * a new one + * send out_of_sync decline, reason synchr. error + */ + return -ENOLINK; + } + +create: + if (local_contact == SMC_FIRST_CONTACT) { + rc = smc_lgr_create(smc, peer_in_addr, smcibdev, ibport, + lcl->id_for_peer, vlan_id); + if (rc) + goto out; + smc_lgr_register_conn(conn); /* add smc conn to lgr */ + rc = smc_link_determine_gid(conn->lgr); + } + +out: + return rc ? rc : local_contact; +} diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h new file mode 100644 index 000000000000..14b787abae02 --- /dev/null +++ b/net/smc/smc_core.h @@ -0,0 +1,106 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for SMC Connections, Link Groups and Links + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#ifndef _SMC_CORE_H +#define _SMC_CORE_H + +#include + +#include "smc.h" +#include "smc_ib.h" + +struct smc_lgr_list { /* list of link group definition */ + struct list_head list; + spinlock_t lock; /* protects list of link groups */ +}; + +extern struct smc_lgr_list smc_lgr_list; /* list of link groups */ + +enum smc_lgr_role { /* possible roles of a link group */ + SMC_CLNT, /* client */ + SMC_SERV /* server */ +}; + +struct smc_link { + struct smc_ib_device *smcibdev; /* ib-device */ + u8 ibport; /* port - values 1 | 2 */ + struct ib_qp *roce_qp; /* IB queue pair */ + struct ib_qp_attr qp_attr; /* IB queue pair attributes */ + union ib_gid gid; /* gid matching used vlan id */ + u32 peer_qpn; /* QP number of peer */ + enum ib_mtu path_mtu; /* used mtu */ + enum ib_mtu peer_mtu; /* mtu size of peer */ + u32 psn_initial; /* QP tx initial packet seqno */ + u32 peer_psn; /* QP rx initial packet seqno */ + u8 peer_mac[ETH_ALEN]; /* = gid[8:10||13:15] */ + u8 peer_gid[sizeof(union ib_gid)]; /* gid of peer*/ +}; + +/* For now we just allow one parallel link per link group. The SMC protocol + * allows more (up to 8). 
+ */ +#define SMC_LINKS_PER_LGR_MAX 1 +#define SMC_SINGLE_LINK 0 + +#define SMC_FIRST_CONTACT 1 /* first contact to a peer */ +#define SMC_REUSE_CONTACT 0 /* follow-on contact to a peer*/ + +struct smc_link_group { + struct list_head list; + enum smc_lgr_role role; /* client or server */ + __be32 daddr; /* destination ip address */ + struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; /* smc link */ + char peer_systemid[SMC_SYSTEMID_LEN]; + /* unique system_id of peer */ + struct rb_root conns_all; /* connection tree */ + rwlock_t conns_lock; /* protects conns_all */ + unsigned int conns_num; /* current # of connections */ + unsigned short vlan_id; /* vlan id of link group */ + struct delayed_work free_work; /* delayed freeing of an lgr */ + bool sync_err; /* lgr no longer fits to peer */ +}; + +/* Find the connection associated with the given alert token in the link group. + * To use rbtrees we have to implement our own search core. + * Requires @conns_lock + * @token alert token to search for + * @lgr link group to search in + * Returns connection associated with token if found, NULL otherwise. + */ +static inline struct smc_connection *smc_lgr_find_conn( + u32 token, struct smc_link_group *lgr) +{ + struct smc_connection *res = NULL; + struct rb_node *node; + + node = lgr->conns_all.rb_node; + while (node) { + struct smc_connection *cur = rb_entry(node, + struct smc_connection, alert_node); + + if (cur->alert_token_local > token) { + node = node->rb_left; + } else { + if (cur->alert_token_local < token) { + node = node->rb_right; + } else { + res = cur; + break; + } + } + } + + return res; +} + +void smc_lgr_free(struct smc_link_group *lgr); +void smc_lgr_terminate(struct smc_link_group *lgr); + +#endif -- cgit v1.2.3 From cd6851f30386e5e04b5c2253f8e1647ba0ebcd31 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:18 +0100 Subject: smc: remote memory buffers (RMBs) * allocate data RMB memory for sending and receiving * size depends on the maximum socket send and receive buffers * allocated RMBs are kept during life time of the owning link group * map the allocated RMBs to DMA Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/af_smc.c | 29 ++++++- net/smc/smc.h | 45 +++++++++++ net/smc/smc_clc.c | 6 +- net/smc/smc_core.c | 224 ++++++++++++++++++++++++++++++++++++++++++++++++++++- net/smc/smc_core.h | 21 +++++ net/smc/smc_ib.c | 19 +++++ net/smc/smc_ib.h | 5 ++ 7 files changed, 342 insertions(+), 7 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 5fda37decc55..a38f470130d3 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -249,6 +249,8 @@ static void smc_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { smc->conn.peer_conn_idx = clc->conn_idx; + smc->conn.peer_rmbe_size = smc_uncompress_bufsize(clc->rmbe_size); + atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); } static void smc_link_save_peer_info(struct smc_link *link, @@ -323,6 +325,18 @@ static int smc_connect_rdma(struct smc_sock *smc) link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK]; smc_conn_save_peer_info(smc, &aclc); + + rc = smc_sndbuf_create(smc); + if (rc) { + reason_code = SMC_CLC_DECL_MEM; + goto decline_rdma_unlock; + } + rc = smc_rmb_create(smc); + if (rc) { + reason_code = SMC_CLC_DECL_MEM; + goto decline_rdma_unlock; + } + if (local_contact == SMC_FIRST_CONTACT) smc_link_save_peer_info(link, &aclc); /* tbd in follow-on patch: more steps to setup RDMA communcication, @@ -598,9 +612,16 @@ static void smc_listen_work(struct work_struct *work) } link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; - /* tbd in follow-on patch: more steps to setup RDMA communcication, - * create rmbs, map rmbs - */ + rc = smc_sndbuf_create(new_smc); + if (rc) { + reason_code = SMC_CLC_DECL_MEM; + goto decline_rdma; + } + rc = smc_rmb_create(new_smc); + if (rc) { + reason_code = SMC_CLC_DECL_MEM; + goto decline_rdma; + } rc = smc_clc_send_accept(new_smc, local_contact); if (rc) @@ -1047,6 +1068,8 @@ static int smc_create(struct net *net, struct socket *sock, int protocol, IPPROTO_TCP, &smc->clcsock); if (rc) sk_common_release(sk); + smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); + smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); out: return rc; diff --git a/net/smc/smc.h b/net/smc/smc.h index 11265bde4655..2bf504492133 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -34,6 +34,16 @@ struct smc_connection { struct smc_link_group *lgr; /* link group of connection */ u32 alert_token_local; /* unique conn. id */ u8 peer_conn_idx; /* from tcp handshake */ + int peer_rmbe_size; /* size of peer rx buffer */ + atomic_t peer_rmbe_space;/* remaining free bytes in peer + * rmbe + */ + + struct smc_buf_desc *sndbuf_desc; /* send buffer descriptor */ + int sndbuf_size; /* sndbuf size <== sock wmem */ + struct smc_buf_desc *rmb_desc; /* RMBE descriptor */ + int rmbe_size; /* RMBE size <== sock rmem */ + int rmbe_size_short;/* compressed notation */ }; struct smc_sock { /* smc sock container */ @@ -76,6 +86,41 @@ static inline u32 ntoh24(u8 *net) return be32_to_cpu(t); } +#define SMC_BUF_MIN_SIZE 16384 /* minimum size of an RMB */ + +#define SMC_RMBE_SIZES 16 /* number of distinct sizes for an RMBE */ +/* theoretically, the RFC states that largest size would be 512K, + * i.e. compressed 5 and thus 6 sizes (0..5), despite + * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15) + */ + +/* convert the RMB size into the compressed notation - minimum 16K. + * In contrast to plain ilog2, this rounds towards the next power of 2, + * so the socket application gets at least its desired sndbuf / rcvbuf size. 
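/*
 * Illustrative sketch, not part of the patch: worked values for the
 * compressed RMBE-size notation defined below.  The two helpers are
 * reimplemented in user space with the same formulas (ilog2_u32() stands in
 * for the kernel's ilog2()); the sample sizes are arbitrary and show how a
 * requested size is rounded up to the next supported power of two.
 */
#include <assert.h>
#include <stdint.h>

#define SMC_BUF_MIN_SIZE	16384	/* minimum RMB size, as in smc.h */
#define SMC_RMBE_SIZES		16	/* number of distinct compressed sizes */

/* user-space stand-in for the kernel's ilog2() on a non-zero value */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* same rounding-up formula as smc_compress_bufsize() */
static uint8_t compress_bufsize(int size)
{
	uint8_t compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;
	size = (size - 1) >> 14;
	compressed = ilog2_u32(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* same expansion as smc_uncompress_bufsize(): 16K << compressed */
static int uncompress_bufsize(uint8_t compressed)
{
	return 1 << (compressed + 14);
}

int main(void)
{
	assert(compress_bufsize(16384) == 0 && uncompress_bufsize(0) == 16384);
	assert(compress_bufsize(16385) == 1 && uncompress_bufsize(1) == 32768);
	assert(compress_bufsize(65536) == 2 && uncompress_bufsize(2) == 65536);
	/* 100000 is not a power of two: rounded up to the next RMBE size */
	assert(compress_bufsize(100000) == 3 && uncompress_bufsize(3) == 131072);
	return 0;
}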
+ */ +static inline u8 smc_compress_bufsize(int size) +{ + u8 compressed; + + if (size <= SMC_BUF_MIN_SIZE) + return 0; + + size = (size - 1) >> 14; + compressed = ilog2(size) + 1; + if (compressed >= SMC_RMBE_SIZES) + compressed = SMC_RMBE_SIZES - 1; + return compressed; +} + +/* convert the RMB size from compressed notation into integer */ +static inline int smc_uncompress_bufsize(u8 compressed) +{ + u32 size; + + size = 0x00000001 << (((int)compressed) + 14); + return (int)size; +} + #ifdef CONFIG_XFRM static inline bool using_ipsec(struct smc_sock *smc) { diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index f8e47c06d2ed..4b475dddef16 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -252,13 +252,13 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) SMC_GID_SIZE); memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], sizeof(link->smcibdev->mac[link->ibport - 1])); - - /* tbd in follow-on patch: fill in rmb-related values */ - hton24(aclc.qpn, link->roce_qp->qp_num); aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ aclc.rmbe_alert_token = htonl(conn->alert_token_local); aclc.qp_mtu = link->path_mtu; + aclc.rmbe_size = conn->rmbe_size_short, + aclc.rmb_dma_addr = + cpu_to_be64((u64)conn->rmb_desc->dma_addr[SMC_SINGLE_LINK]); hton24(aclc.psn, link->psn_initial); memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index b88a82918c82..e1b95728ca81 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -133,6 +133,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, struct smc_link *lnk; u8 rndvec[3]; int rc = 0; + int i; lgr = kzalloc(sizeof(*lgr), GFP_KERNEL); if (!lgr) { @@ -144,6 +145,12 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, lgr->daddr = peer_in_addr; memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN); lgr->vlan_id = vlan_id; + rwlock_init(&lgr->sndbufs_lock); + rwlock_init(&lgr->rmbs_lock); + for (i = 0; i < SMC_RMBE_SIZES; i++) { + INIT_LIST_HEAD(&lgr->sndbufs[i]); + INIT_LIST_HEAD(&lgr->rmbs[i]); + } INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work); lgr->conns_all = RB_ROOT; @@ -164,6 +171,22 @@ out: return rc; } +static void smc_sndbuf_unuse(struct smc_connection *conn) +{ + if (conn->sndbuf_desc) { + conn->sndbuf_desc->used = 0; + conn->sndbuf_size = 0; + } +} + +static void smc_rmb_unuse(struct smc_connection *conn) +{ + if (conn->rmb_desc) { + conn->rmb_desc->used = 0; + conn->rmbe_size = 0; + } +} + /* remove a finished connection from its link group */ void smc_conn_free(struct smc_connection *conn) { @@ -172,6 +195,8 @@ void smc_conn_free(struct smc_connection *conn) if (!lgr) return; smc_lgr_unregister_conn(conn); + smc_rmb_unuse(conn); + smc_sndbuf_unuse(conn); } static void smc_link_clear(struct smc_link *lnk) @@ -179,9 +204,39 @@ static void smc_link_clear(struct smc_link *lnk) lnk->peer_qpn = 0; } +static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) +{ + struct smc_buf_desc *sndbuf_desc, *bf_desc; + int i; + + for (i = 0; i < SMC_RMBE_SIZES; i++) { + list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i], + list) { + kfree(sndbuf_desc->cpu_addr); + kfree(sndbuf_desc); + } + } +} + +static void smc_lgr_free_rmbs(struct smc_link_group *lgr) +{ + struct smc_buf_desc *rmb_desc, *bf_desc; + int i; + + for (i = 0; i < SMC_RMBE_SIZES; i++) { + list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i], + list) { + kfree(rmb_desc->cpu_addr); + kfree(rmb_desc); + } + } +} + /* 
remove a link group */ void smc_lgr_free(struct smc_link_group *lgr) { + smc_lgr_free_rmbs(lgr); + smc_lgr_free_sndbufs(lgr); smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); kfree(lgr); } @@ -300,7 +355,9 @@ int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr, sizeof(lcl->mac)) && !lgr->sync_err && (lgr->role == role) && - (lgr->vlan_id == vlan_id)) { + (lgr->vlan_id == vlan_id) && + ((role == SMC_CLNT) || + (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) { /* link group found */ local_contact = SMC_REUSE_CONTACT; conn->lgr = lgr; @@ -334,3 +391,168 @@ create: out: return rc ? rc : local_contact; } + +/* try to reuse a sndbuf description slot of the sndbufs list for a certain + * buf_size; if not available, return NULL + */ +static inline +struct smc_buf_desc *smc_sndbuf_get_slot(struct smc_link_group *lgr, + int compressed_bufsize) +{ + struct smc_buf_desc *sndbuf_slot; + + read_lock_bh(&lgr->sndbufs_lock); + list_for_each_entry(sndbuf_slot, &lgr->sndbufs[compressed_bufsize], + list) { + if (cmpxchg(&sndbuf_slot->used, 0, 1) == 0) { + read_unlock_bh(&lgr->sndbufs_lock); + return sndbuf_slot; + } + } + read_unlock_bh(&lgr->sndbufs_lock); + return NULL; +} + +/* try to reuse an rmb description slot of the rmbs list for a certain + * rmbe_size; if not available, return NULL + */ +static inline +struct smc_buf_desc *smc_rmb_get_slot(struct smc_link_group *lgr, + int compressed_bufsize) +{ + struct smc_buf_desc *rmb_slot; + + read_lock_bh(&lgr->rmbs_lock); + list_for_each_entry(rmb_slot, &lgr->rmbs[compressed_bufsize], + list) { + if (cmpxchg(&rmb_slot->used, 0, 1) == 0) { + read_unlock_bh(&lgr->rmbs_lock); + return rmb_slot; + } + } + read_unlock_bh(&lgr->rmbs_lock); + return NULL; +} + +/* create the tx buffer for an SMC socket */ +int smc_sndbuf_create(struct smc_sock *smc) +{ + struct smc_connection *conn = &smc->conn; + struct smc_link_group *lgr = conn->lgr; + int tmp_bufsize, tmp_bufsize_short; + struct smc_buf_desc *sndbuf_desc; + int rc; + + /* use socket send buffer size (w/o overhead) as start value */ + for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2); + tmp_bufsize_short >= 0; tmp_bufsize_short--) { + tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short); + /* check for reusable sndbuf_slot in the link group */ + sndbuf_desc = smc_sndbuf_get_slot(lgr, tmp_bufsize_short); + if (sndbuf_desc) { + memset(sndbuf_desc->cpu_addr, 0, tmp_bufsize); + break; /* found reusable slot */ + } + /* try to alloc a new send buffer */ + sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL); + if (!sndbuf_desc) + break; /* give up with -ENOMEM */ + sndbuf_desc->cpu_addr = kzalloc(tmp_bufsize, + GFP_KERNEL | __GFP_NOWARN | + __GFP_NOMEMALLOC | + __GFP_NORETRY); + if (!sndbuf_desc->cpu_addr) { + kfree(sndbuf_desc); + /* if send buffer allocation has failed, + * try a smaller one + */ + continue; + } + rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + tmp_bufsize, sndbuf_desc, + DMA_TO_DEVICE); + if (rc) { + kfree(sndbuf_desc->cpu_addr); + kfree(sndbuf_desc); + continue; /* if mapping failed, try smaller one */ + } + sndbuf_desc->used = 1; + write_lock_bh(&lgr->sndbufs_lock); + list_add(&sndbuf_desc->list, + &lgr->sndbufs[tmp_bufsize_short]); + write_unlock_bh(&lgr->sndbufs_lock); + break; + } + if (sndbuf_desc && sndbuf_desc->cpu_addr) { + conn->sndbuf_desc = sndbuf_desc; + conn->sndbuf_size = tmp_bufsize; + smc->sk.sk_sndbuf = tmp_bufsize * 2; + return 0; + } else { + return -ENOMEM; + } +} + +/* create the RMB for an SMC socket (even though the SMC protocol + * allows 
more than one RMB-element per RMB, the Linux implementation + * uses just one RMB-element per RMB, i.e. uses an extra RMB for every + * connection in a link group + */ +int smc_rmb_create(struct smc_sock *smc) +{ + struct smc_connection *conn = &smc->conn; + struct smc_link_group *lgr = conn->lgr; + int tmp_bufsize, tmp_bufsize_short; + struct smc_buf_desc *rmb_desc; + int rc; + + /* use socket recv buffer size (w/o overhead) as start value */ + for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_rcvbuf / 2); + tmp_bufsize_short >= 0; tmp_bufsize_short--) { + tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short); + /* check for reusable rmb_slot in the link group */ + rmb_desc = smc_rmb_get_slot(lgr, tmp_bufsize_short); + if (rmb_desc) { + memset(rmb_desc->cpu_addr, 0, tmp_bufsize); + break; /* found reusable slot */ + } + /* try to alloc a new RMB */ + rmb_desc = kzalloc(sizeof(*rmb_desc), GFP_KERNEL); + if (!rmb_desc) + break; /* give up with -ENOMEM */ + rmb_desc->cpu_addr = kzalloc(tmp_bufsize, + GFP_KERNEL | __GFP_NOWARN | + __GFP_NOMEMALLOC | + __GFP_NORETRY); + if (!rmb_desc->cpu_addr) { + kfree(rmb_desc); + /* if RMB allocation has failed, + * try a smaller one + */ + continue; + } + rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + tmp_bufsize, rmb_desc, + DMA_FROM_DEVICE); + if (rc) { + kfree(rmb_desc->cpu_addr); + kfree(rmb_desc); + continue; /* if mapping failed, try smaller one */ + } + rmb_desc->used = 1; + write_lock_bh(&lgr->rmbs_lock); + list_add(&rmb_desc->list, + &lgr->rmbs[tmp_bufsize_short]); + write_unlock_bh(&lgr->rmbs_lock); + break; + } + if (rmb_desc && rmb_desc->cpu_addr) { + conn->rmb_desc = rmb_desc; + conn->rmbe_size = tmp_bufsize; + conn->rmbe_size_short = tmp_bufsize_short; + smc->sk.sk_rcvbuf = tmp_bufsize * 2; + return 0; + } else { + return -ENOMEM; + } +} diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 14b787abae02..bf0026db44e9 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -16,6 +16,8 @@ #include "smc.h" #include "smc_ib.h" +#define SMC_RMBS_PER_LGR_MAX 255 /* max. 
# of RMBs per link group */ + struct smc_lgr_list { /* list of link group definition */ struct list_head list; spinlock_t lock; /* protects list of link groups */ @@ -52,6 +54,15 @@ struct smc_link { #define SMC_FIRST_CONTACT 1 /* first contact to a peer */ #define SMC_REUSE_CONTACT 0 /* follow-on contact to a peer*/ +/* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */ +struct smc_buf_desc { + struct list_head list; + u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; + /* mapped address of buffer */ + void *cpu_addr; /* virtual address of buffer */ + u32 used; /* currently used / unused */ +}; + struct smc_link_group { struct list_head list; enum smc_lgr_role role; /* client or server */ @@ -63,6 +74,11 @@ struct smc_link_group { rwlock_t conns_lock; /* protects conns_all */ unsigned int conns_num; /* current # of connections */ unsigned short vlan_id; /* vlan id of link group */ + + struct list_head sndbufs[SMC_RMBE_SIZES];/* tx buffers */ + rwlock_t sndbufs_lock; /* protects tx buffers */ + struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */ + rwlock_t rmbs_lock; /* protects rx buffers */ struct delayed_work free_work; /* delayed freeing of an lgr */ bool sync_err; /* lgr no longer fits to peer */ }; @@ -100,7 +116,12 @@ static inline struct smc_connection *smc_lgr_find_conn( return res; } +struct smc_sock; +struct smc_clc_msg_accept_confirm; + void smc_lgr_free(struct smc_link_group *lgr); void smc_lgr_terminate(struct smc_link_group *lgr); +int smc_sndbuf_create(struct smc_sock *smc); +int smc_rmb_create(struct smc_sock *smc); #endif diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 5b037f435bc1..762b7e13c93d 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -16,6 +16,7 @@ #include "smc_pnet.h" #include "smc_ib.h" +#include "smc_core.h" #include "smc.h" struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ @@ -29,6 +30,24 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system * identifier */ +/* map a new TX or RX buffer to DMA */ +int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction) +{ + int rc = 0; + + if (buf_slot->dma_addr[SMC_SINGLE_LINK]) + return rc; /* already mapped */ + buf_slot->dma_addr[SMC_SINGLE_LINK] = + ib_dma_map_single(smcibdev->ibdev, buf_slot->cpu_addr, + buf_size, data_direction); + if (ib_dma_mapping_error(smcibdev->ibdev, + buf_slot->dma_addr[SMC_SINGLE_LINK])) + rc = -EIO; + return rc; +} + static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) { struct net_device *ndev; diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 63613e715f4f..c3b61726861a 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -32,9 +32,14 @@ struct smc_ib_device { /* ib-device infos for smc */ u8 initialized : 1; /* ib dev CQ, evthdl done */ }; +struct smc_buf_desc; + int smc_ib_register_client(void) __init; void smc_ib_unregister_client(void); bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport); int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport); +int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction); #endif -- cgit v1.2.3 From f38ba179c6ca94ebeb0ac6a0956c4ea533151ad8 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:19 +0100 Subject: smc: work request (WR) base for use by LLC and CDC The base containers for RDMA transport are work requests and completion queue 
entries processed through Infiniband verbs: * allocate and initialize these areas * map these areas to DMA * implement the basic communication consisting of work request posting and receival of completion queue events Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/Makefile | 2 +- net/smc/smc.h | 5 + net/smc/smc_core.c | 11 ++ net/smc/smc_core.h | 30 +++ net/smc/smc_ib.c | 73 +++++++ net/smc/smc_ib.h | 11 ++ net/smc/smc_wr.c | 564 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_wr.h | 95 +++++++++ 8 files changed, 790 insertions(+), 1 deletion(-) create mode 100644 net/smc/smc_wr.c create mode 100644 net/smc/smc_wr.h diff --git a/net/smc/Makefile b/net/smc/Makefile index cb8bcd9df53e..b19120ed7102 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_SMC) += smc.o -smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o +smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o diff --git a/net/smc/smc.h b/net/smc/smc.h index 2bf504492133..209a0b5f59cb 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -12,6 +12,7 @@ #include #include +#include /* __aligned */ #include #include "smc_ib.h" @@ -29,6 +30,10 @@ enum smc_state { /* possible states of an SMC socket */ struct smc_link_group; +struct smc_wr_rx_hdr { /* common prefix part of LLC and CDC to demultiplex */ + u8 type; +} __aligned(1); + struct smc_connection { struct rb_node alert_node; struct smc_link_group *lgr; /* link group of connection */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index e1b95728ca81..0eed4c154081 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -20,6 +20,7 @@ #include "smc_clc.h" #include "smc_core.h" #include "smc_ib.h" +#include "smc_wr.h" #define SMC_LGR_FREE_DELAY (600 * HZ) @@ -161,12 +162,20 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; get_random_bytes(rndvec, sizeof(rndvec)); lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); + rc = smc_wr_alloc_link_mem(lnk); + if (rc) + goto free_lgr; + init_waitqueue_head(&lnk->wr_tx_wait); smc->conn.lgr = lgr; rwlock_init(&lgr->conns_lock); spin_lock_bh(&smc_lgr_list.lock); list_add(&lgr->list, &smc_lgr_list.list); spin_unlock_bh(&smc_lgr_list.lock); + return 0; + +free_lgr: + kfree(lgr); out: return rc; } @@ -202,6 +211,8 @@ void smc_conn_free(struct smc_connection *conn) static void smc_link_clear(struct smc_link *lnk) { lnk->peer_qpn = 0; + smc_wr_free_link(lnk); + smc_wr_free_link_mem(lnk); } static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index bf0026db44e9..ca4587a95450 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -11,6 +11,7 @@ #ifndef _SMC_CORE_H #define _SMC_CORE_H +#include #include #include "smc.h" @@ -30,11 +31,40 @@ enum smc_lgr_role { /* possible roles of a link group */ SMC_SERV /* server */ }; +#define SMC_WR_BUF_SIZE 48 /* size of work request buffer */ + +struct smc_wr_buf { + u8 raw[SMC_WR_BUF_SIZE]; +}; + struct smc_link { struct smc_ib_device *smcibdev; /* ib-device */ u8 ibport; /* port - values 1 | 2 */ + struct ib_pd *roce_pd; /* IB protection domain, + * unique for every RoCE QP + */ struct ib_qp *roce_qp; /* IB queue pair */ struct ib_qp_attr qp_attr; /* IB queue pair attributes */ + + struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ + struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ + struct ib_sge *wr_tx_sges; /* WR send gather meta data */ + 
struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ + /* above four vectors have wr_tx_cnt elements and use the same index */ + dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ + atomic_long_t wr_tx_id; /* seq # of last sent WR */ + unsigned long *wr_tx_mask; /* bit mask of used indexes */ + u32 wr_tx_cnt; /* number of WR send buffers */ + wait_queue_head_t wr_tx_wait; /* wait for free WR send buf */ + + struct smc_wr_buf *wr_rx_bufs; /* WR recv payload buffers */ + struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */ + struct ib_sge *wr_rx_sges; /* WR recv scatter meta data */ + /* above three vectors have wr_rx_cnt elements and use the same index */ + dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */ + u64 wr_rx_id; /* seq # of last recv WR */ + u32 wr_rx_cnt; /* number of WR recv buffers */ + union ib_gid gid; /* gid matching used vlan id */ u32 peer_qpn; /* QP number of peer */ enum ib_mtu path_mtu; /* used mtu */ diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 762b7e13c93d..9fb46a63a17e 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -17,6 +17,7 @@ #include "smc_pnet.h" #include "smc_ib.h" #include "smc_core.h" +#include "smc_wr.h" #include "smc.h" struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ @@ -30,6 +31,78 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system * identifier */ +void smc_ib_dealloc_protection_domain(struct smc_link *lnk) +{ + ib_dealloc_pd(lnk->roce_pd); + lnk->roce_pd = NULL; +} + +int smc_ib_create_protection_domain(struct smc_link *lnk) +{ + int rc; + + lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); + rc = PTR_ERR_OR_ZERO(lnk->roce_pd); + if (IS_ERR(lnk->roce_pd)) + lnk->roce_pd = NULL; + return rc; +} + +static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) +{ + switch (ibevent->event) { + case IB_EVENT_DEVICE_FATAL: + case IB_EVENT_GID_CHANGE: + case IB_EVENT_PORT_ERR: + case IB_EVENT_QP_ACCESS_ERR: + /* tbd in follow-on patch: + * abnormal close of corresponding connections + */ + break; + default: + break; + } +} + +void smc_ib_destroy_queue_pair(struct smc_link *lnk) +{ + ib_destroy_qp(lnk->roce_qp); + lnk->roce_qp = NULL; +} + +/* create a queue pair within the protection domain for a link */ +int smc_ib_create_queue_pair(struct smc_link *lnk) +{ + struct ib_qp_init_attr qp_attr = { + .event_handler = smc_ib_qp_event_handler, + .qp_context = lnk, + .send_cq = lnk->smcibdev->roce_cq_send, + .recv_cq = lnk->smcibdev->roce_cq_recv, + .srq = NULL, + .cap = { + .max_send_wr = SMC_WR_BUF_CNT, + /* include unsolicited rdma_writes as well, + * there are max. 
2 RDMA_WRITE per 1 WR_SEND + */ + .max_recv_wr = SMC_WR_BUF_CNT * 3, + .max_send_sge = SMC_IB_MAX_SEND_SGE, + .max_recv_sge = 1, + .max_inline_data = SMC_WR_TX_SIZE, + }, + .sq_sig_type = IB_SIGNAL_REQ_WR, + .qp_type = IB_QPT_RC, + }; + int rc; + + lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr); + rc = PTR_ERR_OR_ZERO(lnk->roce_qp); + if (IS_ERR(lnk->roce_qp)) + lnk->roce_qp = NULL; + else + smc_wr_remember_qp_attr(lnk); + return rc; +} + /* map a new TX or RX buffer to DMA */ int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, struct smc_buf_desc *buf_slot, diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index c3b61726861a..441fd168911d 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -16,6 +16,8 @@ #define SMC_MAX_PORTS 2 /* Max # of ports */ #define SMC_GID_SIZE sizeof(union ib_gid) +#define SMC_IB_MAX_SEND_SGE 2 + struct smc_ib_devices { /* list of smc ib devices definition */ struct list_head list; spinlock_t lock; /* protects list of smc ib devices */ @@ -27,12 +29,17 @@ struct smc_ib_device { /* ib-device infos for smc */ struct list_head list; struct ib_device *ibdev; struct ib_port_attr pattr[SMC_MAX_PORTS]; /* ib dev. port attrs */ + struct ib_cq *roce_cq_send; /* send completion queue */ + struct ib_cq *roce_cq_recv; /* recv completion queue */ + struct tasklet_struct send_tasklet; /* called by send cq handler */ + struct tasklet_struct recv_tasklet; /* called by recv cq handler */ char mac[SMC_MAX_PORTS][6]; /* mac address per port*/ union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */ u8 initialized : 1; /* ib dev CQ, evthdl done */ }; struct smc_buf_desc; +struct smc_link; int smc_ib_register_client(void) __init; void smc_ib_unregister_client(void); @@ -41,5 +48,9 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport); int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); +void smc_ib_dealloc_protection_domain(struct smc_link *lnk); +int smc_ib_create_protection_domain(struct smc_link *lnk); +void smc_ib_destroy_queue_pair(struct smc_link *lnk); +int smc_ib_create_queue_pair(struct smc_link *lnk); #endif diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c new file mode 100644 index 000000000000..a2bc6b612d03 --- /dev/null +++ b/net/smc/smc_wr.c @@ -0,0 +1,564 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Work Requests exploiting Infiniband API + * + * Work requests (WR) of type ib_post_send or ib_post_recv respectively + * are submitted to either RC SQ or RC RQ respectively + * (reliably connected send/receive queue) + * and become work queue entries (WQEs). + * While an SQ WR/WQE is pending, we track it until transmission completion. + * Through a send or receive completion queue (CQ) respectively, + * we get completion queue entries (CQEs) [aka work completions (WCs)]. + * Since the CQ callback is called from IRQ context, we split work by using + * bottom halves implemented by tasklets. + * + * SMC uses this to exchange LLC (link layer control) + * and CDC (connection data control) messages. + * + * Copyright IBM Corp. 2016 + * + * Author(s): Steffen Maier + */ + +#include +#include +#include +#include +#include + +#include "smc.h" +#include "smc_wr.h" + +#define SMC_WR_MAX_POLL_CQE 10 /* max. # of compl. 
queue elements in 1 poll */ + +#define SMC_WR_RX_HASH_BITS 4 +static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS); +static DEFINE_SPINLOCK(smc_wr_rx_hash_lock); + +struct smc_wr_tx_pend { /* control data for a pending send request */ + u64 wr_id; /* work request id sent */ + smc_wr_tx_handler handler; + enum ib_wc_status wc_status; /* CQE status */ + struct smc_link *link; + u32 idx; + struct smc_wr_tx_pend_priv priv; +}; + +/******************************** send queue *********************************/ + +/*------------------------------- completion --------------------------------*/ + +static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id) +{ + u32 i; + + for (i = 0; i < link->wr_tx_cnt; i++) { + if (link->wr_tx_pends[i].wr_id == wr_id) + return i; + } + return link->wr_tx_cnt; +} + +static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) +{ + struct smc_wr_tx_pend pnd_snd; + struct smc_link *link; + u32 pnd_snd_idx; + int i; + + link = wc->qp->qp_context; + pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id); + if (pnd_snd_idx == link->wr_tx_cnt) + return; + link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status; + memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd)); + /* clear the full struct smc_wr_tx_pend including .priv */ + memset(&link->wr_tx_pends[pnd_snd_idx], 0, + sizeof(link->wr_tx_pends[pnd_snd_idx])); + memset(&link->wr_tx_bufs[pnd_snd_idx], 0, + sizeof(link->wr_tx_bufs[pnd_snd_idx])); + if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask)) + return; + if (wc->status) { + for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { + /* clear full struct smc_wr_tx_pend including .priv */ + memset(&link->wr_tx_pends[i], 0, + sizeof(link->wr_tx_pends[i])); + memset(&link->wr_tx_bufs[i], 0, + sizeof(link->wr_tx_bufs[i])); + clear_bit(i, link->wr_tx_mask); + } + /* tbd in future patch: terminate connections of this link + * group abnormally + */ + } + if (pnd_snd.handler) + pnd_snd.handler(&pnd_snd.priv, link, wc->status); + wake_up(&link->wr_tx_wait); +} + +static void smc_wr_tx_tasklet_fn(unsigned long data) +{ + struct smc_ib_device *dev = (struct smc_ib_device *)data; + struct ib_wc wc[SMC_WR_MAX_POLL_CQE]; + int i = 0, rc; + int polled = 0; + +again: + polled++; + do { + rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc); + if (polled == 1) { + ib_req_notify_cq(dev->roce_cq_send, + IB_CQ_NEXT_COMP | + IB_CQ_REPORT_MISSED_EVENTS); + } + if (!rc) + break; + for (i = 0; i < rc; i++) + smc_wr_tx_process_cqe(&wc[i]); + } while (rc > 0); + if (polled == 1) + goto again; +} + +void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context) +{ + struct smc_ib_device *dev = (struct smc_ib_device *)cq_context; + + tasklet_schedule(&dev->send_tasklet); +} + +/*---------------------------- request submission ---------------------------*/ + +static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) +{ + *idx = link->wr_tx_cnt; + for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) { + if (!test_and_set_bit(*idx, link->wr_tx_mask)) + return 0; + } + *idx = link->wr_tx_cnt; + return -EBUSY; +} + +/** + * smc_wr_tx_get_free_slot() - returns buffer for message assembly, + * and sets info for pending transmit tracking + * @link: Pointer to smc_link used to later send the message. + * @handler: Send completion handler function pointer. + * @wr_buf: Out value returns pointer to message buffer. + * @wr_pend_priv: Out value returns pointer serving as handler context. 
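/*
 * Illustration only, not part of the patch: a hypothetical caller of
 * the send-slot API documented above, showing the intended life cycle:
 * reserve a slot, build the 44-byte payload in the returned buffer,
 * post it with smc_wr_tx_send(), and learn the outcome through the
 * completion handler. The real users (LLC and CDC) arrive in later
 * patches; the demo_* names are made up.
 */
#include "smc.h"
#include "smc_wr.h"

static void demo_tx_handler(struct smc_wr_tx_pend_priv *priv,
			    struct smc_link *link,
			    enum ib_wc_status wc_status)
{
	if (wc_status != IB_WC_SUCCESS)
		pr_warn("demo send failed, wc_status %d\n", wc_status);
}

static int demo_send_one_msg(struct smc_link *link)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_wr_tx_get_free_slot(link, demo_tx_handler, &wr_buf, &pend);
	if (rc)
		return rc;	/* no slot: -EBUSY, -EPIPE or -EINTR */

	memset(wr_buf, 0, sizeof(*wr_buf));	/* assemble the message here */

	/* posts the prepared WR; puts the slot back on failure */
	return smc_wr_tx_send(link, pend);
}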
+ * + * Return: 0 on success, or -errno on error. + */ +int smc_wr_tx_get_free_slot(struct smc_link *link, + smc_wr_tx_handler handler, + struct smc_wr_buf **wr_buf, + struct smc_wr_tx_pend_priv **wr_pend_priv) +{ + struct smc_wr_tx_pend *wr_pend; + struct ib_send_wr *wr_ib; + u64 wr_id; + u32 idx; + int rc; + + *wr_buf = NULL; + *wr_pend_priv = NULL; + if (in_softirq()) { + rc = smc_wr_tx_get_free_slot_index(link, &idx); + if (rc) + return rc; + } else { + rc = wait_event_interruptible_timeout( + link->wr_tx_wait, + (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY), + SMC_WR_TX_WAIT_FREE_SLOT_TIME); + if (!rc) { + /* tbd in future patch: timeout - terminate connections + * of this link group abnormally + */ + return -EPIPE; + } + if (rc == -ERESTARTSYS) + return -EINTR; + if (idx == link->wr_tx_cnt) + return -EPIPE; + } + wr_id = smc_wr_tx_get_next_wr_id(link); + wr_pend = &link->wr_tx_pends[idx]; + wr_pend->wr_id = wr_id; + wr_pend->handler = handler; + wr_pend->link = link; + wr_pend->idx = idx; + wr_ib = &link->wr_tx_ibs[idx]; + wr_ib->wr_id = wr_id; + *wr_buf = &link->wr_tx_bufs[idx]; + *wr_pend_priv = &wr_pend->priv; + return 0; +} + +int smc_wr_tx_put_slot(struct smc_link *link, + struct smc_wr_tx_pend_priv *wr_pend_priv) +{ + struct smc_wr_tx_pend *pend; + + pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); + if (pend->idx < link->wr_tx_cnt) { + /* clear the full struct smc_wr_tx_pend including .priv */ + memset(&link->wr_tx_pends[pend->idx], 0, + sizeof(link->wr_tx_pends[pend->idx])); + memset(&link->wr_tx_bufs[pend->idx], 0, + sizeof(link->wr_tx_bufs[pend->idx])); + test_and_clear_bit(pend->idx, link->wr_tx_mask); + return 1; + } + + return 0; +} + +/* Send prepared WR slot via ib_post_send. + * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer + */ +int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) +{ + struct ib_send_wr *failed_wr = NULL; + struct smc_wr_tx_pend *pend; + int rc; + + ib_req_notify_cq(link->smcibdev->roce_cq_send, + IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS); + pend = container_of(priv, struct smc_wr_tx_pend, priv); + rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], + &failed_wr); + if (rc) + smc_wr_tx_put_slot(link, priv); + return rc; +} + +/****************************** receive queue ********************************/ + +int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler) +{ + struct smc_wr_rx_handler *h_iter; + int rc = 0; + + spin_lock(&smc_wr_rx_hash_lock); + hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) { + if (h_iter->type == handler->type) { + rc = -EEXIST; + goto out_unlock; + } + } + hash_add(smc_wr_rx_hash, &handler->list, handler->type); +out_unlock: + spin_unlock(&smc_wr_rx_hash_lock); + return rc; +} + +/* Demultiplex a received work request based on the message type to its handler. + * Relies on smc_wr_rx_hash having been completely filled before any IB WRs, + * and not being modified any more afterwards so we don't need to lock it. 
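/*
 * Illustration only, not part of the patch: a hypothetical protocol
 * layer registering for the demultiplexing described above. One
 * smc_wr_rx_handler is registered per wire type before any receive WRs
 * are posted; the receive tasklet then dispatches each buffer by the
 * leading type byte of struct smc_wr_rx_hdr. The type value 0xfe and
 * the demo_* names are made up.
 */
#include <linux/init.h>

#include "smc.h"
#include "smc_wr.h"

#define DEMO_MSG_TYPE	0xfe	/* fictitious wire type */

static void demo_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_wr_rx_hdr *hdr = buf;	/* buffer starts with type byte */

	pr_debug("demo msg type 0x%02x, %u bytes\n", hdr->type, wc->byte_len);
}

static struct smc_wr_rx_handler demo_rx = {
	.handler	= demo_rx_handler,
	.type		= DEMO_MSG_TYPE,
};

static int __init demo_rx_init(void)
{
	/* returns -EEXIST if the type is already taken */
	return smc_wr_rx_register_handler(&demo_rx);
}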
+ */ +static inline void smc_wr_rx_demultiplex(struct ib_wc *wc) +{ + struct smc_link *link = (struct smc_link *)wc->qp->qp_context; + struct smc_wr_rx_handler *handler; + struct smc_wr_rx_hdr *wr_rx; + u64 temp_wr_id; + u32 index; + + if (wc->byte_len < sizeof(*wr_rx)) + return; /* short message */ + temp_wr_id = wc->wr_id; + index = do_div(temp_wr_id, link->wr_rx_cnt); + wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index]; + hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) { + if (handler->type == wr_rx->type) + handler->handler(wc, wr_rx); + } +} + +static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) +{ + struct smc_link *link; + int i; + + for (i = 0; i < num; i++) { + link = wc[i].qp->qp_context; + if (wc[i].status == IB_WC_SUCCESS) { + smc_wr_rx_demultiplex(&wc[i]); + smc_wr_rx_post(link); /* refill WR RX */ + } else { + /* handle status errors */ + switch (wc[i].status) { + case IB_WC_RETRY_EXC_ERR: + case IB_WC_RNR_RETRY_EXC_ERR: + case IB_WC_WR_FLUSH_ERR: + /* tbd in future patch: terminate connections of this + * link group abnormally + */ + break; + default: + smc_wr_rx_post(link); /* refill WR RX */ + break; + } + } + } +} + +static void smc_wr_rx_tasklet_fn(unsigned long data) +{ + struct smc_ib_device *dev = (struct smc_ib_device *)data; + struct ib_wc wc[SMC_WR_MAX_POLL_CQE]; + int polled = 0; + int rc; + +again: + polled++; + do { + memset(&wc, 0, sizeof(wc)); + rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc); + if (polled == 1) { + ib_req_notify_cq(dev->roce_cq_recv, + IB_CQ_SOLICITED_MASK + | IB_CQ_REPORT_MISSED_EVENTS); + } + if (!rc) + break; + smc_wr_rx_process_cqes(&wc[0], rc); + } while (rc > 0); + if (polled == 1) + goto again; +} + +void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context) +{ + struct smc_ib_device *dev = (struct smc_ib_device *)cq_context; + + tasklet_schedule(&dev->recv_tasklet); +} + +int smc_wr_rx_post_init(struct smc_link *link) +{ + u32 i; + int rc = 0; + + for (i = 0; i < link->wr_rx_cnt; i++) + rc = smc_wr_rx_post(link); + return rc; +} + +/***************************** init, exit, misc ******************************/ + +void smc_wr_remember_qp_attr(struct smc_link *lnk) +{ + struct ib_qp_attr *attr = &lnk->qp_attr; + struct ib_qp_init_attr init_attr; + + memset(attr, 0, sizeof(*attr)); + memset(&init_attr, 0, sizeof(init_attr)); + ib_query_qp(lnk->roce_qp, attr, + IB_QP_STATE | + IB_QP_CUR_STATE | + IB_QP_PKEY_INDEX | + IB_QP_PORT | + IB_QP_QKEY | + IB_QP_AV | + IB_QP_PATH_MTU | + IB_QP_TIMEOUT | + IB_QP_RETRY_CNT | + IB_QP_RNR_RETRY | + IB_QP_RQ_PSN | + IB_QP_ALT_PATH | + IB_QP_MIN_RNR_TIMER | + IB_QP_SQ_PSN | + IB_QP_PATH_MIG_STATE | + IB_QP_CAP | + IB_QP_DEST_QPN, + &init_attr); + + lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT, + lnk->qp_attr.cap.max_send_wr); + lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3, + lnk->qp_attr.cap.max_recv_wr); +} + +static void smc_wr_init_sge(struct smc_link *lnk) +{ + u32 i; + + for (i = 0; i < lnk->wr_tx_cnt; i++) { + lnk->wr_tx_sges[i].addr = + lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; + lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; + lnk->wr_tx_ibs[i].next = NULL; + lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; + lnk->wr_tx_ibs[i].num_sge = 1; + lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; + lnk->wr_tx_ibs[i].send_flags = + IB_SEND_SIGNALED | IB_SEND_SOLICITED | IB_SEND_INLINE; + } + for (i = 0; i < lnk->wr_rx_cnt; i++) { + lnk->wr_rx_sges[i].addr = + lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE; + lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE; + 
lnk->wr_rx_ibs[i].next = NULL; + lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i]; + lnk->wr_rx_ibs[i].num_sge = 1; + } +} + +void smc_wr_free_link(struct smc_link *lnk) +{ + struct ib_device *ibdev; + + memset(lnk->wr_tx_mask, 0, + BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); + + if (!lnk->smcibdev) + return; + ibdev = lnk->smcibdev->ibdev; + + if (lnk->wr_rx_dma_addr) { + ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, + SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, + DMA_FROM_DEVICE); + lnk->wr_rx_dma_addr = 0; + } + if (lnk->wr_tx_dma_addr) { + ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr, + SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, + DMA_TO_DEVICE); + lnk->wr_tx_dma_addr = 0; + } +} + +void smc_wr_free_link_mem(struct smc_link *lnk) +{ + kfree(lnk->wr_tx_pends); + lnk->wr_tx_pends = NULL; + kfree(lnk->wr_tx_mask); + lnk->wr_tx_mask = NULL; + kfree(lnk->wr_tx_sges); + lnk->wr_tx_sges = NULL; + kfree(lnk->wr_rx_sges); + lnk->wr_rx_sges = NULL; + kfree(lnk->wr_rx_ibs); + lnk->wr_rx_ibs = NULL; + kfree(lnk->wr_tx_ibs); + lnk->wr_tx_ibs = NULL; + kfree(lnk->wr_tx_bufs); + lnk->wr_tx_bufs = NULL; + kfree(lnk->wr_rx_bufs); + lnk->wr_rx_bufs = NULL; +} + +int smc_wr_alloc_link_mem(struct smc_link *link) +{ + /* allocate link related memory */ + link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL); + if (!link->wr_tx_bufs) + goto no_mem; + link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE, + GFP_KERNEL); + if (!link->wr_rx_bufs) + goto no_mem_wr_tx_bufs; + link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]), + GFP_KERNEL); + if (!link->wr_tx_ibs) + goto no_mem_wr_rx_bufs; + link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3, + sizeof(link->wr_rx_ibs[0]), + GFP_KERNEL); + if (!link->wr_rx_ibs) + goto no_mem_wr_tx_ibs; + link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), + GFP_KERNEL); + if (!link->wr_tx_sges) + goto no_mem_wr_rx_ibs; + link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, + sizeof(link->wr_rx_sges[0]), + GFP_KERNEL); + if (!link->wr_rx_sges) + goto no_mem_wr_tx_sges; + link->wr_tx_mask = kzalloc( + BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*link->wr_tx_mask), + GFP_KERNEL); + if (!link->wr_tx_mask) + goto no_mem_wr_rx_sges; + link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT, + sizeof(link->wr_tx_pends[0]), + GFP_KERNEL); + if (!link->wr_tx_pends) + goto no_mem_wr_tx_mask; + return 0; + +no_mem_wr_tx_mask: + kfree(link->wr_tx_mask); +no_mem_wr_rx_sges: + kfree(link->wr_rx_sges); +no_mem_wr_tx_sges: + kfree(link->wr_tx_sges); +no_mem_wr_rx_ibs: + kfree(link->wr_rx_ibs); +no_mem_wr_tx_ibs: + kfree(link->wr_tx_ibs); +no_mem_wr_rx_bufs: + kfree(link->wr_rx_bufs); +no_mem_wr_tx_bufs: + kfree(link->wr_tx_bufs); +no_mem: + return -ENOMEM; +} + +void smc_wr_remove_dev(struct smc_ib_device *smcibdev) +{ + tasklet_kill(&smcibdev->recv_tasklet); + tasklet_kill(&smcibdev->send_tasklet); +} + +void smc_wr_add_dev(struct smc_ib_device *smcibdev) +{ + tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn, + (unsigned long)smcibdev); + tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn, + (unsigned long)smcibdev); +} + +int smc_wr_create_link(struct smc_link *lnk) +{ + struct ib_device *ibdev = lnk->smcibdev->ibdev; + int rc = 0; + + smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0); + lnk->wr_rx_id = 0; + lnk->wr_rx_dma_addr = ib_dma_map_single( + ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, + DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) { + lnk->wr_rx_dma_addr = 0; + rc = -EIO; + goto out; + } + 
lnk->wr_tx_dma_addr = ib_dma_map_single( + ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) { + rc = -EIO; + goto dma_unmap; + } + smc_wr_init_sge(lnk); + memset(lnk->wr_tx_mask, 0, + BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); + return rc; + +dma_unmap: + ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, + SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, + DMA_FROM_DEVICE); + lnk->wr_rx_dma_addr = 0; +out: + return rc; +} diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h new file mode 100644 index 000000000000..0b626728ce44 --- /dev/null +++ b/net/smc/smc_wr.h @@ -0,0 +1,95 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Work Requests exploiting Infiniband API + * + * Copyright IBM Corp. 2016 + * + * Author(s): Steffen Maier + */ + +#ifndef SMC_WR_H +#define SMC_WR_H + +#include +#include +#include + +#include "smc.h" +#include "smc_core.h" + +#define SMC_WR_MAX_CQE 32768 /* max. # of completion queue elements */ +#define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */ + +#define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ) +#define SMC_WR_TX_WAIT_PENDING_TIME (5 * HZ) + +#define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */ + +#define SMC_WR_TX_PEND_PRIV_SIZE 32 + +struct smc_wr_tx_pend_priv { + u8 priv[SMC_WR_TX_PEND_PRIV_SIZE]; +}; + +typedef void (*smc_wr_tx_handler)(struct smc_wr_tx_pend_priv *, + struct smc_link *, + enum ib_wc_status); + +struct smc_wr_rx_handler { + struct hlist_node list; /* hash table collision resolution */ + void (*handler)(struct ib_wc *, void *); + u8 type; +}; + +/* Only used by RDMA write WRs. + * All other WRs (CDC/LLC) use smc_wr_tx_send handling WR_ID implicitly + */ +static inline long smc_wr_tx_get_next_wr_id(struct smc_link *link) +{ + return atomic_long_inc_return(&link->wr_tx_id); +} + +static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val) +{ + atomic_long_set(wr_tx_id, val); +} + +/* post a new receive work request to fill a completed old work request entry */ +static inline int smc_wr_rx_post(struct smc_link *link) +{ + struct ib_recv_wr *bad_recv_wr = NULL; + int rc; + u64 wr_id, temp_wr_id; + u32 index; + + wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */ + temp_wr_id = wr_id; + index = do_div(temp_wr_id, link->wr_rx_cnt); + link->wr_rx_ibs[index].wr_id = wr_id; + rc = ib_post_recv(link->roce_qp, &link->wr_rx_ibs[index], &bad_recv_wr); + return rc; +} + +int smc_wr_create_link(struct smc_link *lnk); +int smc_wr_alloc_link_mem(struct smc_link *lnk); +void smc_wr_free_link(struct smc_link *lnk); +void smc_wr_free_link_mem(struct smc_link *lnk); +void smc_wr_remember_qp_attr(struct smc_link *lnk); +void smc_wr_remove_dev(struct smc_ib_device *smcibdev); +void smc_wr_add_dev(struct smc_ib_device *smcibdev); + +int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, + struct smc_wr_buf **wr_buf, + struct smc_wr_tx_pend_priv **wr_pend_priv); +int smc_wr_tx_put_slot(struct smc_link *link, + struct smc_wr_tx_pend_priv *wr_pend_priv); +int smc_wr_tx_send(struct smc_link *link, + struct smc_wr_tx_pend_priv *wr_pend_priv); +void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context); + +int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler); +int smc_wr_rx_post_init(struct smc_link *link); +void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context); + +#endif /* SMC_WR_H */ -- cgit v1.2.3 From bd4ad57718cc86d2972a20f9791cd079996a4dd6 Mon Sep 17 
00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:20 +0100 Subject: smc: initialize IB transport incl. PD, MR, QP, CQ, event, WR Prepare the link for RDMA transport: Create a queue pair (QP) and move it into the state Ready-To-Receive (RTR). Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/af_smc.c | 34 ++++++-- net/smc/smc.h | 1 + net/smc/smc_clc.c | 10 ++- net/smc/smc_core.c | 76 ++++++++++++++++++ net/smc/smc_core.h | 18 +++++ net/smc/smc_ib.c | 229 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_ib.h | 13 +++ net/smc/smc_wr.c | 2 + 8 files changed, 374 insertions(+), 9 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index a38f470130d3..1026fad35998 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -339,9 +339,20 @@ static int smc_connect_rdma(struct smc_sock *smc) if (local_contact == SMC_FIRST_CONTACT) smc_link_save_peer_info(link, &aclc); - /* tbd in follow-on patch: more steps to setup RDMA communcication, - * create rmbs, map rmbs, rtoken_handling, modify_qp - */ + + rc = smc_rmb_rtoken_handling(&smc->conn, &aclc); + if (rc) { + reason_code = SMC_CLC_DECL_INTERR; + goto decline_rdma_unlock; + } + + if (local_contact == SMC_FIRST_CONTACT) { + rc = smc_ib_ready_link(link); + if (rc) { + reason_code = SMC_CLC_DECL_INTERR; + goto decline_rdma_unlock; + } + } rc = smc_clc_send_confirm(smc); if (rc) @@ -638,9 +649,20 @@ static void smc_listen_work(struct work_struct *work) if (local_contact == SMC_FIRST_CONTACT) smc_link_save_peer_info(link, &cclc); - /* tbd in follow-on patch: more steps to setup RDMA communcication, - * rtoken_handling, modify_qp - */ + rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc); + if (rc) { + reason_code = SMC_CLC_DECL_INTERR; + goto decline_rdma; + } + + /* tbd in follow-on patch: modify_qp, llc_confirm */ + if (local_contact == SMC_FIRST_CONTACT) { + rc = smc_ib_ready_link(link); + if (rc) { + reason_code = SMC_CLC_DECL_INTERR; + goto decline_rdma; + } + } out_connected: sk_refcnt_debug_inc(newsmcsk); diff --git a/net/smc/smc.h b/net/smc/smc.h index 209a0b5f59cb..4d2e54d176f2 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -43,6 +43,7 @@ struct smc_connection { atomic_t peer_rmbe_space;/* remaining free bytes in peer * rmbe */ + int rtoken_idx; /* idx to peer RMB rkey/addr */ struct smc_buf_desc *sndbuf_desc; /* send buffer descriptor */ int sndbuf_size; /* sndbuf size <== sock wmem */ diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 4b475dddef16..e1e684c562b8 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -201,13 +201,15 @@ int smc_clc_send_confirm(struct smc_sock *smc) SMC_GID_SIZE); memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], sizeof(link->smcibdev->mac)); - - /* tbd in follow-on patch: fill in rmb-related values */ - hton24(cclc.qpn, link->roce_qp->qp_num); + cclc.rmb_rkey = + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ cclc.rmbe_alert_token = htonl(conn->alert_token_local); cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); + cclc.rmbe_size = conn->rmbe_size_short; + cclc.rmb_dma_addr = + cpu_to_be64((u64)conn->rmb_desc->dma_addr[SMC_SINGLE_LINK]); hton24(cclc.psn, link->psn_initial); memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); @@ -253,6 +255,8 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], sizeof(link->smcibdev->mac[link->ibport - 1])); hton24(aclc.qpn, 
link->roce_qp->qp_num); + aclc.rmb_rkey = + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ aclc.rmbe_alert_token = htonl(conn->alert_token_local); aclc.qp_mtu = link->path_mtu; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 0eed4c154081..0e9adbd9cd68 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -160,12 +160,23 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, lnk->smcibdev = smcibdev; lnk->ibport = ibport; lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; + if (!smcibdev->initialized) + smc_ib_setup_per_ibdev(smcibdev); get_random_bytes(rndvec, sizeof(rndvec)); lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); rc = smc_wr_alloc_link_mem(lnk); if (rc) goto free_lgr; init_waitqueue_head(&lnk->wr_tx_wait); + rc = smc_ib_create_protection_domain(lnk); + if (rc) + goto free_link_mem; + rc = smc_ib_create_queue_pair(lnk); + if (rc) + goto dealloc_pd; + rc = smc_wr_create_link(lnk); + if (rc) + goto destroy_qp; smc->conn.lgr = lgr; rwlock_init(&lgr->conns_lock); @@ -174,6 +185,12 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, spin_unlock_bh(&smc_lgr_list.lock); return 0; +destroy_qp: + smc_ib_destroy_queue_pair(lnk); +dealloc_pd: + smc_ib_dealloc_protection_domain(lnk); +free_link_mem: + smc_wr_free_link_mem(lnk); free_lgr: kfree(lgr); out: @@ -211,7 +228,10 @@ void smc_conn_free(struct smc_connection *conn) static void smc_link_clear(struct smc_link *lnk) { lnk->peer_qpn = 0; + smc_ib_modify_qp_reset(lnk); smc_wr_free_link(lnk); + smc_ib_destroy_queue_pair(lnk); + smc_ib_dealloc_protection_domain(lnk); smc_wr_free_link_mem(lnk); } @@ -223,6 +243,10 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) for (i = 0; i < SMC_RMBE_SIZES; i++) { list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i], list) { + list_del(&sndbuf_desc->list); + smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + smc_uncompress_bufsize(i), + sndbuf_desc, DMA_TO_DEVICE); kfree(sndbuf_desc->cpu_addr); kfree(sndbuf_desc); } @@ -232,11 +256,16 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) static void smc_lgr_free_rmbs(struct smc_link_group *lgr) { struct smc_buf_desc *rmb_desc, *bf_desc; + struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; int i; for (i = 0; i < SMC_RMBE_SIZES; i++) { list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i], list) { + list_del(&rmb_desc->list); + smc_ib_buf_unmap(lnk->smcibdev, + smc_uncompress_bufsize(i), + rmb_desc, DMA_FROM_DEVICE); kfree(rmb_desc->cpu_addr); kfree(rmb_desc); } @@ -550,6 +579,18 @@ int smc_rmb_create(struct smc_sock *smc) kfree(rmb_desc); continue; /* if mapping failed, try smaller one */ } + rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_LOCAL_WRITE, + &rmb_desc->mr_rx[SMC_SINGLE_LINK]); + if (rc) { + smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + tmp_bufsize, rmb_desc, + DMA_FROM_DEVICE); + kfree(rmb_desc->cpu_addr); + kfree(rmb_desc); + continue; + } rmb_desc->used = 1; write_lock_bh(&lgr->rmbs_lock); list_add(&rmb_desc->list, @@ -567,3 +608,38 @@ int smc_rmb_create(struct smc_sock *smc) return -ENOMEM; } } + +static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr) +{ + int i; + + for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) { + if (!test_and_set_bit(i, lgr->rtokens_used_mask)) + return i; + } + return -ENOSPC; +} + +/* save rkey and dma_addr received from peer during clc 
handshake */ +int smc_rmb_rtoken_handling(struct smc_connection *conn, + struct smc_clc_msg_accept_confirm *clc) +{ + u64 dma_addr = be64_to_cpu(clc->rmb_dma_addr); + struct smc_link_group *lgr = conn->lgr; + u32 rkey = ntohl(clc->rmb_rkey); + int i; + + for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { + if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && + test_bit(i, lgr->rtokens_used_mask)) { + conn->rtoken_idx = i; + return 0; + } + } + conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr); + if (conn->rtoken_idx < 0) + return conn->rtoken_idx; + lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = rkey; + lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = dma_addr; + return 0; +} diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index ca4587a95450..f5ea52086d6d 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -90,9 +90,18 @@ struct smc_buf_desc { u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; /* mapped address of buffer */ void *cpu_addr; /* virtual address of buffer */ + struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; + /* for rmb only: + * rkey provided to peer + */ u32 used; /* currently used / unused */ }; +struct smc_rtoken { /* address/key of remote RMB */ + u64 dma_addr; + u32 rkey; +}; + struct smc_link_group { struct list_head list; enum smc_lgr_role role; /* client or server */ @@ -109,6 +118,13 @@ struct smc_link_group { rwlock_t sndbufs_lock; /* protects tx buffers */ struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */ rwlock_t rmbs_lock; /* protects rx buffers */ + struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] + [SMC_LINKS_PER_LGR_MAX]; + /* remote addr/key pairs */ + unsigned long rtokens_used_mask[BITS_TO_LONGS( + SMC_RMBS_PER_LGR_MAX)]; + /* used rtoken elements */ + struct delayed_work free_work; /* delayed freeing of an lgr */ bool sync_err; /* lgr no longer fits to peer */ }; @@ -153,5 +169,7 @@ void smc_lgr_free(struct smc_link_group *lgr); void smc_lgr_terminate(struct smc_link_group *lgr); int smc_sndbuf_create(struct smc_sock *smc); int smc_rmb_create(struct smc_sock *smc); +int smc_rmb_rtoken_handling(struct smc_connection *conn, + struct smc_clc_msg_accept_confirm *clc); #endif diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 9fb46a63a17e..e6743c008ac5 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -12,6 +12,7 @@ */ #include +#include #include #include "smc_pnet.h" @@ -20,6 +21,11 @@ #include "smc_wr.h" #include "smc.h" +#define SMC_QP_MIN_RNR_TIMER 5 +#define SMC_QP_TIMEOUT 15 /* 4096 * 2 ** timeout usec */ +#define SMC_QP_RETRY_CNT 7 /* 7: infinite */ +#define SMC_QP_RNR_RETRY 7 /* 7: infinite */ + struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock), .list = LIST_HEAD_INIT(smc_ib_devices.list), @@ -31,6 +37,172 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system * identifier */ +int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, + struct ib_mr **mr) +{ + int rc; + + if (*mr) + return 0; /* already done */ + + /* obtain unique key - + * next invocation of get_dma_mr returns a different key! 
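/*
 * Illustration only, not part of the patch: a condensed view of how the
 * memory region obtained here is consumed. The rkey of the per-RMB MR
 * and the RMB's mapped DMA address are advertised to the peer in the
 * CLC accept/confirm message; the peer stores that pair as an rtoken
 * via smc_rmb_rtoken_handling() for its later RDMA writes. demo_* is
 * made up; the field names follow the structures used in this series.
 */
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_ib.h"

static int demo_advertise_rmb(struct smc_connection *conn,
			      struct smc_link *lnk,
			      struct smc_clc_msg_accept_confirm *msg)
{
	struct smc_buf_desc *rmb = conn->rmb_desc;
	int rc;

	rc = smc_ib_get_memory_region(lnk->roce_pd,
				      IB_ACCESS_REMOTE_WRITE |
				      IB_ACCESS_LOCAL_WRITE,
				      &rmb->mr_rx[SMC_SINGLE_LINK]);
	if (rc)
		return rc;

	msg->rmb_rkey = htonl(rmb->mr_rx[SMC_SINGLE_LINK]->rkey);
	msg->rmb_dma_addr =
		cpu_to_be64((u64)rmb->dma_addr[SMC_SINGLE_LINK]);
	return 0;
}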
+ */ + *mr = pd->device->get_dma_mr(pd, access_flags); + rc = PTR_ERR_OR_ZERO(*mr); + if (IS_ERR(*mr)) + *mr = NULL; + return rc; +} + +static int smc_ib_modify_qp_init(struct smc_link *lnk) +{ + struct ib_qp_attr qp_attr; + + memset(&qp_attr, 0, sizeof(qp_attr)); + qp_attr.qp_state = IB_QPS_INIT; + qp_attr.pkey_index = 0; + qp_attr.port_num = lnk->ibport; + qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE + | IB_ACCESS_REMOTE_WRITE; + return ib_modify_qp(lnk->roce_qp, &qp_attr, + IB_QP_STATE | IB_QP_PKEY_INDEX | + IB_QP_ACCESS_FLAGS | IB_QP_PORT); +} + +static int smc_ib_modify_qp_rtr(struct smc_link *lnk) +{ + enum ib_qp_attr_mask qp_attr_mask = + IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | + IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; + struct ib_qp_attr qp_attr; + + memset(&qp_attr, 0, sizeof(qp_attr)); + qp_attr.qp_state = IB_QPS_RTR; + qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu); + qp_attr.ah_attr.port_num = lnk->ibport; + qp_attr.ah_attr.ah_flags = IB_AH_GRH; + qp_attr.ah_attr.grh.hop_limit = 1; + memcpy(&qp_attr.ah_attr.grh.dgid, lnk->peer_gid, + sizeof(lnk->peer_gid)); + memcpy(&qp_attr.ah_attr.dmac, lnk->peer_mac, + sizeof(lnk->peer_mac)); + qp_attr.dest_qp_num = lnk->peer_qpn; + qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */ + qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming + * requests + */ + qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER; + + return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask); +} + +int smc_ib_modify_qp_rts(struct smc_link *lnk) +{ + struct ib_qp_attr qp_attr; + + memset(&qp_attr, 0, sizeof(qp_attr)); + qp_attr.qp_state = IB_QPS_RTS; + qp_attr.timeout = SMC_QP_TIMEOUT; /* local ack timeout */ + qp_attr.retry_cnt = SMC_QP_RETRY_CNT; /* retry count */ + qp_attr.rnr_retry = SMC_QP_RNR_RETRY; /* RNR retries, 7=infinite */ + qp_attr.sq_psn = lnk->psn_initial; /* starting send packet seq # */ + qp_attr.max_rd_atomic = 1; /* # of outstanding RDMA reads and + * atomic ops allowed + */ + return ib_modify_qp(lnk->roce_qp, &qp_attr, + IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | + IB_QP_SQ_PSN | IB_QP_RNR_RETRY | + IB_QP_MAX_QP_RD_ATOMIC); +} + +int smc_ib_modify_qp_reset(struct smc_link *lnk) +{ + struct ib_qp_attr qp_attr; + + memset(&qp_attr, 0, sizeof(qp_attr)); + qp_attr.qp_state = IB_QPS_RESET; + return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE); +} + +int smc_ib_ready_link(struct smc_link *lnk) +{ + struct smc_link_group *lgr = + container_of(lnk, struct smc_link_group, lnk[0]); + int rc = 0; + + rc = smc_ib_modify_qp_init(lnk); + if (rc) + goto out; + + rc = smc_ib_modify_qp_rtr(lnk); + if (rc) + goto out; + smc_wr_remember_qp_attr(lnk); + rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv, + IB_CQ_SOLICITED_MASK); + if (rc) + goto out; + rc = smc_wr_rx_post_init(lnk); + if (rc) + goto out; + smc_wr_remember_qp_attr(lnk); + + if (lgr->role == SMC_SERV) { + rc = smc_ib_modify_qp_rts(lnk); + if (rc) + goto out; + smc_wr_remember_qp_attr(lnk); + } +out: + return rc; +} + +/* process context wrapper for might_sleep smc_ib_remember_port_attr */ +static void smc_ib_port_event_work(struct work_struct *work) +{ + struct smc_ib_device *smcibdev = container_of( + work, struct smc_ib_device, port_event_work); + u8 port_idx; + + for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) { + smc_ib_remember_port_attr(smcibdev, port_idx + 1); + clear_bit(port_idx, &smcibdev->port_event_mask); + } +} + +/* can be called in IRQ context */ +static void 
smc_ib_global_event_handler(struct ib_event_handler *handler, + struct ib_event *ibevent) +{ + struct smc_ib_device *smcibdev; + u8 port_idx; + + smcibdev = container_of(handler, struct smc_ib_device, event_handler); + if (!smc_pnet_find_ib(smcibdev->ibdev->name)) + return; + + switch (ibevent->event) { + case IB_EVENT_PORT_ERR: + port_idx = ibevent->element.port_num - 1; + set_bit(port_idx, &smcibdev->port_event_mask); + schedule_work(&smcibdev->port_event_work); + /* fall through */ + case IB_EVENT_DEVICE_FATAL: + /* tbd in follow-on patch: + * abnormal close of corresponding connections + */ + break; + case IB_EVENT_PORT_ACTIVE: + port_idx = ibevent->element.port_num - 1; + set_bit(port_idx, &smcibdev->port_event_mask); + schedule_work(&smcibdev->port_event_work); + break; + default: + break; + } +} + void smc_ib_dealloc_protection_domain(struct smc_link *lnk) { ib_dealloc_pd(lnk->roce_pd); @@ -121,6 +293,17 @@ int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, return rc; } +void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction) +{ + if (!buf_slot->dma_addr[SMC_SINGLE_LINK]) + return; /* already unmapped */ + ib_dma_unmap_single(smcibdev->ibdev, *buf_slot->dma_addr, buf_size, + data_direction); + buf_slot->dma_addr[SMC_SINGLE_LINK] = 0; +} + static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) { struct net_device *ndev; @@ -184,6 +367,50 @@ out: return rc; } +long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) +{ + struct ib_cq_init_attr cqattr = { + .cqe = SMC_WR_MAX_CQE, .comp_vector = 0 }; + long rc; + + smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev, + smc_wr_tx_cq_handler, NULL, + smcibdev, &cqattr); + rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send); + if (IS_ERR(smcibdev->roce_cq_send)) { + smcibdev->roce_cq_send = NULL; + return rc; + } + smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev, + smc_wr_rx_cq_handler, NULL, + smcibdev, &cqattr); + rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv); + if (IS_ERR(smcibdev->roce_cq_recv)) { + smcibdev->roce_cq_recv = NULL; + goto err; + } + INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, + smc_ib_global_event_handler); + ib_register_event_handler(&smcibdev->event_handler); + smc_wr_add_dev(smcibdev); + smcibdev->initialized = 1; + return rc; + +err: + ib_destroy_cq(smcibdev->roce_cq_send); + return rc; +} + +static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) +{ + if (!smcibdev->initialized) + return; + smc_wr_remove_dev(smcibdev); + ib_unregister_event_handler(&smcibdev->event_handler); + ib_destroy_cq(smcibdev->roce_cq_recv); + ib_destroy_cq(smcibdev->roce_cq_send); +} + static struct ib_client smc_ib_client; /* callback function for ib_register_client() */ @@ -199,6 +426,7 @@ static void smc_ib_add_dev(struct ib_device *ibdev) return; smcibdev->ibdev = ibdev; + INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); spin_lock(&smc_ib_devices.lock); list_add_tail(&smcibdev->list, &smc_ib_devices.list); @@ -217,6 +445,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ spin_unlock(&smc_ib_devices.lock); smc_pnet_remove_by_ibdev(smcibdev); + smc_ib_cleanup_per_ibdev(smcibdev); kfree(smcibdev); } diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 441fd168911d..3fe2d55c6051 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -29,6 +29,7 @@ struct smc_ib_device { /* 
ib-device infos for smc */ struct list_head list; struct ib_device *ibdev; struct ib_port_attr pattr[SMC_MAX_PORTS]; /* ib dev. port attrs */ + struct ib_event_handler event_handler; /* global ib_event handler */ struct ib_cq *roce_cq_send; /* send completion queue */ struct ib_cq *roce_cq_recv; /* recv completion queue */ struct tasklet_struct send_tasklet; /* called by send cq handler */ @@ -36,6 +37,8 @@ struct smc_ib_device { /* ib-device infos for smc */ char mac[SMC_MAX_PORTS][6]; /* mac address per port*/ union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */ u8 initialized : 1; /* ib dev CQ, evthdl done */ + struct work_struct port_event_work; + unsigned long port_event_mask; }; struct smc_buf_desc; @@ -48,9 +51,19 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport); int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); +void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction); void smc_ib_dealloc_protection_domain(struct smc_link *lnk); int smc_ib_create_protection_domain(struct smc_link *lnk); void smc_ib_destroy_queue_pair(struct smc_link *lnk); int smc_ib_create_queue_pair(struct smc_link *lnk); +int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, + struct ib_mr **mr); +int smc_ib_ready_link(struct smc_link *lnk); +int smc_ib_modify_qp_rts(struct smc_link *lnk); +int smc_ib_modify_qp_reset(struct smc_link *lnk); +long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev); + #endif diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index a2bc6b612d03..5cbc1e5f7606 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -393,6 +393,7 @@ static void smc_wr_init_sge(struct smc_link *lnk) lnk->wr_tx_sges[i].addr = lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; + lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_tx_ibs[i].next = NULL; lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; lnk->wr_tx_ibs[i].num_sge = 1; @@ -404,6 +405,7 @@ static void smc_wr_init_sge(struct smc_link *lnk) lnk->wr_rx_sges[i].addr = lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE; lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE; + lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_rx_ibs[i].next = NULL; lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i]; lnk->wr_rx_ibs[i].num_sge = 1; -- cgit v1.2.3 From 9bf9abead28abaf11d0776b6e0c5d34b6525e846 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:21 +0100 Subject: smc: link layer control (LLC) send and receive LLC messages CONFIRM_LINK (via IB message send and CQE) Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/Makefile | 2 +- net/smc/af_smc.c | 94 ++++++++++++++++++++++++++++++- net/smc/smc_clc.h | 2 + net/smc/smc_core.c | 8 +++ net/smc/smc_core.h | 6 ++ net/smc/smc_llc.c | 158 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_llc.h | 63 +++++++++++++++++++++ 7 files changed, 330 insertions(+), 3 deletions(-) create mode 100644 net/smc/smc_llc.c create mode 100644 net/smc/smc_llc.h diff --git a/net/smc/Makefile b/net/smc/Makefile index b19120ed7102..73320bf452b0 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_SMC) += smc.o -smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o +smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 1026fad35998..1ae986d2762d 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -31,6 +31,7 @@ #include "smc.h" #include "smc_clc.h" +#include "smc_llc.h" #include "smc_core.h" #include "smc_ib.h" #include "smc_pnet.h" @@ -245,6 +246,41 @@ out: return rc; } +static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid) +{ + struct smc_link_group *lgr = smc->conn.lgr; + struct smc_link *link; + int rest; + int rc; + + link = &lgr->lnk[SMC_SINGLE_LINK]; + /* receive CONFIRM LINK request from server over RoCE fabric */ + rest = wait_for_completion_interruptible_timeout( + &link->llc_confirm, + SMC_LLC_WAIT_FIRST_TIME); + if (rest <= 0) { + struct smc_clc_msg_decline dclc; + + rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), + SMC_CLC_DECLINE); + return rc; + } + + rc = smc_ib_modify_qp_rts(link); + if (rc) + return SMC_CLC_DECL_INTERR; + + smc_wr_remember_qp_attr(link); + /* send CONFIRM LINK response over RoCE fabric */ + rc = smc_llc_send_confirm_link(link, + link->smcibdev->mac[link->ibport - 1], + gid, SMC_LLC_RESP); + if (rc < 0) + return SMC_CLC_DECL_TCL; + + return rc; +} + static void smc_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { @@ -358,7 +394,17 @@ static int smc_connect_rdma(struct smc_sock *smc) if (rc) goto out_err_unlock; - /* tbd in follow-on patch: llc_confirm */ + if (local_contact == SMC_FIRST_CONTACT) { + /* QP confirmation over RoCE fabric */ + reason_code = smc_clnt_conf_first_link( + smc, &smcibdev->gid[ibport - 1]); + if (reason_code < 0) { + rc = reason_code; + goto out_err_unlock; + } + if (reason_code > 0) + goto decline_rdma_unlock; + } mutex_unlock(&smc_create_lgr_pending); out_connected: @@ -543,6 +589,36 @@ static void smc_close_non_accepted(struct sock *sk) sock_put(sk); } +static int smc_serv_conf_first_link(struct smc_sock *smc) +{ + struct smc_link_group *lgr = smc->conn.lgr; + struct smc_link *link; + int rest; + int rc; + + link = &lgr->lnk[SMC_SINGLE_LINK]; + /* send CONFIRM LINK request to client over the RoCE fabric */ + rc = smc_llc_send_confirm_link(link, + link->smcibdev->mac[link->ibport - 1], + &link->smcibdev->gid[link->ibport - 1], + SMC_LLC_REQ); + if (rc < 0) + return SMC_CLC_DECL_TCL; + + /* receive CONFIRM LINK response from client over the RoCE fabric */ + rest = wait_for_completion_interruptible_timeout( + &link->llc_confirm_resp, + SMC_LLC_WAIT_FIRST_TIME); + if (rest <= 0) { + struct smc_clc_msg_decline dclc; + + rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), + SMC_CLC_DECLINE); + } + + return rc; +} + /* setup for RDMA connection of server */ static void smc_listen_work(struct work_struct *work) { @@ -655,13 +731,21 @@ static void smc_listen_work(struct work_struct *work) goto decline_rdma; } - /* 
tbd in follow-on patch: modify_qp, llc_confirm */ if (local_contact == SMC_FIRST_CONTACT) { rc = smc_ib_ready_link(link); if (rc) { reason_code = SMC_CLC_DECL_INTERR; goto decline_rdma; } + /* QP confirmation over RoCE fabric */ + reason_code = smc_serv_conf_first_link(new_smc); + if (reason_code < 0) { + /* peer is not aware of a problem */ + rc = reason_code; + goto out_err; + } + if (reason_code > 0) + goto decline_rdma; } out_connected: @@ -1111,6 +1195,12 @@ static int __init smc_init(void) if (rc) return rc; + rc = smc_llc_init(); + if (rc) { + pr_err("%s: smc_llc_init fails with %d\n", __func__, rc); + goto out_pnet; + } + rc = proto_register(&smc_proto, 1); if (rc) { pr_err("%s: proto_register fails with %d\n", __func__, rc); diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 5924d998b5ca..13db8ce177c9 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -33,6 +33,8 @@ static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; #define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */ #define SMC_CLC_DECL_REPLY 0x06000000 /* reply to a received decline */ #define SMC_CLC_DECL_INTERR 0x99990000 /* internal error */ +#define SMC_CLC_DECL_TCL 0x02040000 /* timeout w4 QP confirm */ +#define SMC_CLC_DECL_SEND 0x07000000 /* sending problem */ struct smc_clc_msg_hdr { /* header1 of clc messages */ u8 eyecatcher[4]; /* eye catcher */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 0e9adbd9cd68..906d88c266c0 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -21,9 +21,13 @@ #include "smc_core.h" #include "smc_ib.h" #include "smc_wr.h" +#include "smc_llc.h" +#define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY (600 * HZ) +static u32 smc_lgr_num; /* unique link group number */ + /* Register connection's alert token in our lookup structure. * To use rbtrees we have to implement our own insert core. * Requires @conns_lock @@ -152,6 +156,8 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, INIT_LIST_HEAD(&lgr->sndbufs[i]); INIT_LIST_HEAD(&lgr->rmbs[i]); } + smc_lgr_num += SMC_LGR_NUM_INCR; + memcpy(&lgr->id, (u8 *)&smc_lgr_num, SMC_LGR_ID_SIZE); INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work); lgr->conns_all = RB_ROOT; @@ -177,6 +183,8 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, rc = smc_wr_create_link(lnk); if (rc) goto destroy_qp; + init_completion(&lnk->llc_confirm); + init_completion(&lnk->llc_confirm_resp); smc->conn.lgr = lgr; rwlock_init(&lgr->conns_lock); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index f5ea52086d6d..27eb38056a27 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -73,6 +73,9 @@ struct smc_link { u32 peer_psn; /* QP rx initial packet seqno */ u8 peer_mac[ETH_ALEN]; /* = gid[8:10||13:15] */ u8 peer_gid[sizeof(union ib_gid)]; /* gid of peer*/ + u8 link_id; /* unique # within link group */ + struct completion llc_confirm; /* wait for rx of conf link */ + struct completion llc_confirm_resp; /* wait 4 rx of cnf lnk rsp */ }; /* For now we just allow one parallel link per link group. 
The SMC protocol @@ -102,6 +105,8 @@ struct smc_rtoken { /* address/key of remote RMB */ u32 rkey; }; +#define SMC_LGR_ID_SIZE 4 + struct smc_link_group { struct list_head list; enum smc_lgr_role role; /* client or server */ @@ -125,6 +130,7 @@ struct smc_link_group { SMC_RMBS_PER_LGR_MAX)]; /* used rtoken elements */ + u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */ struct delayed_work free_work; /* delayed freeing of an lgr */ bool sync_err; /* lgr no longer fits to peer */ }; diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c new file mode 100644 index 000000000000..c2f9165d13ef --- /dev/null +++ b/net/smc/smc_llc.c @@ -0,0 +1,158 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Link Layer Control (LLC) + * + * For now, we only support the necessary "confirm link" functionality + * which happens for the first RoCE link after successful CLC handshake. + * + * Copyright IBM Corp. 2016 + * + * Author(s): Klaus Wacker + * Ursula Braun + */ + +#include +#include + +#include "smc.h" +#include "smc_core.h" +#include "smc_clc.h" +#include "smc_llc.h" + +/********************************** send *************************************/ + +struct smc_llc_tx_pend { +}; + +/* handler for send/transmission completion of an LLC msg */ +static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend, + struct smc_link *link, + enum ib_wc_status wc_status) +{ + /* future work: handle wc_status error for recovery and failover */ +} + +/** + * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits + * @link: Pointer to SMC link used for sending LLC control message. + * @wr_buf: Out variable returning pointer to work request payload buffer. + * @pend: Out variable returning pointer to private pending WR tracking. + * It's the context the transmit complete handler will get. + * + * Reserves and pre-fills an entry for a pending work request send/tx. + * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx. + * Can sleep due to smc_get_ctrl_buf (if not in softirq context). + * + * Return: 0 on success, otherwise an error value. 
+ */ +static int smc_llc_add_pending_send(struct smc_link *link, + struct smc_wr_buf **wr_buf, + struct smc_wr_tx_pend_priv **pend) +{ + int rc; + + rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); + if (rc < 0) + return rc; + BUILD_BUG_ON_MSG( + sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE, + "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)"); + BUILD_BUG_ON_MSG( + sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE, + "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); + BUILD_BUG_ON_MSG( + sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, + "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)"); + return 0; +} + +/* high-level API to send LLC confirm link */ +int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], + union ib_gid *gid, + enum smc_llc_reqresp reqresp) +{ + struct smc_link_group *lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + struct smc_llc_msg_confirm_link *confllc; + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_buf *wr_buf; + int rc; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + return rc; + confllc = (struct smc_llc_msg_confirm_link *)wr_buf; + memset(confllc, 0, sizeof(*confllc)); + confllc->hd.common.type = SMC_LLC_CONFIRM_LINK; + confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link); + if (reqresp == SMC_LLC_RESP) + confllc->hd.flags |= SMC_LLC_FLAG_RESP; + memcpy(confllc->sender_mac, mac, ETH_ALEN); + memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); + hton24(confllc->sender_qp_num, link->roce_qp->qp_num); + /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */ + memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); + confllc->max_links = SMC_LINKS_PER_LGR_MAX; + /* send llc message */ + rc = smc_wr_tx_send(link, pend); + return rc; +} + +/********************************* receive ***********************************/ + +static void smc_llc_rx_confirm_link(struct smc_link *link, + struct smc_llc_msg_confirm_link *llc) +{ + struct smc_link_group *lgr; + + lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { + if (lgr->role == SMC_SERV) + complete(&link->llc_confirm_resp); + } else { + if (lgr->role == SMC_CLNT) { + link->link_id = llc->link_num; + complete(&link->llc_confirm); + } + } +} + +static void smc_llc_rx_handler(struct ib_wc *wc, void *buf) +{ + struct smc_link *link = (struct smc_link *)wc->qp->qp_context; + union smc_llc_msg *llc = buf; + + if (wc->byte_len < sizeof(*llc)) + return; /* short message */ + if (llc->raw.hdr.length != sizeof(*llc)) + return; /* invalid message */ + if (llc->raw.hdr.common.type == SMC_LLC_CONFIRM_LINK) + smc_llc_rx_confirm_link(link, &llc->confirm_link); +} + +/***************************** init, exit, misc ******************************/ + +static struct smc_wr_rx_handler smc_llc_rx_handlers[] = { + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_CONFIRM_LINK + }, + { + .handler = NULL, + } +}; + +int __init smc_llc_init(void) +{ + struct smc_wr_rx_handler *handler; + int rc = 0; + + for (handler = smc_llc_rx_handlers; handler->handler; handler++) { + INIT_HLIST_NODE(&handler->list); + rc = smc_wr_rx_register_handler(handler); + if (rc) + break; + } + return rc; +} diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h new file mode 100644 index 
000000000000..b472f853953a --- /dev/null +++ b/net/smc/smc_llc.h @@ -0,0 +1,63 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for LLC (link layer control) message handling + * + * Copyright IBM Corp. 2016 + * + * Author(s): Klaus Wacker + * Ursula Braun + */ + +#ifndef SMC_LLC_H +#define SMC_LLC_H + +#include "smc_wr.h" + +#define SMC_LLC_FLAG_RESP 0x80 + +#define SMC_LLC_WAIT_FIRST_TIME (5 * HZ) + +enum smc_llc_reqresp { + SMC_LLC_REQ, + SMC_LLC_RESP +}; + +enum smc_llc_msg_type { + SMC_LLC_CONFIRM_LINK = 0x01, +}; + +#define SMC_LLC_DATA_LEN 40 + +struct smc_llc_hdr { + struct smc_wr_rx_hdr common; + u8 length; /* 44 */ + u8 reserved; + u8 flags; +}; + +struct smc_llc_msg_confirm_link { /* type 0x01 */ + struct smc_llc_hdr hd; + u8 sender_mac[ETH_ALEN]; + u8 sender_gid[SMC_GID_SIZE]; + u8 sender_qp_num[3]; + u8 link_num; + u8 link_uid[SMC_LGR_ID_SIZE]; + u8 max_links; + u8 reserved[9]; +}; + +union smc_llc_msg { + struct smc_llc_msg_confirm_link confirm_link; + struct { + struct smc_llc_hdr hdr; + u8 data[SMC_LLC_DATA_LEN]; + } raw; +}; + +/* transmit */ +int smc_llc_send_confirm_link(struct smc_link *lnk, u8 mac[], union ib_gid *gid, + enum smc_llc_reqresp reqresp); +int smc_llc_init(void) __init; + +#endif /* SMC_LLC_H */ -- cgit v1.2.3 From 5f08318f617b05b6ee389d8bd174c7af921ebf19 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:22 +0100 Subject: smc: connection data control (CDC) send and receive CDC messages (via IB message send and CQE) Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/Makefile | 1 + net/smc/af_smc.c | 9 ++ net/smc/smc.h | 97 +++++++++++++++++++ net/smc/smc_cdc.c | 280 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_cdc.h | 217 +++++++++++++++++++++++++++++++++++++++++ net/smc/smc_core.c | 9 ++ net/smc/smc_wr.c | 19 ++++ net/smc/smc_wr.h | 9 ++ 8 files changed, 641 insertions(+) create mode 100644 net/smc/smc_cdc.c create mode 100644 net/smc/smc_cdc.h diff --git a/net/smc/Makefile b/net/smc/Makefile index 73320bf452b0..ec0fd0338c6d 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_SMC) += smc.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o +smc-y += smc_cdc.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 1ae986d2762d..4b1611d46ef7 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -26,12 +26,14 @@ #include #include #include +#include #include #include #include "smc.h" #include "smc_clc.h" #include "smc_llc.h" +#include "smc_cdc.h" #include "smc_core.h" #include "smc_ib.h" #include "smc_pnet.h" @@ -285,6 +287,7 @@ static void smc_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { smc->conn.peer_conn_idx = clc->conn_idx; + smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token); smc->conn.peer_rmbe_size = smc_uncompress_bufsize(clc->rmbe_size); atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); } @@ -1201,6 +1204,12 @@ static int __init smc_init(void) goto out_pnet; } + rc = smc_cdc_init(); + if (rc) { + pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc); + goto out_pnet; + } + rc = proto_register(&smc_proto, 1); if (rc) { pr_err("%s: proto_register fails with %d\n", __func__, rc); diff --git a/net/smc/smc.h b/net/smc/smc.h index 4d2e54d176f2..a8d4a68aac30 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -21,6 +21,10 @@ #define SMC_MAX_PORTS 2 /* Max # of ports */ +#ifdef ATOMIC64_INIT +#define KERNEL_HAS_ATOMIC64 +#endif + enum 
smc_state { /* possible states of an SMC socket */ SMC_ACTIVE = 1, SMC_INIT = 2, @@ -34,6 +38,67 @@ struct smc_wr_rx_hdr { /* common prefix part of LLC and CDC to demultiplex */ u8 type; } __aligned(1); +struct smc_cdc_conn_state_flags { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 peer_done_writing : 1; /* Sending done indicator */ + u8 peer_conn_closed : 1; /* Peer connection closed indicator */ + u8 peer_conn_abort : 1; /* Abnormal close indicator */ + u8 reserved : 5; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved : 5; + u8 peer_conn_abort : 1; + u8 peer_conn_closed : 1; + u8 peer_done_writing : 1; +#endif +}; + +struct smc_cdc_producer_flags { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 write_blocked : 1; /* Writing Blocked, no rx buf space */ + u8 urg_data_pending : 1; /* Urgent Data Pending */ + u8 urg_data_present : 1; /* Urgent Data Present */ + u8 cons_curs_upd_req : 1; /* cursor update requested */ + u8 failover_validation : 1;/* message replay due to failover */ + u8 reserved : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved : 3; + u8 failover_validation : 1; + u8 cons_curs_upd_req : 1; + u8 urg_data_present : 1; + u8 urg_data_pending : 1; + u8 write_blocked : 1; +#endif +}; + +/* in host byte order */ +union smc_host_cursor { /* SMC cursor - an offset in an RMBE */ + struct { + u16 reserved; + u16 wrap; /* window wrap sequence number */ + u32 count; /* cursor (= offset) part */ + }; +#ifdef KERNEL_HAS_ATOMIC64 + atomic64_t acurs; /* for atomic processing */ +#else + u64 acurs; /* for atomic processing */ +#endif +} __aligned(8); + +/* in host byte order, except for flag bitfields in network byte order */ +struct smc_host_cdc_msg { /* Connection Data Control message */ + struct smc_wr_rx_hdr common; /* .type = 0xFE */ + u8 len; /* length = 44 */ + u16 seqno; /* connection seq # */ + u32 token; /* alert_token */ + union smc_host_cursor prod; /* producer cursor */ + union smc_host_cursor cons; /* consumer cursor, + * piggy backed "ack" + */ + struct smc_cdc_producer_flags prod_flags; /* conn. tx/rx status */ + struct smc_cdc_conn_state_flags conn_state_flags; /* peer conn. status*/ + u8 reserved[18]; +} __aligned(8); + struct smc_connection { struct rb_node alert_node; struct smc_link_group *lgr; /* link group of connection */ @@ -50,6 +115,38 @@ struct smc_connection { struct smc_buf_desc *rmb_desc; /* RMBE descriptor */ int rmbe_size; /* RMBE size <== sock rmem */ int rmbe_size_short;/* compressed notation */ + + struct smc_host_cdc_msg local_tx_ctrl; /* host byte order staging + * buffer for CDC msg send + * .prod cf. TCP snd_nxt + * .cons cf. TCP sends ack + */ + union smc_host_cursor tx_curs_prep; /* tx - prepared data + * snd_max..wmem_alloc + */ + union smc_host_cursor tx_curs_sent; /* tx - sent data + * snd_nxt ? + */ + union smc_host_cursor tx_curs_fin; /* tx - confirmed by peer + * snd-wnd-begin ? + */ + atomic_t sndbuf_space; /* remaining space in sndbuf */ + u16 tx_cdc_seq; /* sequence # for CDC send */ + spinlock_t send_lock; /* protect wr_sends */ + + struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl. + * .prod cf. TCP rcv_nxt + * .cons cf. TCP snd_una + */ + union smc_host_cursor rx_curs_confirmed; /* confirmed to peer + * source of snd_una ? 
+ */ + atomic_t bytes_to_rcv; /* arrived data, + * not yet received + */ +#ifndef KERNEL_HAS_ATOMIC64 + spinlock_t acurs_lock; /* protect cursors */ +#endif }; struct smc_sock { /* smc sock container */ diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c new file mode 100644 index 000000000000..111d23ff9e17 --- /dev/null +++ b/net/smc/smc_cdc.c @@ -0,0 +1,280 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Connection Data Control (CDC) + * handles flow control + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include + +#include "smc.h" +#include "smc_wr.h" +#include "smc_cdc.h" + +/********************************** send *************************************/ + +struct smc_cdc_tx_pend { + struct smc_connection *conn; /* socket connection */ + union smc_host_cursor cursor; /* tx sndbuf cursor sent */ + union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ + u16 ctrl_seq; /* conn. tx sequence # */ +}; + +/* handler for send/transmission completion of a CDC msg */ +static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, + struct smc_link *link, + enum ib_wc_status wc_status) +{ + struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd; + struct smc_sock *smc; + int diff; + + if (!cdcpend->conn) + /* already dismissed */ + return; + + smc = container_of(cdcpend->conn, struct smc_sock, conn); + bh_lock_sock(&smc->sk); + if (!wc_status) { + diff = smc_curs_diff(cdcpend->conn->sndbuf_size, + &cdcpend->conn->tx_curs_fin, + &cdcpend->cursor); + /* sndbuf_space is decreased in smc_sendmsg */ + smp_mb__before_atomic(); + atomic_add(diff, &cdcpend->conn->sndbuf_space); + /* guarantee 0 <= sndbuf_space <= sndbuf_size */ + smp_mb__after_atomic(); + smc_curs_write(&cdcpend->conn->tx_curs_fin, + smc_curs_read(&cdcpend->cursor, cdcpend->conn), + cdcpend->conn); + } + /* subsequent patch: wake if send buffer space available */ + bh_unlock_sock(&smc->sk); +} + +int smc_cdc_get_free_slot(struct smc_link *link, + struct smc_wr_buf **wr_buf, + struct smc_cdc_tx_pend **pend) +{ + return smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, + (struct smc_wr_tx_pend_priv **)pend); +} + +static inline void smc_cdc_add_pending_send(struct smc_connection *conn, + struct smc_cdc_tx_pend *pend) +{ + BUILD_BUG_ON_MSG( + sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, + "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); + BUILD_BUG_ON_MSG( + offsetof(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE, + "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); + BUILD_BUG_ON_MSG( + sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, + "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)"); + pend->conn = conn; + pend->cursor = conn->tx_curs_sent; + pend->p_cursor = conn->local_tx_ctrl.prod; + pend->ctrl_seq = conn->tx_cdc_seq; +} + +int smc_cdc_msg_send(struct smc_connection *conn, + struct smc_wr_buf *wr_buf, + struct smc_cdc_tx_pend *pend) +{ + struct smc_link *link; + int rc; + + link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + + smc_cdc_add_pending_send(conn, pend); + + conn->tx_cdc_seq++; + conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; + smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, + &conn->local_tx_ctrl, conn); + rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); + if (!rc) + 
smc_curs_write(&conn->rx_curs_confirmed, + smc_curs_read(&conn->local_tx_ctrl.cons, conn), + conn); + + return rc; +} + +int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) +{ + struct smc_cdc_tx_pend *pend; + struct smc_wr_buf *wr_buf; + int rc; + + rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], &wr_buf, + &pend); + if (rc) + return rc; + + return smc_cdc_msg_send(conn, wr_buf, pend); +} + +static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend, + unsigned long data) +{ + struct smc_connection *conn = (struct smc_connection *)data; + struct smc_cdc_tx_pend *cdc_pend = + (struct smc_cdc_tx_pend *)tx_pend; + + return cdc_pend->conn == conn; +} + +static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend) +{ + struct smc_cdc_tx_pend *cdc_pend = + (struct smc_cdc_tx_pend *)tx_pend; + + cdc_pend->conn = NULL; +} + +void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) +{ + struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + + smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE, + smc_cdc_tx_filter, smc_cdc_tx_dismisser, + (unsigned long)conn); +} + +/********************************* receive ***********************************/ + +static inline bool smc_cdc_before(u16 seq1, u16 seq2) +{ + return (s16)(seq1 - seq2) < 0; +} + +static void smc_cdc_msg_recv_action(struct smc_sock *smc, + struct smc_link *link, + struct smc_cdc_msg *cdc) +{ + union smc_host_cursor cons_old, prod_old; + struct smc_connection *conn = &smc->conn; + int diff_cons, diff_prod; + + if (!cdc->prod_flags.failover_validation) { + if (smc_cdc_before(ntohs(cdc->seqno), + conn->local_rx_ctrl.seqno)) + /* received seqno is old */ + return; + } + smc_curs_write(&prod_old, + smc_curs_read(&conn->local_rx_ctrl.prod, conn), + conn); + smc_curs_write(&cons_old, + smc_curs_read(&conn->local_rx_ctrl.cons, conn), + conn); + smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn); + + diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old, + &conn->local_rx_ctrl.cons); + if (diff_cons) { + /* peer_rmbe_space is decreased during data transfer with RDMA + * write + */ + smp_mb__before_atomic(); + atomic_add(diff_cons, &conn->peer_rmbe_space); + /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */ + smp_mb__after_atomic(); + } + + diff_prod = smc_curs_diff(conn->rmbe_size, &prod_old, + &conn->local_rx_ctrl.prod); + if (diff_prod) { + /* bytes_to_rcv is decreased in smc_recvmsg */ + smp_mb__before_atomic(); + atomic_add(diff_prod, &conn->bytes_to_rcv); + /* guarantee 0 <= bytes_to_rcv <= rmbe_size */ + smp_mb__after_atomic(); + } + + if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) + smc->sk.sk_err = ECONNRESET; + if (smc_cdc_rxed_any_close_or_senddone(conn)) + /* subsequent patch: terminate connection */ + + /* piggy backed tx info */ + /* subsequent patch: wake receivers if receive buffer space available */ + + /* subsequent patch: trigger socket release if connection closed */ + + /* socket connected but not accepted */ + if (!smc->sk.sk_socket) + return; + + /* data available */ + /* subsequent patch: send delayed ack, wake receivers */ +} + +/* called under tasklet context */ +static inline void smc_cdc_msg_recv(struct smc_cdc_msg *cdc, + struct smc_link *link, u64 wr_id) +{ + struct smc_link_group *lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + struct smc_connection *connection; + struct smc_sock *smc; + + /* lookup connection */ + read_lock_bh(&lgr->conns_lock); + connection = smc_lgr_find_conn(ntohl(cdc->token), lgr); + if (!connection) { + 
read_unlock_bh(&lgr->conns_lock); + return; + } + smc = container_of(connection, struct smc_sock, conn); + sock_hold(&smc->sk); + read_unlock_bh(&lgr->conns_lock); + bh_lock_sock(&smc->sk); + smc_cdc_msg_recv_action(smc, link, cdc); + bh_unlock_sock(&smc->sk); + sock_put(&smc->sk); /* no free sk in softirq-context */ +} + +/***************************** init, exit, misc ******************************/ + +static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) +{ + struct smc_link *link = (struct smc_link *)wc->qp->qp_context; + struct smc_cdc_msg *cdc = buf; + + if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved)) + return; /* short message */ + if (cdc->len != sizeof(*cdc)) + return; /* invalid message */ + smc_cdc_msg_recv(cdc, link, wc->wr_id); +} + +static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = { + { + .handler = smc_cdc_rx_handler, + .type = SMC_CDC_MSG_TYPE + }, + { + .handler = NULL, + } +}; + +int __init smc_cdc_init(void) +{ + struct smc_wr_rx_handler *handler; + int rc = 0; + + for (handler = smc_cdc_rx_handlers; handler->handler; handler++) { + INIT_HLIST_NODE(&handler->list); + rc = smc_wr_rx_register_handler(handler); + if (rc) + break; + } + return rc; +} diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h new file mode 100644 index 000000000000..135f61369a60 --- /dev/null +++ b/net/smc/smc_cdc.h @@ -0,0 +1,217 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Connection Data Control (CDC) + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#ifndef SMC_CDC_H +#define SMC_CDC_H + +#include /* max_t */ +#include +#include +#include + +#include "smc.h" +#include "smc_core.h" +#include "smc_wr.h" + +#define SMC_CDC_MSG_TYPE 0xFE + +/* in network byte order */ +union smc_cdc_cursor { /* SMC cursor */ + struct { + __be16 reserved; + __be16 wrap; + __be32 count; + }; +#ifdef KERNEL_HAS_ATOMIC64 + atomic64_t acurs; /* for atomic processing */ +#else + u64 acurs; /* for atomic processing */ +#endif +} __aligned(8); + +/* in network byte order */ +struct smc_cdc_msg { + struct smc_wr_rx_hdr common; /* .type = 0xFE */ + u8 len; /* 44 */ + __be16 seqno; + __be32 token; + union smc_cdc_cursor prod; + union smc_cdc_cursor cons; /* piggy backed "ack" */ + struct smc_cdc_producer_flags prod_flags; + struct smc_cdc_conn_state_flags conn_state_flags; + u8 reserved[18]; +} __aligned(8); + +static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) +{ + return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort || + conn->local_rx_ctrl.conn_state_flags.peer_conn_closed; +} + +static inline bool smc_cdc_rxed_any_close_or_senddone( + struct smc_connection *conn) +{ + return smc_cdc_rxed_any_close(conn) || + conn->local_rx_ctrl.conn_state_flags.peer_done_writing; +} + +static inline void smc_curs_add(int size, union smc_host_cursor *curs, + int value) +{ + curs->count += value; + if (curs->count >= size) { + curs->wrap++; + curs->count -= size; + } +} + +/* SMC cursors are 8 bytes long and require atomic reading and writing */ +static inline u64 smc_curs_read(union smc_host_cursor *curs, + struct smc_connection *conn) +{ +#ifndef KERNEL_HAS_ATOMIC64 + unsigned long flags; + u64 ret; + + spin_lock_irqsave(&conn->acurs_lock, flags); + ret = curs->acurs; + spin_unlock_irqrestore(&conn->acurs_lock, flags); + return ret; +#else + return atomic64_read(&curs->acurs); +#endif +} + +static inline u64 smc_curs_read_net(union smc_cdc_cursor *curs, + struct smc_connection *conn) +{ +#ifndef KERNEL_HAS_ATOMIC64 + unsigned long flags; 
+ u64 ret; + + spin_lock_irqsave(&conn->acurs_lock, flags); + ret = curs->acurs; + spin_unlock_irqrestore(&conn->acurs_lock, flags); + return ret; +#else + return atomic64_read(&curs->acurs); +#endif +} + +static inline void smc_curs_write(union smc_host_cursor *curs, u64 val, + struct smc_connection *conn) +{ +#ifndef KERNEL_HAS_ATOMIC64 + unsigned long flags; + + spin_lock_irqsave(&conn->acurs_lock, flags); + curs->acurs = val; + spin_unlock_irqrestore(&conn->acurs_lock, flags); +#else + atomic64_set(&curs->acurs, val); +#endif +} + +static inline void smc_curs_write_net(union smc_cdc_cursor *curs, u64 val, + struct smc_connection *conn) +{ +#ifndef KERNEL_HAS_ATOMIC64 + unsigned long flags; + + spin_lock_irqsave(&conn->acurs_lock, flags); + curs->acurs = val; + spin_unlock_irqrestore(&conn->acurs_lock, flags); +#else + atomic64_set(&curs->acurs, val); +#endif +} + +/* calculate cursor difference between old and new, where old <= new */ +static inline int smc_curs_diff(unsigned int size, + union smc_host_cursor *old, + union smc_host_cursor *new) +{ + if (old->wrap != new->wrap) + return max_t(int, 0, + ((size - old->count) + new->count)); + + return max_t(int, 0, (new->count - old->count)); +} + +static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, + union smc_host_cursor *local, + struct smc_connection *conn) +{ + union smc_host_cursor temp; + + smc_curs_write(&temp, smc_curs_read(local, conn), conn); + peer->count = htonl(temp.count); + peer->wrap = htons(temp.wrap); + /* peer->reserved = htons(0); must be ensured by caller */ +} + +static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, + struct smc_host_cdc_msg *local, + struct smc_connection *conn) +{ + peer->common.type = local->common.type; + peer->len = local->len; + peer->seqno = htons(local->seqno); + peer->token = htonl(local->token); + smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn); + smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn); + peer->prod_flags = local->prod_flags; + peer->conn_state_flags = local->conn_state_flags; +} + +static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local, + union smc_cdc_cursor *peer, + struct smc_connection *conn) +{ + union smc_host_cursor temp, old; + union smc_cdc_cursor net; + + smc_curs_write(&old, smc_curs_read(local, conn), conn); + smc_curs_write_net(&net, smc_curs_read_net(peer, conn), conn); + temp.count = ntohl(net.count); + temp.wrap = ntohs(net.wrap); + if ((old.wrap > temp.wrap) && temp.wrap) + return; + if ((old.wrap == temp.wrap) && + (old.count > temp.count)) + return; + smc_curs_write(local, smc_curs_read(&temp, conn), conn); +} + +static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, + struct smc_cdc_msg *peer, + struct smc_connection *conn) +{ + local->common.type = peer->common.type; + local->len = peer->len; + local->seqno = ntohs(peer->seqno); + local->token = ntohl(peer->token); + smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn); + smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn); + local->prod_flags = peer->prod_flags; + local->conn_state_flags = peer->conn_state_flags; +} + +struct smc_cdc_tx_pend; + +int smc_cdc_get_free_slot(struct smc_link *link, struct smc_wr_buf **wr_buf, + struct smc_cdc_tx_pend **pend); +void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); +int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, + struct smc_cdc_tx_pend *pend); +int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn); +int smc_cdc_init(void) __init; + 
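/* Minimal standalone sketch of the wrap/count cursor arithmetic defined above;
 * demo_cursor, demo_curs_add() and demo_curs_diff() are illustrative stand-ins
 * for smc_host_cursor, smc_curs_add() and smc_curs_diff(), simplified to host
 * byte order and without the atomic access helpers, to show how the wrap
 * sequence number keeps the byte count correct across a ring-buffer wrap.
 */
#include <stdio.h>

struct demo_cursor {            /* simplified host-order cursor */
        unsigned short wrap;    /* window wrap sequence number */
        unsigned int count;     /* offset within the ring buffer */
};

static void demo_curs_add(int size, struct demo_cursor *curs, int value)
{
        curs->count += value;
        if (curs->count >= size) {
                curs->wrap++;
                curs->count -= size;
        }
}

/* difference old..new, assuming old <= new, mirroring smc_curs_diff() */
static int demo_curs_diff(unsigned int size, const struct demo_cursor *old,
                          const struct demo_cursor *new)
{
        if (old->wrap != new->wrap)
                return (size - old->count) + new->count;
        return new->count - old->count;
}

int main(void)
{
        const int size = 16;                    /* tiny buffer for the demo */
        struct demo_cursor cons = { 0, 10 };    /* consumer cursor */
        struct demo_cursor prod = { 0, 10 };    /* producer cursor */

        demo_curs_add(size, &prod, 9);          /* produce 9 bytes, wrapping */
        /* prints: prod wrap=1 count=3, then bytes to receive: 9 */
        printf("prod wrap=%u count=%u\n", (unsigned)prod.wrap, prod.count);
        printf("bytes to receive: %d\n", demo_curs_diff(size, &cons, &prod));
        return 0;
}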
+#endif /* SMC_CDC_H */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 906d88c266c0..537e387b9e85 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -22,6 +22,7 @@ #include "smc_ib.h" #include "smc_wr.h" #include "smc_llc.h" +#include "smc_cdc.h" #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY (600 * HZ) @@ -228,6 +229,7 @@ void smc_conn_free(struct smc_connection *conn) if (!lgr) return; + smc_cdc_tx_dismiss_slots(conn); smc_lgr_unregister_conn(conn); smc_rmb_unuse(conn); smc_sndbuf_unuse(conn); @@ -435,6 +437,11 @@ create: smc_lgr_register_conn(conn); /* add smc conn to lgr */ rc = smc_link_determine_gid(conn->lgr); } + conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; + conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg); +#ifndef KERNEL_HAS_ATOMIC64 + spin_lock_init(&conn->acurs_lock); +#endif out: return rc ? rc : local_contact; @@ -535,6 +542,7 @@ int smc_sndbuf_create(struct smc_sock *smc) conn->sndbuf_desc = sndbuf_desc; conn->sndbuf_size = tmp_bufsize; smc->sk.sk_sndbuf = tmp_bufsize * 2; + atomic_set(&conn->sndbuf_space, tmp_bufsize); return 0; } else { return -ENOMEM; @@ -611,6 +619,7 @@ int smc_rmb_create(struct smc_sock *smc) conn->rmbe_size = tmp_bufsize; conn->rmbe_size_short = tmp_bufsize_short; smc->sk.sk_rcvbuf = tmp_bufsize * 2; + atomic_set(&conn->bytes_to_rcv, 0); return 0; } else { return -ENOMEM; diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 5cbc1e5f7606..14f3f3fa6e52 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -237,6 +237,25 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) return rc; } +void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type, + smc_wr_tx_filter filter, + smc_wr_tx_dismisser dismisser, + unsigned long data) +{ + struct smc_wr_tx_pend_priv *tx_pend; + struct smc_wr_rx_hdr *wr_rx; + int i; + + for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { + wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i]; + if (wr_rx->type != wr_rx_hdr_type) + continue; + tx_pend = &link->wr_tx_pends[i].priv; + if (filter(tx_pend, data)) + dismisser(tx_pend); + } +} + /****************************** receive queue ********************************/ int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler) diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 0b626728ce44..124f857ddaa8 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -36,6 +36,11 @@ typedef void (*smc_wr_tx_handler)(struct smc_wr_tx_pend_priv *, struct smc_link *, enum ib_wc_status); +typedef bool (*smc_wr_tx_filter)(struct smc_wr_tx_pend_priv *, + unsigned long); + +typedef void (*smc_wr_tx_dismisser)(struct smc_wr_tx_pend_priv *); + struct smc_wr_rx_handler { struct hlist_node list; /* hash table collision resolution */ void (*handler)(struct ib_wc *, void *); @@ -87,6 +92,10 @@ int smc_wr_tx_put_slot(struct smc_link *link, int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *wr_pend_priv); void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context); +void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type, + smc_wr_tx_filter filter, + smc_wr_tx_dismisser dismisser, + unsigned long data); int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler); int smc_wr_rx_post_init(struct smc_link *link); -- cgit v1.2.3 From e6727f39004bd95725342b3b343a14c7d59df07f Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:23 +0100 Subject: smc: send data (through RDMA) copy data to kernel send buffer, and trigger RDMA write Signed-off-by: Ursula 
Braun Signed-off-by: David S. Miller --- net/smc/Makefile | 2 +- net/smc/af_smc.c | 13 +- net/smc/smc.h | 1 + net/smc/smc_cdc.c | 7 +- net/smc/smc_tx.c | 438 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_tx.h | 34 +++++ 6 files changed, 491 insertions(+), 4 deletions(-) create mode 100644 net/smc/smc_tx.c create mode 100644 net/smc/smc_tx.h diff --git a/net/smc/Makefile b/net/smc/Makefile index ec0fd0338c6d..fc28d79aec7d 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_SMC) += smc.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o -smc-y += smc_cdc.o +smc-y += smc_cdc.o smc_tx.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 4b1611d46ef7..b62b69c6c718 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -37,6 +37,7 @@ #include "smc_core.h" #include "smc_ib.h" #include "smc_pnet.h" +#include "smc_tx.h" static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group * creation @@ -410,6 +411,8 @@ static int smc_connect_rdma(struct smc_sock *smc) } mutex_unlock(&smc_create_lgr_pending); + smc_tx_init(smc); + out_connected: smc_copy_sock_settings_to_clc(smc); smc->sk.sk_state = SMC_ACTIVE; @@ -751,6 +754,8 @@ static void smc_listen_work(struct work_struct *work) goto decline_rdma; } + smc_tx_init(new_smc); + out_connected: sk_refcnt_debug_inc(newsmcsk); newsmcsk->sk_state = SMC_ACTIVE; @@ -924,7 +929,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (smc->use_fallback) rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); else - rc = sock_no_sendmsg(sock, msg, len); + rc = smc_tx_sendmsg(smc, msg, len); out: release_sock(sk); return rc; @@ -1005,6 +1010,12 @@ static unsigned int smc_poll(struct file *file, struct socket *sock, mask |= smc_accept_poll(sk); if (sk->sk_err) mask |= POLLERR; + if (atomic_read(&smc->conn.sndbuf_space)) { + mask |= POLLOUT | POLLWRNORM; + } else { + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + } /* for now - to be enhanced in follow-on patch */ } diff --git a/net/smc/smc.h b/net/smc/smc.h index a8d4a68aac30..0c47d84c1db1 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -133,6 +133,7 @@ struct smc_connection { atomic_t sndbuf_space; /* remaining space in sndbuf */ u16 tx_cdc_seq; /* sequence # for CDC send */ spinlock_t send_lock; /* protect wr_sends */ + struct work_struct tx_work; /* retry of smc_cdc_msg_send */ struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl. * .prod cf. 
TCP rcv_nxt diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 111d23ff9e17..77fe16967376 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -14,6 +14,7 @@ #include "smc.h" #include "smc_wr.h" #include "smc_cdc.h" +#include "smc_tx.h" /********************************** send *************************************/ @@ -52,7 +53,7 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, smc_curs_read(&cdcpend->cursor, cdcpend->conn), cdcpend->conn); } - /* subsequent patch: wake if send buffer space available */ + smc_tx_sndbuf_nonfull(smc); bh_unlock_sock(&smc->sk); } @@ -204,7 +205,9 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, /* subsequent patch: terminate connection */ /* piggy backed tx info */ - /* subsequent patch: wake receivers if receive buffer space available */ + /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */ + if (diff_cons && smc_tx_prepared_sends(conn)) + smc_tx_sndbuf_nonempty(conn); /* subsequent patch: trigger socket release if connection closed */ diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c new file mode 100644 index 000000000000..d86bef6cb681 --- /dev/null +++ b/net/smc/smc_tx.c @@ -0,0 +1,438 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Manage send buffer. + * Producer: + * Copy user space data into send buffer, if send buffer space available. + * Consumer: + * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available. + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include +#include +#include +#include + +#include "smc.h" +#include "smc_wr.h" +#include "smc_cdc.h" +#include "smc_tx.h" + +/***************************** sndbuf producer *******************************/ + +/* callback implementation for sk.sk_write_space() + * to wakeup sndbuf producers that blocked with smc_tx_wait_memory(). + * called under sk_socket lock. + */ +static void smc_tx_write_space(struct sock *sk) +{ + struct socket *sock = sk->sk_socket; + struct smc_sock *smc = smc_sk(sk); + struct socket_wq *wq; + + /* similar to sk_stream_write_space */ + if (atomic_read(&smc->conn.sndbuf_space) && sock) { + clear_bit(SOCK_NOSPACE, &sock->flags); + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (skwq_has_sleeper(wq)) + wake_up_interruptible_poll(&wq->wait, + POLLOUT | POLLWRNORM | + POLLWRBAND); + if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) + sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); + rcu_read_unlock(); + } +} + +/* Wakeup sndbuf producers that blocked with smc_tx_wait_memory(). + * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space(). + */ +void smc_tx_sndbuf_nonfull(struct smc_sock *smc) +{ + if (smc->sk.sk_socket && + test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags)) + smc->sk.sk_write_space(&smc->sk); +} + +/* blocks sndbuf producer until at least one byte of free space available */ +static int smc_tx_wait_memory(struct smc_sock *smc, int flags) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct smc_connection *conn = &smc->conn; + struct sock *sk = &smc->sk; + bool noblock; + long timeo; + int rc = 0; + + /* similar to sk_stream_wait_memory */ + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); + noblock = timeo ? 
false : true; + add_wait_queue(sk_sleep(sk), &wait); + while (1) { + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + if (sk->sk_err || + (sk->sk_shutdown & SEND_SHUTDOWN) || + conn->local_tx_ctrl.conn_state_flags.peer_done_writing) { + rc = -EPIPE; + break; + } + if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) { + rc = -ECONNRESET; + break; + } + if (!timeo) { + if (noblock) + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + rc = -EAGAIN; + break; + } + if (signal_pending(current)) { + rc = sock_intr_errno(timeo); + break; + } + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); + if (atomic_read(&conn->sndbuf_space)) + break; /* at least 1 byte of free space available */ + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + sk->sk_write_pending++; + sk_wait_event(sk, &timeo, + sk->sk_err || + (sk->sk_shutdown & SEND_SHUTDOWN) || + smc_cdc_rxed_any_close_or_senddone(conn) || + atomic_read(&conn->sndbuf_space), + &wait); + sk->sk_write_pending--; + } + remove_wait_queue(sk_sleep(sk), &wait); + return rc; +} + +/* sndbuf producer: main API called by socket layer. + * called under sock lock. + */ +int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) +{ + size_t copylen, send_done = 0, send_remaining = len; + size_t chunk_len, chunk_off, chunk_len_sum; + struct smc_connection *conn = &smc->conn; + union smc_host_cursor prep; + struct sock *sk = &smc->sk; + char *sndbuf_base; + int tx_cnt_prep; + int writespace; + int rc, chunk; + + /* This should be in poll */ + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); + + if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) { + rc = -EPIPE; + goto out_err; + } + + while (msg_data_left(msg)) { + if (sk->sk_state == SMC_INIT) + return -ENOTCONN; + if (smc->sk.sk_shutdown & SEND_SHUTDOWN || + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) + return -EPIPE; + if (smc_cdc_rxed_any_close(conn)) + return send_done ?: -ECONNRESET; + + if (!atomic_read(&conn->sndbuf_space)) { + rc = smc_tx_wait_memory(smc, msg->msg_flags); + if (rc) { + if (send_done) + return send_done; + goto out_err; + } + continue; + } + + /* initialize variables for 1st iteration of subsequent loop */ + /* could be just 1 byte, even after smc_tx_wait_memory above */ + writespace = atomic_read(&conn->sndbuf_space); + /* not more than what user space asked for */ + copylen = min_t(size_t, send_remaining, writespace); + /* determine start of sndbuf */ + sndbuf_base = conn->sndbuf_desc->cpu_addr; + smc_curs_write(&prep, + smc_curs_read(&conn->tx_curs_prep, conn), + conn); + tx_cnt_prep = prep.count; + /* determine chunks where to write into sndbuf */ + /* either unwrapped case, or 1st chunk of wrapped case */ + chunk_len = min_t(size_t, + copylen, conn->sndbuf_size - tx_cnt_prep); + chunk_len_sum = chunk_len; + chunk_off = tx_cnt_prep; + for (chunk = 0; chunk < 2; chunk++) { + rc = memcpy_from_msg(sndbuf_base + chunk_off, + msg, chunk_len); + if (rc) { + if (send_done) + return send_done; + goto out_err; + } + send_done += chunk_len; + send_remaining -= chunk_len; + + if (chunk_len_sum == copylen) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + chunk_len = copylen - chunk_len; /* remainder */ + chunk_len_sum += chunk_len; + chunk_off = 0; /* modulo offset in send ring buffer */ + } + /* update cursors */ + smc_curs_add(conn->sndbuf_size, &prep, copylen); + smc_curs_write(&conn->tx_curs_prep, + smc_curs_read(&prep, conn), + conn); + /* increased in send tasklet smc_cdc_tx_handler() */ + smp_mb__before_atomic(); + atomic_sub(copylen, 
&conn->sndbuf_space); + /* guarantee 0 <= sndbuf_space <= sndbuf_size */ + smp_mb__after_atomic(); + /* since we just produced more new data into sndbuf, + * trigger sndbuf consumer: RDMA write into peer RMBE and CDC + */ + smc_tx_sndbuf_nonempty(conn); + } /* while (msg_data_left(msg)) */ + + return send_done; + +out_err: + rc = sk_stream_error(sk, msg->msg_flags, rc); + /* make sure we wake any epoll edge trigger waiter */ + if (unlikely(rc == -EAGAIN)) + sk->sk_write_space(sk); + return rc; +} + +/***************************** sndbuf consumer *******************************/ + +/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ +static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, + int num_sges, struct ib_sge sges[]) +{ + struct smc_link_group *lgr = conn->lgr; + struct ib_send_wr *failed_wr = NULL; + struct ib_rdma_wr rdma_wr; + struct smc_link *link; + int rc; + + memset(&rdma_wr, 0, sizeof(rdma_wr)); + link = &lgr->lnk[SMC_SINGLE_LINK]; + rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); + rdma_wr.wr.sg_list = sges; + rdma_wr.wr.num_sge = num_sges; + rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; + rdma_wr.remote_addr = + lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + + /* RMBE within RMB */ + ((conn->peer_conn_idx - 1) * conn->peer_rmbe_size) + + /* offset within RMBE */ + peer_rmbe_offset; + rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; + rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr); + if (rc) + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; + return rc; +} + +/* sndbuf consumer */ +static inline void smc_tx_advance_cursors(struct smc_connection *conn, + union smc_host_cursor *prod, + union smc_host_cursor *sent, + size_t len) +{ + smc_curs_add(conn->peer_rmbe_size, prod, len); + /* increased in recv tasklet smc_cdc_msg_rcv() */ + smp_mb__before_atomic(); + /* data in flight reduces usable snd_wnd */ + atomic_sub(len, &conn->peer_rmbe_space); + /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */ + smp_mb__after_atomic(); + smc_curs_add(conn->sndbuf_size, sent, len); +} + +/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; + * usable snd_wnd as max transmit + */ +static int smc_tx_rdma_writes(struct smc_connection *conn) +{ + size_t src_off, src_len, dst_off, dst_len; /* current chunk values */ + size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk; + union smc_host_cursor sent, prep, prod, cons; + struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; + struct smc_link_group *lgr = conn->lgr; + int to_send, rmbespace; + struct smc_link *link; + int num_sges; + int rc; + + /* source: sndbuf */ + smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn); + smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn); + /* cf. wmem_alloc - (snd_max - snd_una) */ + to_send = smc_curs_diff(conn->sndbuf_size, &sent, &prep); + if (to_send <= 0) + return 0; + + /* destination: RMBE */ + /* cf. snd_wnd */ + rmbespace = atomic_read(&conn->peer_rmbe_space); + if (rmbespace <= 0) + return 0; + smc_curs_write(&prod, + smc_curs_read(&conn->local_tx_ctrl.prod, conn), + conn); + smc_curs_write(&cons, + smc_curs_read(&conn->local_rx_ctrl.cons, conn), + conn); + + /* if usable snd_wnd closes ask peer to advertise once it opens again */ + conn->local_tx_ctrl.prod_flags.write_blocked = (to_send >= rmbespace); + /* cf. 
usable snd_wnd */ + len = min(to_send, rmbespace); + + /* initialize variables for first iteration of subsequent nested loop */ + link = &lgr->lnk[SMC_SINGLE_LINK]; + dst_off = prod.count; + if (prod.wrap == cons.wrap) { + /* the filled destination area is unwrapped, + * hence the available free destination space is wrapped + * and we need 2 destination chunks of sum len; start with 1st + * which is limited by what's available in sndbuf + */ + dst_len = min_t(size_t, + conn->peer_rmbe_size - prod.count, len); + } else { + /* the filled destination area is wrapped, + * hence the available free destination space is unwrapped + * and we need a single destination chunk of entire len + */ + dst_len = len; + } + dst_len_sum = dst_len; + src_off = sent.count; + /* dst_len determines the maximum src_len */ + if (sent.count + dst_len <= conn->sndbuf_size) { + /* unwrapped src case: single chunk of entire dst_len */ + src_len = dst_len; + } else { + /* wrapped src case: 2 chunks of sum dst_len; start with 1st: */ + src_len = conn->sndbuf_size - sent.count; + } + src_len_sum = src_len; + for (dstchunk = 0; dstchunk < 2; dstchunk++) { + num_sges = 0; + for (srcchunk = 0; srcchunk < 2; srcchunk++) { + sges[srcchunk].addr = + conn->sndbuf_desc->dma_addr[SMC_SINGLE_LINK] + + src_off; + sges[srcchunk].length = src_len; + sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; + num_sges++; + src_off += src_len; + if (src_off >= conn->sndbuf_size) + src_off -= conn->sndbuf_size; + /* modulo in send ring */ + if (src_len_sum == dst_len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + src_len = dst_len - src_len; /* remainder */ + src_len_sum += src_len; + } + rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); + if (rc) + return rc; + if (dst_len_sum == len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + dst_off = 0; /* modulo offset in RMBE ring buffer */ + dst_len = len - dst_len; /* remainder */ + dst_len_sum += dst_len; + src_len = min_t(int, + dst_len, conn->sndbuf_size - sent.count); + src_len_sum = src_len; + } + + smc_tx_advance_cursors(conn, &prod, &sent, len); + /* update connection's cursors with advanced local cursors */ + smc_curs_write(&conn->local_tx_ctrl.prod, + smc_curs_read(&prod, conn), + conn); + /* dst: peer RMBE */ + smc_curs_write(&conn->tx_curs_sent, + smc_curs_read(&sent, conn), + conn); + /* src: local sndbuf */ + + return 0; +} + +/* Wakeup sndbuf consumers from any context (IRQ or process) + * since there is more data to transmit; usable snd_wnd as max transmit + */ +int smc_tx_sndbuf_nonempty(struct smc_connection *conn) +{ + struct smc_cdc_tx_pend *pend; + struct smc_wr_buf *wr_buf; + int rc; + + spin_lock_bh(&conn->send_lock); + rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], &wr_buf, + &pend); + if (rc < 0) { + if (rc == -EBUSY) { + rc = 0; + schedule_work(&conn->tx_work); + } + goto out_unlock; + } + + rc = smc_tx_rdma_writes(conn); + if (rc) { + smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], + (struct smc_wr_tx_pend_priv *)pend); + goto out_unlock; + } + + rc = smc_cdc_msg_send(conn, wr_buf, pend); + +out_unlock: + spin_unlock_bh(&conn->send_lock); + return rc; +} + +/* Wakeup sndbuf consumers from process context + * since there is more data to transmit + */ +static void smc_tx_work(struct work_struct *work) +{ + struct smc_connection *conn = container_of(work, + struct smc_connection, + tx_work); + struct smc_sock *smc = container_of(conn, struct smc_sock, conn); + + 
lock_sock(&smc->sk); + smc_tx_sndbuf_nonempty(conn); + release_sock(&smc->sk); +} + +/***************************** send initialize *******************************/ + +/* Initialize send properties on connection establishment. NB: not __init! */ +void smc_tx_init(struct smc_sock *smc) +{ + smc->sk.sk_write_space = smc_tx_write_space; + INIT_WORK(&smc->conn.tx_work, smc_tx_work); + spin_lock_init(&smc->conn.send_lock); +} diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h new file mode 100644 index 000000000000..931c66645624 --- /dev/null +++ b/net/smc/smc_tx.h @@ -0,0 +1,34 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Manage send buffer + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#ifndef SMC_TX_H +#define SMC_TX_H + +#include +#include + +#include "smc.h" +#include "smc_cdc.h" + +static inline int smc_tx_prepared_sends(struct smc_connection *conn) +{ + union smc_host_cursor sent, prep; + + smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn); + smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn); + return smc_curs_diff(conn->sndbuf_size, &sent, &prep); +} + +void smc_tx_init(struct smc_sock *smc); +int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len); +int smc_tx_sndbuf_nonempty(struct smc_connection *conn); +void smc_tx_sndbuf_nonfull(struct smc_sock *smc); + +#endif /* SMC_TX_H */ -- cgit v1.2.3 From 952310ccf2d861966cfb8706f16d5e4eb585edb7 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:24 +0100 Subject: smc: receive data from RMBE move RMBE data into user space buffer and update managing cursors Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/Makefile | 2 +- net/smc/af_smc.c | 7 +- net/smc/smc.h | 4 + net/smc/smc_cdc.c | 6 +- net/smc/smc_core.c | 10 +++ net/smc/smc_rx.c | 217 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_rx.h | 23 ++++++ net/smc/smc_tx.c | 37 +++++++++ net/smc/smc_tx.h | 1 + 9 files changed, 304 insertions(+), 3 deletions(-) create mode 100644 net/smc/smc_rx.c create mode 100644 net/smc/smc_rx.h diff --git a/net/smc/Makefile b/net/smc/Makefile index fc28d79aec7d..6255e29090b5 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_SMC) += smc.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o -smc-y += smc_cdc.o smc_tx.o +smc-y += smc_cdc.o smc_tx.o smc_rx.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index b62b69c6c718..fc9c51c549e5 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -38,6 +38,7 @@ #include "smc_ib.h" #include "smc_pnet.h" #include "smc_tx.h" +#include "smc_rx.h" static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group * creation @@ -412,6 +413,7 @@ static int smc_connect_rdma(struct smc_sock *smc) mutex_unlock(&smc_create_lgr_pending); smc_tx_init(smc); + smc_rx_init(smc); out_connected: smc_copy_sock_settings_to_clc(smc); @@ -755,6 +757,7 @@ static void smc_listen_work(struct work_struct *work) } smc_tx_init(new_smc); + smc_rx_init(new_smc); out_connected: sk_refcnt_debug_inc(newsmcsk); @@ -950,7 +953,7 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (smc->use_fallback) rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); else - rc = sock_no_recvmsg(sock, msg, len, flags); + rc = smc_rx_recvmsg(smc, msg, len, flags); out: release_sock(sk); return rc; @@ -1016,6 +1019,8 @@ static unsigned int smc_poll(struct file *file, struct socket *sock, 
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); } + if (atomic_read(&smc->conn.bytes_to_rcv)) + mask |= POLLIN | POLLRDNORM; /* for now - to be enhanced in follow-on patch */ } diff --git a/net/smc/smc.h b/net/smc/smc.h index 0c47d84c1db1..2bb1540b5103 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -115,6 +115,10 @@ struct smc_connection { struct smc_buf_desc *rmb_desc; /* RMBE descriptor */ int rmbe_size; /* RMBE size <== sock rmem */ int rmbe_size_short;/* compressed notation */ + int rmbe_update_limit; + /* lower limit for consumer + * cursor update + */ struct smc_host_cdc_msg local_tx_ctrl; /* host byte order staging * buffer for CDC msg send diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 77fe16967376..c0a69300b2f4 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -15,6 +15,7 @@ #include "smc_wr.h" #include "smc_cdc.h" #include "smc_tx.h" +#include "smc_rx.h" /********************************** send *************************************/ @@ -197,6 +198,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, atomic_add(diff_prod, &conn->bytes_to_rcv); /* guarantee 0 <= bytes_to_rcv <= rmbe_size */ smp_mb__after_atomic(); + smc->sk.sk_data_ready(&smc->sk); } if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) @@ -216,7 +218,9 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, return; /* data available */ - /* subsequent patch: send delayed ack, wake receivers */ + if ((conn->local_rx_ctrl.prod_flags.write_blocked) || + (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req)) + smc_tx_consumer_update(conn); } /* called under tasklet context */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 537e387b9e85..e5c63950fc28 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -489,6 +489,15 @@ struct smc_buf_desc *smc_rmb_get_slot(struct smc_link_group *lgr, return NULL; } +/* one of the conditions for announcing a receiver's current window size is + * that it "results in a minimum increase in the window size of 10% of the + * receive buffer space" [RFC7609] + */ +static inline int smc_rmb_wnd_update_limit(int rmbe_size) +{ + return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); +} + /* create the tx buffer for an SMC socket */ int smc_sndbuf_create(struct smc_sock *smc) { @@ -620,6 +629,7 @@ int smc_rmb_create(struct smc_sock *smc) conn->rmbe_size_short = tmp_bufsize_short; smc->sk.sk_rcvbuf = tmp_bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); + conn->rmbe_update_limit = smc_rmb_wnd_update_limit(tmp_bufsize); return 0; } else { return -ENOMEM; diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c new file mode 100644 index 000000000000..5d1878732f46 --- /dev/null +++ b/net/smc/smc_rx.c @@ -0,0 +1,217 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Manage RMBE + * copy new RMBE data into user space + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include +#include +#include + +#include "smc.h" +#include "smc_core.h" +#include "smc_cdc.h" +#include "smc_tx.h" /* smc_tx_consumer_update() */ +#include "smc_rx.h" + +/* callback implementation for sk.sk_data_ready() + * to wakeup rcvbuf consumers that blocked with smc_rx_wait_data(). + * indirectly called by smc_cdc_msg_recv_action(). 
+ */ +static void smc_rx_data_ready(struct sock *sk) +{ + struct socket_wq *wq; + + /* derived from sock_def_readable() */ + /* called already in smc_listen_work() */ + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (skwq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | + POLLRDNORM | POLLRDBAND); + if ((sk->sk_shutdown == SHUTDOWN_MASK) || + (sk->sk_state == SMC_CLOSED)) + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); + else + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + rcu_read_unlock(); +} + +/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted + * @smc smc socket + * @timeo pointer to max seconds to wait, pointer to value 0 for no timeout + * Returns: + * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown. + * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted). + */ +static int smc_rx_wait_data(struct smc_sock *smc, long *timeo) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct smc_connection *conn = &smc->conn; + struct sock *sk = &smc->sk; + int rc; + + if (atomic_read(&conn->bytes_to_rcv)) + return 1; + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + add_wait_queue(sk_sleep(sk), &wait); + rc = sk_wait_event(sk, timeo, + sk->sk_err || + sk->sk_shutdown & RCV_SHUTDOWN || + sock_flag(sk, SOCK_DONE) || + atomic_read(&conn->bytes_to_rcv) || + smc_cdc_rxed_any_close_or_senddone(conn), + &wait); + remove_wait_queue(sk_sleep(sk), &wait); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + return rc; +} + +/* rcvbuf consumer: main API called by socket layer. + * called under sk lock. + */ +int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len, + int flags) +{ + size_t copylen, read_done = 0, read_remaining = len; + size_t chunk_len, chunk_off, chunk_len_sum; + struct smc_connection *conn = &smc->conn; + union smc_host_cursor cons; + int readable, chunk; + char *rcvbuf_base; + struct sock *sk; + long timeo; + int target; /* Read at least these many bytes */ + int rc; + + if (unlikely(flags & MSG_ERRQUEUE)) + return -EINVAL; /* future work for sk.sk_family == AF_SMC */ + if (flags & MSG_OOB) + return -EINVAL; /* future work */ + + sk = &smc->sk; + if (sk->sk_state == SMC_LISTEN) + return -ENOTCONN; + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); + + msg->msg_namelen = 0; + /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */ + rcvbuf_base = conn->rmb_desc->cpu_addr; + + do { /* while (read_remaining) */ + if (read_done >= target) + break; + + if (atomic_read(&conn->bytes_to_rcv)) + goto copy; + + if (read_done) { + if (sk->sk_err || + sk->sk_state == SMC_CLOSED || + (sk->sk_shutdown & RCV_SHUTDOWN) || + !timeo || + signal_pending(current) || + smc_cdc_rxed_any_close_or_senddone(conn) || + conn->local_tx_ctrl.conn_state_flags. + peer_conn_abort) + break; + } else { + if (sock_flag(sk, SOCK_DONE)) + break; + if (sk->sk_err) { + read_done = sock_error(sk); + break; + } + if (sk->sk_shutdown & RCV_SHUTDOWN || + smc_cdc_rxed_any_close_or_senddone(conn) || + conn->local_tx_ctrl.conn_state_flags. + peer_conn_abort) + break; + if (sk->sk_state == SMC_CLOSED) { + if (!sock_flag(sk, SOCK_DONE)) { + /* This occurs when user tries to read + * from never connected socket. 
+ */ + read_done = -ENOTCONN; + break; + } + break; + } + if (signal_pending(current)) { + read_done = sock_intr_errno(timeo); + break; + } + } + + if (!atomic_read(&conn->bytes_to_rcv)) { + smc_rx_wait_data(smc, &timeo); + continue; + } + +copy: + /* initialize variables for 1st iteration of subsequent loop */ + /* could be just 1 byte, even after smc_rx_wait_data above */ + readable = atomic_read(&conn->bytes_to_rcv); + /* not more than what user space asked for */ + copylen = min_t(size_t, read_remaining, readable); + smc_curs_write(&cons, + smc_curs_read(&conn->local_tx_ctrl.cons, conn), + conn); + /* determine chunks where to read from rcvbuf */ + /* either unwrapped case, or 1st chunk of wrapped case */ + chunk_len = min_t(size_t, + copylen, conn->rmbe_size - cons.count); + chunk_len_sum = chunk_len; + chunk_off = cons.count; + for (chunk = 0; chunk < 2; chunk++) { + if (!(flags & MSG_TRUNC)) { + rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off, + chunk_len); + if (rc) { + if (!read_done) + read_done = -EFAULT; + goto out; + } + } + read_remaining -= chunk_len; + read_done += chunk_len; + + if (chunk_len_sum == copylen) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + chunk_len = copylen - chunk_len; /* remainder */ + chunk_len_sum += chunk_len; + chunk_off = 0; /* modulo offset in recv ring buffer */ + } + + /* update cursors */ + if (!(flags & MSG_PEEK)) { + smc_curs_add(conn->rmbe_size, &cons, copylen); + /* increased in recv tasklet smc_cdc_msg_rcv() */ + smp_mb__before_atomic(); + atomic_sub(copylen, &conn->bytes_to_rcv); + /* guarantee 0 <= bytes_to_rcv <= rmbe_size */ + smp_mb__after_atomic(); + smc_curs_write(&conn->local_tx_ctrl.cons, + smc_curs_read(&cons, conn), + conn); + /* send consumer cursor update if required */ + /* similar to advertising new TCP rcv_wnd if required */ + smc_tx_consumer_update(conn); + } + } while (read_remaining); +out: + return read_done; +} + +/* Initialize receive properties on connection establishment. NB: not __init! */ +void smc_rx_init(struct smc_sock *smc) +{ + smc->sk.sk_data_ready = smc_rx_data_ready; +} diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h new file mode 100644 index 000000000000..b5b80e1f8b0f --- /dev/null +++ b/net/smc/smc_rx.h @@ -0,0 +1,23 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Manage RMBE + * + * Copyright IBM Corp. 
2016 + * + * Author(s): Ursula Braun + */ + +#ifndef SMC_RX_H +#define SMC_RX_H + +#include +#include + +#include "smc.h" + +void smc_rx_init(struct smc_sock *smc); +int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len, + int flags); + +#endif /* SMC_RX_H */ diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index d86bef6cb681..7e8799fcd3a0 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -427,6 +427,43 @@ static void smc_tx_work(struct work_struct *work) release_sock(&smc->sk); } +void smc_tx_consumer_update(struct smc_connection *conn) +{ + union smc_host_cursor cfed, cons; + struct smc_cdc_tx_pend *pend; + struct smc_wr_buf *wr_buf; + int to_confirm, rc; + + smc_curs_write(&cons, + smc_curs_read(&conn->local_tx_ctrl.cons, conn), + conn); + smc_curs_write(&cfed, + smc_curs_read(&conn->rx_curs_confirmed, conn), + conn); + to_confirm = smc_curs_diff(conn->rmbe_size, &cfed, &cons); + + if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || + ((to_confirm > conn->rmbe_update_limit) && + ((to_confirm > (conn->rmbe_size / 2)) || + conn->local_rx_ctrl.prod_flags.write_blocked))) { + rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], + &wr_buf, &pend); + if (!rc) + rc = smc_cdc_msg_send(conn, wr_buf, pend); + if (rc < 0) { + schedule_work(&conn->tx_work); + return; + } + smc_curs_write(&conn->rx_curs_confirmed, + smc_curs_read(&conn->local_tx_ctrl.cons, conn), + conn); + conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0; + } + if (conn->local_rx_ctrl.prod_flags.write_blocked && + !atomic_read(&conn->bytes_to_rcv)) + conn->local_rx_ctrl.prod_flags.write_blocked = 0; +} + /***************************** send initialize *******************************/ /* Initialize send properties on connection establishment. NB: not __init! */ diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h index 931c66645624..1d6a0dcdcfe6 100644 --- a/net/smc/smc_tx.h +++ b/net/smc/smc_tx.h @@ -30,5 +30,6 @@ void smc_tx_init(struct smc_sock *smc); int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len); int smc_tx_sndbuf_nonempty(struct smc_connection *conn); void smc_tx_sndbuf_nonfull(struct smc_sock *smc); +void smc_tx_consumer_update(struct smc_connection *conn); #endif /* SMC_TX_H */ -- cgit v1.2.3 From b38d732477e4211351b2680e805d944f66bceec9 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:25 +0100 Subject: smc: socket closing and linkgroup cleanup smc_shutdown() and smc_release() handling delayed linkgroup cleanup for linkgroups without connections Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/Makefile | 2 +- net/smc/af_smc.c | 127 ++++++++++++--- net/smc/smc.h | 18 +++ net/smc/smc_cdc.c | 23 ++- net/smc/smc_cdc.h | 1 + net/smc/smc_close.c | 441 ++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_close.h | 28 ++++ net/smc/smc_core.c | 7 +- net/smc/smc_tx.c | 8 + net/smc/smc_wr.c | 47 ++++-- net/smc/smc_wr.h | 2 + 11 files changed, 668 insertions(+), 36 deletions(-) create mode 100644 net/smc/smc_close.c create mode 100644 net/smc/smc_close.h diff --git a/net/smc/Makefile b/net/smc/Makefile index 6255e29090b5..5cf0cafaa208 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_SMC) += smc.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o -smc-y += smc_cdc.o smc_tx.o smc_rx.o +smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index fc9c51c549e5..3f543d58bc5c 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -39,6 +39,7 @@ #include "smc_pnet.h" #include "smc_tx.h" #include "smc_rx.h" +#include "smc_close.h" static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group * creation @@ -70,14 +71,29 @@ static int smc_release(struct socket *sock) { struct sock *sk = sock->sk; struct smc_sock *smc; + int rc = 0; if (!sk) goto out; smc = smc_sk(sk); - lock_sock(sk); + sock_hold(sk); + if (sk->sk_state == SMC_LISTEN) + /* smc_close_non_accepted() is called and acquires + * sock lock for child sockets again + */ + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + else + lock_sock(sk); - sk->sk_state = SMC_CLOSED; + if (smc->use_fallback) { + sk->sk_state = SMC_CLOSED; + sk->sk_state_change(sk); + } else { + rc = smc_close_active(smc); + sock_set_flag(sk, SOCK_DEAD); + sk->sk_shutdown |= SHUTDOWN_MASK; + } if (smc->clcsock) { sock_release(smc->clcsock); smc->clcsock = NULL; @@ -86,11 +102,18 @@ static int smc_release(struct socket *sock) /* detach socket */ sock_orphan(sk); sock->sk = NULL; + if (smc->use_fallback) { + schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN); + } else if (sk->sk_state == SMC_CLOSED) { + smc_conn_free(&smc->conn); + schedule_delayed_work(&smc->sock_put_work, + SMC_CLOSE_SOCK_PUT_DELAY); + } release_sock(sk); sock_put(sk); out: - return 0; + return rc; } static void smc_destruct(struct sock *sk) @@ -120,6 +143,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock) INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); INIT_LIST_HEAD(&smc->accept_q); spin_lock_init(&smc->accept_q_lock); + INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work); sk_refcnt_debug_inc(sk); return sk; @@ -417,7 +441,8 @@ static int smc_connect_rdma(struct smc_sock *smc) out_connected: smc_copy_sock_settings_to_clc(smc); - smc->sk.sk_state = SMC_ACTIVE; + if (smc->sk.sk_state == SMC_INIT) + smc->sk.sk_state = SMC_ACTIVE; return rc ? 
rc : local_contact; @@ -559,8 +584,8 @@ static void smc_accept_unlink(struct sock *sk) /* remove a sock from the accept queue to bind it to a new socket created * for a socket accept call from user space */ -static struct sock *smc_accept_dequeue(struct sock *parent, - struct socket *new_sock) +struct sock *smc_accept_dequeue(struct sock *parent, + struct socket *new_sock) { struct smc_sock *isk, *n; struct sock *new_sk; @@ -581,11 +606,17 @@ static struct sock *smc_accept_dequeue(struct sock *parent, } /* clean up for a created but never accepted sock */ -static void smc_close_non_accepted(struct sock *sk) +void smc_close_non_accepted(struct sock *sk) { struct smc_sock *smc = smc_sk(sk); sock_hold(sk); + lock_sock(sk); + if (!sk->sk_lingertime) + /* wait for peer closing */ + sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT; + if (!smc->use_fallback) + smc_close_active(smc); if (smc->clcsock) { struct socket *tcp; @@ -593,7 +624,16 @@ static void smc_close_non_accepted(struct sock *sk) smc->clcsock = NULL; sock_release(tcp); } - /* more closing stuff to be added with socket closing patch */ + sock_set_flag(sk, SOCK_DEAD); + sk->sk_shutdown |= SHUTDOWN_MASK; + if (smc->use_fallback) { + schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN); + } else { + smc_conn_free(&smc->conn); + schedule_delayed_work(&smc->sock_put_work, + SMC_CLOSE_SOCK_PUT_DELAY); + } + release_sock(sk); sock_put(sk); } @@ -761,11 +801,12 @@ static void smc_listen_work(struct work_struct *work) out_connected: sk_refcnt_debug_inc(newsmcsk); - newsmcsk->sk_state = SMC_ACTIVE; + if (newsmcsk->sk_state == SMC_INIT) + newsmcsk->sk_state = SMC_ACTIVE; enqueue: if (local_contact == SMC_FIRST_CONTACT) mutex_unlock(&smc_create_lgr_pending); - lock_sock(&lsmc->sk); + lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); if (lsmc->sk.sk_state == SMC_LISTEN) { smc_accept_enqueue(&lsmc->sk, newsmcsk); } else { /* no longer listening */ @@ -791,6 +832,7 @@ decline_rdma: out_err: newsmcsk->sk_state = SMC_CLOSED; + smc_conn_free(&new_smc->conn); goto enqueue; /* queue new sock with sk_err set */ } @@ -911,7 +953,8 @@ static int smc_getname(struct socket *sock, struct sockaddr *addr, { struct smc_sock *smc; - if (peer && (sock->sk->sk_state != SMC_ACTIVE)) + if (peer && (sock->sk->sk_state != SMC_ACTIVE) && + (sock->sk->sk_state != SMC_APPCLOSEWAIT1)) return -ENOTCONN; smc = smc_sk(sock->sk); @@ -927,7 +970,9 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) smc = smc_sk(sk); lock_sock(sk); - if (sk->sk_state != SMC_ACTIVE) + if ((sk->sk_state != SMC_ACTIVE) && + (sk->sk_state != SMC_APPCLOSEWAIT1) && + (sk->sk_state != SMC_INIT)) goto out; if (smc->use_fallback) rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); @@ -947,13 +992,21 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, smc = smc_sk(sk); lock_sock(sk); - if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED)) + if ((sk->sk_state == SMC_INIT) || + (sk->sk_state == SMC_LISTEN) || + (sk->sk_state == SMC_CLOSED)) + goto out; + + if (sk->sk_state == SMC_PEERFINCLOSEWAIT) { + rc = 0; goto out; + } if (smc->use_fallback) rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); else rc = smc_rx_recvmsg(smc, msg, len, flags); + out: release_sock(sk); return rc; @@ -1013,7 +1066,8 @@ static unsigned int smc_poll(struct file *file, struct socket *sock, mask |= smc_accept_poll(sk); if (sk->sk_err) mask |= POLLERR; - if (atomic_read(&smc->conn.sndbuf_space)) { + if (atomic_read(&smc->conn.sndbuf_space) 
|| + (sk->sk_shutdown & SEND_SHUTDOWN)) { mask |= POLLOUT | POLLWRNORM; } else { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); @@ -1021,7 +1075,14 @@ static unsigned int smc_poll(struct file *file, struct socket *sock, } if (atomic_read(&smc->conn.bytes_to_rcv)) mask |= POLLIN | POLLRDNORM; - /* for now - to be enhanced in follow-on patch */ + if ((sk->sk_shutdown == SHUTDOWN_MASK) || + (sk->sk_state == SMC_CLOSED)) + mask |= POLLHUP; + if (sk->sk_shutdown & RCV_SHUTDOWN) + mask |= POLLIN | POLLRDNORM | POLLRDHUP; + if (sk->sk_state == SMC_APPCLOSEWAIT1) + mask |= POLLIN; + } return mask; @@ -1032,31 +1093,53 @@ static int smc_shutdown(struct socket *sock, int how) struct sock *sk = sock->sk; struct smc_sock *smc; int rc = -EINVAL; + int rc1 = 0; smc = smc_sk(sk); if ((how < SHUT_RD) || (how > SHUT_RDWR)) - goto out_err; + return rc; lock_sock(sk); rc = -ENOTCONN; - if (sk->sk_state == SMC_CLOSED) + if ((sk->sk_state != SMC_LISTEN) && + (sk->sk_state != SMC_ACTIVE) && + (sk->sk_state != SMC_PEERCLOSEWAIT1) && + (sk->sk_state != SMC_PEERCLOSEWAIT2) && + (sk->sk_state != SMC_APPCLOSEWAIT1) && + (sk->sk_state != SMC_APPCLOSEWAIT2) && + (sk->sk_state != SMC_APPFINCLOSEWAIT)) goto out; if (smc->use_fallback) { rc = kernel_sock_shutdown(smc->clcsock, how); sk->sk_shutdown = smc->clcsock->sk->sk_shutdown; if (sk->sk_shutdown == SHUTDOWN_MASK) sk->sk_state = SMC_CLOSED; - } else { - rc = sock_no_shutdown(sock, how); + goto out; + } + switch (how) { + case SHUT_RDWR: /* shutdown in both directions */ + rc = smc_close_active(smc); + break; + case SHUT_WR: + rc = smc_close_shutdown_write(smc); + break; + case SHUT_RD: + if (sk->sk_state == SMC_LISTEN) + rc = smc_close_active(smc); + else + rc = 0; + /* nothing more to do because peer is not involved */ + break; } + rc1 = kernel_sock_shutdown(smc->clcsock, how); + /* map sock_shutdown_cmd constants to sk_shutdown value range */ + sk->sk_shutdown |= how + 1; out: release_sock(sk); - -out_err: - return rc; + return rc ? 
rc : rc1; } static int smc_setsockopt(struct socket *sock, int level, int optname, diff --git a/net/smc/smc.h b/net/smc/smc.h index 2bb1540b5103..959a5d2014ab 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -30,6 +30,16 @@ enum smc_state { /* possible states of an SMC socket */ SMC_INIT = 2, SMC_CLOSED = 7, SMC_LISTEN = 10, + /* normal close */ + SMC_PEERCLOSEWAIT1 = 20, + SMC_PEERCLOSEWAIT2 = 21, + SMC_APPFINCLOSEWAIT = 24, + SMC_APPCLOSEWAIT1 = 22, + SMC_APPCLOSEWAIT2 = 23, + SMC_PEERFINCLOSEWAIT = 25, + /* abnormal close */ + SMC_PEERABORTWAIT = 26, + SMC_PROCESSABORT = 27, }; struct smc_link_group; @@ -164,7 +174,13 @@ struct smc_sock { /* smc sock container */ struct work_struct smc_listen_work;/* prepare new accept socket */ struct list_head accept_q; /* sockets to be accepted */ spinlock_t accept_q_lock; /* protects accept_q */ + struct delayed_work sock_put_work; /* final socket freeing */ bool use_fallback; /* fallback to tcp */ + u8 wait_close_tx_prepared : 1; + /* shutdown wr or close + * started, waiting for unsent + * data to be sent + */ }; static inline struct smc_sock *smc_sk(const struct sock *sk) @@ -250,5 +266,7 @@ void smc_conn_free(struct smc_connection *conn); int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr, struct smc_ib_device *smcibdev, u8 ibport, struct smc_clc_msg_local *lcl, int srv_first_contact); +struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock); +void smc_close_non_accepted(struct sock *sk); #endif /* __SMC_H */ diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index c0a69300b2f4..5a339493872e 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -16,6 +16,7 @@ #include "smc_cdc.h" #include "smc_tx.h" #include "smc_rx.h" +#include "smc_close.h" /********************************** send *************************************/ @@ -55,6 +56,9 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, cdcpend->conn); } smc_tx_sndbuf_nonfull(smc); + if (smc->sk.sk_state != SMC_ACTIVE) + /* wake up smc_close_wait_tx_pends() */ + smc->sk.sk_state_change(&smc->sk); bh_unlock_sock(&smc->sk); } @@ -149,6 +153,14 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) (unsigned long)conn); } +bool smc_cdc_tx_has_pending(struct smc_connection *conn) +{ + struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + + return smc_wr_tx_has_pending(link, SMC_CDC_MSG_TYPE, + smc_cdc_tx_filter, (unsigned long)conn); +} + /********************************* receive ***********************************/ static inline bool smc_cdc_before(u16 seq1, u16 seq2) @@ -201,15 +213,20 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, smc->sk.sk_data_ready(&smc->sk); } - if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) + if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) { smc->sk.sk_err = ECONNRESET; + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; + } if (smc_cdc_rxed_any_close_or_senddone(conn)) - /* subsequent patch: terminate connection */ + smc_close_passive_received(smc); /* piggy backed tx info */ /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */ - if (diff_cons && smc_tx_prepared_sends(conn)) + if (diff_cons && smc_tx_prepared_sends(conn)) { smc_tx_sndbuf_nonempty(conn); + /* trigger socket release if connection closed */ + smc_close_wake_tx_prepared(smc); + } /* subsequent patch: trigger socket release if connection closed */ diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index 135f61369a60..8e1d76f26007 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h @@ 
-212,6 +212,7 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, struct smc_cdc_tx_pend *pend); int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn); +bool smc_cdc_tx_has_pending(struct smc_connection *conn); int smc_cdc_init(void) __init; #endif /* SMC_CDC_H */ diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c new file mode 100644 index 000000000000..d70c05b57021 --- /dev/null +++ b/net/smc/smc_close.c @@ -0,0 +1,441 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Socket Closing - normal and abnormal + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include +#include + +#include "smc.h" +#include "smc_tx.h" +#include "smc_cdc.h" +#include "smc_close.h" + +#define SMC_CLOSE_WAIT_TX_PENDS_TIME (5 * HZ) + +static void smc_close_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + /* Close non-accepted connections */ + while ((sk = smc_accept_dequeue(parent, NULL))) + smc_close_non_accepted(sk); +} + +static void smc_close_wait_tx_pends(struct smc_sock *smc) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sock *sk = &smc->sk; + signed long timeout; + + timeout = SMC_CLOSE_WAIT_TX_PENDS_TIME; + add_wait_queue(sk_sleep(sk), &wait); + while (!signal_pending(current) && timeout) { + int rc; + + rc = sk_wait_event(sk, &timeout, + !smc_cdc_tx_has_pending(&smc->conn), + &wait); + if (rc) + break; + } + remove_wait_queue(sk_sleep(sk), &wait); +} + +/* wait for sndbuf data being transmitted */ +static void smc_close_stream_wait(struct smc_sock *smc, long timeout) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sock *sk = &smc->sk; + + if (!timeout) + return; + + if (!smc_tx_prepared_sends(&smc->conn)) + return; + + smc->wait_close_tx_prepared = 1; + add_wait_queue(sk_sleep(sk), &wait); + while (!signal_pending(current) && timeout) { + int rc; + + rc = sk_wait_event(sk, &timeout, + !smc_tx_prepared_sends(&smc->conn) || + (sk->sk_err == ECONNABORTED) || + (sk->sk_err == ECONNRESET), + &wait); + if (rc) + break; + } + remove_wait_queue(sk_sleep(sk), &wait); + smc->wait_close_tx_prepared = 0; +} + +void smc_close_wake_tx_prepared(struct smc_sock *smc) +{ + if (smc->wait_close_tx_prepared) + /* wake up socket closing */ + smc->sk.sk_state_change(&smc->sk); +} + +static int smc_close_wr(struct smc_connection *conn) +{ + conn->local_tx_ctrl.conn_state_flags.peer_done_writing = 1; + + return smc_cdc_get_slot_and_msg_send(conn); +} + +static int smc_close_final(struct smc_connection *conn) +{ + if (atomic_read(&conn->bytes_to_rcv)) + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; + else + conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1; + + return smc_cdc_get_slot_and_msg_send(conn); +} + +static int smc_close_abort(struct smc_connection *conn) +{ + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; + + return smc_cdc_get_slot_and_msg_send(conn); +} + +/* terminate smc socket abnormally - active abort + * RDMA communication no longer possible + */ +void smc_close_active_abort(struct smc_sock *smc) +{ + struct smc_cdc_conn_state_flags *txflags = + &smc->conn.local_tx_ctrl.conn_state_flags; + + bh_lock_sock(&smc->sk); + smc->sk.sk_err = ECONNABORTED; + if (smc->clcsock && smc->clcsock->sk) { + smc->clcsock->sk->sk_err = ECONNABORTED; + smc->clcsock->sk->sk_state_change(smc->clcsock->sk); + } + switch (smc->sk.sk_state) { + case SMC_INIT: + smc->sk.sk_state = SMC_PEERABORTWAIT; + break; + case 
SMC_APPCLOSEWAIT1: + case SMC_APPCLOSEWAIT2: + txflags->peer_conn_abort = 1; + sock_release(smc->clcsock); + if (!smc_cdc_rxed_any_close(&smc->conn)) + smc->sk.sk_state = SMC_PEERABORTWAIT; + else + smc->sk.sk_state = SMC_CLOSED; + break; + case SMC_PEERCLOSEWAIT1: + case SMC_PEERCLOSEWAIT2: + if (!txflags->peer_conn_closed) { + smc->sk.sk_state = SMC_PEERABORTWAIT; + txflags->peer_conn_abort = 1; + sock_release(smc->clcsock); + } else { + smc->sk.sk_state = SMC_CLOSED; + } + break; + case SMC_PROCESSABORT: + case SMC_APPFINCLOSEWAIT: + if (!txflags->peer_conn_closed) { + txflags->peer_conn_abort = 1; + sock_release(smc->clcsock); + } + smc->sk.sk_state = SMC_CLOSED; + break; + case SMC_PEERFINCLOSEWAIT: + case SMC_PEERABORTWAIT: + case SMC_CLOSED: + break; + } + + sock_set_flag(&smc->sk, SOCK_DEAD); + bh_unlock_sock(&smc->sk); + smc->sk.sk_state_change(&smc->sk); +} + +int smc_close_active(struct smc_sock *smc) +{ + struct smc_cdc_conn_state_flags *txflags = + &smc->conn.local_tx_ctrl.conn_state_flags; + long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT; + struct smc_connection *conn = &smc->conn; + struct sock *sk = &smc->sk; + int old_state; + int rc = 0; + + if (sock_flag(sk, SOCK_LINGER) && + !(current->flags & PF_EXITING)) + timeout = sk->sk_lingertime; + +again: + old_state = sk->sk_state; + switch (old_state) { + case SMC_INIT: + sk->sk_state = SMC_CLOSED; + if (smc->smc_listen_work.func) + flush_work(&smc->smc_listen_work); + sock_put(sk); + break; + case SMC_LISTEN: + sk->sk_state = SMC_CLOSED; + sk->sk_state_change(sk); /* wake up accept */ + if (smc->clcsock && smc->clcsock->sk) { + rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); + /* wake up kernel_accept of smc_tcp_listen_worker */ + smc->clcsock->sk->sk_data_ready(smc->clcsock->sk); + } + release_sock(sk); + smc_close_cleanup_listen(sk); + flush_work(&smc->tcp_listen_work); + lock_sock(sk); + break; + case SMC_ACTIVE: + smc_close_stream_wait(smc, timeout); + release_sock(sk); + cancel_work_sync(&conn->tx_work); + lock_sock(sk); + if (sk->sk_state == SMC_ACTIVE) { + /* send close request */ + rc = smc_close_final(conn); + sk->sk_state = SMC_PEERCLOSEWAIT1; + } else { + /* peer event has changed the state */ + goto again; + } + break; + case SMC_APPFINCLOSEWAIT: + /* socket already shutdown wr or both (active close) */ + if (txflags->peer_done_writing && + !txflags->peer_conn_closed) { + /* just shutdown wr done, send close request */ + rc = smc_close_final(conn); + } + sk->sk_state = SMC_CLOSED; + smc_close_wait_tx_pends(smc); + break; + case SMC_APPCLOSEWAIT1: + case SMC_APPCLOSEWAIT2: + if (!smc_cdc_rxed_any_close(conn)) + smc_close_stream_wait(smc, timeout); + release_sock(sk); + cancel_work_sync(&conn->tx_work); + lock_sock(sk); + if (sk->sk_err != ECONNABORTED) { + /* confirm close from peer */ + rc = smc_close_final(conn); + if (rc) + break; + } + if (smc_cdc_rxed_any_close(conn)) + /* peer has closed the socket already */ + sk->sk_state = SMC_CLOSED; + else + /* peer has just issued a shutdown write */ + sk->sk_state = SMC_PEERFINCLOSEWAIT; + smc_close_wait_tx_pends(smc); + break; + case SMC_PEERCLOSEWAIT1: + case SMC_PEERCLOSEWAIT2: + case SMC_PEERFINCLOSEWAIT: + /* peer sending PeerConnectionClosed will cause transition */ + break; + case SMC_PROCESSABORT: + cancel_work_sync(&conn->tx_work); + smc_close_abort(conn); + sk->sk_state = SMC_CLOSED; + smc_close_wait_tx_pends(smc); + break; + case SMC_PEERABORTWAIT: + case SMC_CLOSED: + /* nothing to do, add tracing in future patch */ + break; + } + + if (old_state != 
sk->sk_state) + sk->sk_state_change(&smc->sk); + return rc; +} + +static void smc_close_passive_abort_received(struct smc_sock *smc) +{ + struct smc_cdc_conn_state_flags *txflags = + &smc->conn.local_tx_ctrl.conn_state_flags; + struct sock *sk = &smc->sk; + + switch (sk->sk_state) { + case SMC_ACTIVE: + case SMC_APPFINCLOSEWAIT: + case SMC_APPCLOSEWAIT1: + case SMC_APPCLOSEWAIT2: + smc_close_abort(&smc->conn); + sk->sk_state = SMC_PROCESSABORT; + break; + case SMC_PEERCLOSEWAIT1: + case SMC_PEERCLOSEWAIT2: + if (txflags->peer_done_writing && + !txflags->peer_conn_closed) { + /* just shutdown, but not yet closed locally */ + smc_close_abort(&smc->conn); + sk->sk_state = SMC_PROCESSABORT; + } else { + sk->sk_state = SMC_CLOSED; + } + break; + case SMC_PEERFINCLOSEWAIT: + case SMC_PEERABORTWAIT: + sk->sk_state = SMC_CLOSED; + break; + case SMC_INIT: + case SMC_PROCESSABORT: + /* nothing to do, add tracing in future patch */ + break; + } +} + +/* Some kind of closing has been received: peer_conn_closed, peer_conn_abort, + * or peer_done_writing. + * Called under tasklet context. + */ +void smc_close_passive_received(struct smc_sock *smc) +{ + struct smc_cdc_conn_state_flags *rxflags = + &smc->conn.local_rx_ctrl.conn_state_flags; + struct sock *sk = &smc->sk; + int old_state; + + sk->sk_shutdown |= RCV_SHUTDOWN; + if (smc->clcsock && smc->clcsock->sk) + smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN; + sock_set_flag(&smc->sk, SOCK_DONE); + + old_state = sk->sk_state; + + if (rxflags->peer_conn_abort) { + smc_close_passive_abort_received(smc); + goto wakeup; + } + + switch (sk->sk_state) { + case SMC_INIT: + if (atomic_read(&smc->conn.bytes_to_rcv) || + (rxflags->peer_done_writing && + !rxflags->peer_conn_closed)) + sk->sk_state = SMC_APPCLOSEWAIT1; + else + sk->sk_state = SMC_CLOSED; + break; + case SMC_ACTIVE: + sk->sk_state = SMC_APPCLOSEWAIT1; + break; + case SMC_PEERCLOSEWAIT1: + if (rxflags->peer_done_writing) + sk->sk_state = SMC_PEERCLOSEWAIT2; + /* fall through to check for closing */ + case SMC_PEERCLOSEWAIT2: + case SMC_PEERFINCLOSEWAIT: + if (!smc_cdc_rxed_any_close(&smc->conn)) + break; + if (sock_flag(sk, SOCK_DEAD) && + (sk->sk_shutdown == SHUTDOWN_MASK)) { + /* smc_release has already been called locally */ + sk->sk_state = SMC_CLOSED; + } else { + /* just shutdown, but not yet closed locally */ + sk->sk_state = SMC_APPFINCLOSEWAIT; + } + break; + case SMC_APPCLOSEWAIT1: + case SMC_APPCLOSEWAIT2: + case SMC_APPFINCLOSEWAIT: + case SMC_PEERABORTWAIT: + case SMC_PROCESSABORT: + case SMC_CLOSED: + /* nothing to do, add tracing in future patch */ + break; + } + +wakeup: + if (old_state != sk->sk_state) + sk->sk_state_change(sk); + sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */ + sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */ + + if ((sk->sk_state == SMC_CLOSED) && + (sock_flag(sk, SOCK_DEAD) || (old_state == SMC_INIT))) { + smc_conn_free(&smc->conn); + schedule_delayed_work(&smc->sock_put_work, + SMC_CLOSE_SOCK_PUT_DELAY); + } +} + +void smc_close_sock_put_work(struct work_struct *work) +{ + struct smc_sock *smc = container_of(to_delayed_work(work), + struct smc_sock, + sock_put_work); + + sock_put(&smc->sk); +} + +int smc_close_shutdown_write(struct smc_sock *smc) +{ + struct smc_connection *conn = &smc->conn; + long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT; + struct sock *sk = &smc->sk; + int old_state; + int rc = 0; + + if (sock_flag(sk, SOCK_LINGER)) + timeout = sk->sk_lingertime; + +again: + old_state = sk->sk_state; + switch (old_state) { + case 
SMC_ACTIVE: + smc_close_stream_wait(smc, timeout); + release_sock(sk); + cancel_work_sync(&conn->tx_work); + lock_sock(sk); + /* send close wr request */ + rc = smc_close_wr(conn); + if (sk->sk_state == SMC_ACTIVE) + sk->sk_state = SMC_PEERCLOSEWAIT1; + else + goto again; + break; + case SMC_APPCLOSEWAIT1: + /* passive close */ + if (!smc_cdc_rxed_any_close(conn)) + smc_close_stream_wait(smc, timeout); + release_sock(sk); + cancel_work_sync(&conn->tx_work); + lock_sock(sk); + /* confirm close from peer */ + rc = smc_close_wr(conn); + sk->sk_state = SMC_APPCLOSEWAIT2; + break; + case SMC_APPCLOSEWAIT2: + case SMC_PEERFINCLOSEWAIT: + case SMC_PEERCLOSEWAIT1: + case SMC_PEERCLOSEWAIT2: + case SMC_APPFINCLOSEWAIT: + case SMC_PROCESSABORT: + case SMC_PEERABORTWAIT: + /* nothing to do, add tracing in future patch */ + break; + } + + if (old_state != sk->sk_state) + sk->sk_state_change(&smc->sk); + return rc; +} diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h new file mode 100644 index 000000000000..bc9a2df3633c --- /dev/null +++ b/net/smc/smc_close.h @@ -0,0 +1,28 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Socket Closing + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#ifndef SMC_CLOSE_H +#define SMC_CLOSE_H + +#include + +#include "smc.h" + +#define SMC_MAX_STREAM_WAIT_TIMEOUT (2 * HZ) +#define SMC_CLOSE_SOCK_PUT_DELAY HZ + +void smc_close_wake_tx_prepared(struct smc_sock *smc); +void smc_close_active_abort(struct smc_sock *smc); +int smc_close_active(struct smc_sock *smc); +void smc_close_passive_received(struct smc_sock *smc); +void smc_close_sock_put_work(struct work_struct *work); +int smc_close_shutdown_write(struct smc_sock *smc); + +#endif /* SMC_CLOSE_H */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index e5c63950fc28..8b1d34378829 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -23,6 +23,7 @@ #include "smc_wr.h" #include "smc_llc.h" #include "smc_cdc.h" +#include "smc_close.h" #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY (600 * HZ) @@ -295,6 +296,7 @@ void smc_lgr_free(struct smc_link_group *lgr) void smc_lgr_terminate(struct smc_link_group *lgr) { struct smc_connection *conn; + struct smc_sock *smc; struct rb_node *node; spin_lock_bh(&smc_lgr_list.lock); @@ -311,11 +313,14 @@ void smc_lgr_terminate(struct smc_link_group *lgr) node = rb_first(&lgr->conns_all); while (node) { conn = rb_entry(node, struct smc_connection, alert_node); + smc = container_of(conn, struct smc_sock, conn); + sock_hold(&smc->sk); __smc_lgr_unregister_conn(conn); + smc_close_active_abort(smc); + sock_put(&smc->sk); node = rb_first(&lgr->conns_all); } write_unlock_bh(&lgr->conns_lock); - schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY); } /* Determine vlan of internal TCP socket. 
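As an aside on the close handling above: the new state machine is driven entirely by the regular socket calls. A shutdown(fd, SHUT_WR) from user space ends up in smc_close_shutdown_write(), which moves an SMC_ACTIVE socket to SMC_PEERCLOSEWAIT1, while the peer transitions to SMC_APPCLOSEWAIT1 once the peer_done_writing flag arrives via CDC. A minimal user-space half-close sketch (hypothetical illustration, not part of this patch; it works on any connected stream socket and assumes nothing SMC-specific):

/* Half-close sketch: stop sending, then drain the receive side until the
 * peer closes as well.  On an AF_SMC socket this exercises the SHUT_WR
 * path of smc_shutdown() added above.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

int half_close_and_drain(int fd)
{
	char buf[4096];
	ssize_t n;

	if (shutdown(fd, SHUT_WR) < 0) {	/* -> smc_close_shutdown_write() */
		perror("shutdown");
		return -1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;				/* consume data still in flight */
	return n == 0 ? 0 : -1;			/* 0: peer closed its side cleanly */
}

The passive side simply keeps reading until read() returns 0 and then calls close(), which takes the SMC_APPCLOSEWAIT1/SMC_APPCLOSEWAIT2 branches of smc_close_active() shown above.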
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 7e8799fcd3a0..6e73b28915ea 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -139,6 +139,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) if (sk->sk_state == SMC_INIT) return -ENOTCONN; if (smc->sk.sk_shutdown & SEND_SHUTDOWN || + (smc->sk.sk_err == ECONNABORTED) || conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) return -EPIPE; if (smc_cdc_rxed_any_close(conn)) @@ -392,6 +393,13 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn) &pend); if (rc < 0) { if (rc == -EBUSY) { + struct smc_sock *smc = + container_of(conn, struct smc_sock, conn); + + if (smc->sk.sk_err == ECONNABORTED) { + rc = sock_error(&smc->sk); + goto out_unlock; + } rc = 0; schedule_work(&conn->tx_work); } diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 14f3f3fa6e52..eadf157418dc 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -81,6 +81,8 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask)) return; if (wc->status) { + struct smc_link_group *lgr; + for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { /* clear full struct smc_wr_tx_pend including .priv */ memset(&link->wr_tx_pends[i], 0, @@ -89,9 +91,10 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) sizeof(link->wr_tx_bufs[i])); clear_bit(i, link->wr_tx_mask); } - /* tbd in future patch: terminate connections of this link - * group abnormally - */ + /* terminate connections of this link group abnormally */ + lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + smc_lgr_terminate(lgr); } if (pnd_snd.handler) pnd_snd.handler(&pnd_snd.priv, link, wc->status); @@ -176,9 +179,12 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY), SMC_WR_TX_WAIT_FREE_SLOT_TIME); if (!rc) { - /* tbd in future patch: timeout - terminate connections - * of this link group abnormally - */ + /* timeout - terminate connections */ + struct smc_link_group *lgr; + + lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + smc_lgr_terminate(lgr); return -EPIPE; } if (rc == -ERESTARTSYS) @@ -256,6 +262,24 @@ void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type, } } +bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type, + smc_wr_tx_filter filter, unsigned long data) +{ + struct smc_wr_tx_pend_priv *tx_pend; + struct smc_wr_rx_hdr *wr_rx; + int i; + + for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { + wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i]; + if (wr_rx->type != wr_rx_hdr_type) + continue; + tx_pend = &link->wr_tx_pends[i].priv; + if (filter(tx_pend, data)) + return true; + } + return false; +} + /****************************** receive queue ********************************/ int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler) @@ -310,14 +334,19 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) smc_wr_rx_demultiplex(&wc[i]); smc_wr_rx_post(link); /* refill WR RX */ } else { + struct smc_link_group *lgr; + /* handle status errors */ switch (wc[i].status) { case IB_WC_RETRY_EXC_ERR: case IB_WC_RNR_RETRY_EXC_ERR: case IB_WC_WR_FLUSH_ERR: - /* tbd in future patch: terminate connections of this - * link group abnormally - */ + /* terminate connections of this link group + * abnormally + */ + lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + smc_lgr_terminate(lgr); break; default: smc_wr_rx_post(link); /* refill 
WR RX */ diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 124f857ddaa8..0b9beeda6053 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -92,6 +92,8 @@ int smc_wr_tx_put_slot(struct smc_link *link, int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *wr_pend_priv); void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context); +bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type, + smc_wr_tx_filter filter, unsigned long data); void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type, smc_wr_tx_filter filter, smc_wr_tx_dismisser dismisser, -- cgit v1.2.3 From f16a7dd5cf27eeda187425c9c7d96802a549f9c4 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 9 Jan 2017 16:55:26 +0100 Subject: smc: netlink interface for SMC sockets Support for SMC socket monitoring via netlink sockets of protocol NETLINK_SOCK_DIAG. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- include/net/smc.h | 20 ++++ include/net/sock.h | 3 + include/uapi/linux/netlink.h | 1 + include/uapi/linux/smc_diag.h | 85 +++++++++++++++++ net/smc/Kconfig | 9 ++ net/smc/Makefile | 1 + net/smc/af_smc.c | 43 ++++++++- net/smc/smc.h | 2 + net/smc/smc_close.c | 1 + net/smc/smc_diag.c | 215 ++++++++++++++++++++++++++++++++++++++++++ 10 files changed, 379 insertions(+), 1 deletion(-) create mode 100644 include/net/smc.h create mode 100644 include/uapi/linux/smc_diag.h create mode 100644 net/smc/smc_diag.c diff --git a/include/net/smc.h b/include/net/smc.h new file mode 100644 index 000000000000..12d26358ad9f --- /dev/null +++ b/include/net/smc.h @@ -0,0 +1,20 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for the SMC module (socket related) + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ +#ifndef _SMC_H +#define _SMC_H + +struct smc_hashinfo { + rwlock_t lock; + struct hlist_head ht; +}; + +int smc_hash_sk(struct sock *sk); +void smc_unhash_sk(struct sock *sk); +#endif /* _SMC_H */ diff --git a/include/net/sock.h b/include/net/sock.h index 99deda67eba0..389a0a619b45 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -70,6 +70,7 @@ #include #include #include +#include /* * This structure really needs to be cleaned up. @@ -986,6 +987,7 @@ struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; +struct smc_hashinfo; struct module; /* @@ -1094,6 +1096,7 @@ struct proto { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; } h; struct module *owner; diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 0dba4e4ed2be..f3946a27bd07 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -27,6 +27,7 @@ #define NETLINK_ECRYPTFS 19 #define NETLINK_RDMA 20 #define NETLINK_CRYPTO 21 /* Crypto layer */ +#define NETLINK_SMC 22 /* SMC monitoring */ #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h new file mode 100644 index 000000000000..0063919fea34 --- /dev/null +++ b/include/uapi/linux/smc_diag.h @@ -0,0 +1,85 @@ +#ifndef _UAPI_SMC_DIAG_H_ +#define _UAPI_SMC_DIAG_H_ + +#include +#include +#include + +/* Request structure */ +struct smc_diag_req { + __u8 diag_family; + __u8 pad[2]; + __u8 diag_ext; /* Query extended information */ + struct inet_diag_sockid id; +}; + +/* Base info structure. 
It contains socket identity (addrs/ports/cookie) based + * on the internal clcsock, and more SMC-related socket data + */ +struct smc_diag_msg { + __u8 diag_family; + __u8 diag_state; + __u8 diag_fallback; + __u8 diag_shutdown; + struct inet_diag_sockid id; + + __u32 diag_uid; + __u64 diag_inode; +}; + +/* Extensions */ + +enum { + SMC_DIAG_NONE, + SMC_DIAG_CONNINFO, + SMC_DIAG_LGRINFO, + SMC_DIAG_SHUTDOWN, + __SMC_DIAG_MAX, +}; + +#define SMC_DIAG_MAX (__SMC_DIAG_MAX - 1) + +/* SMC_DIAG_CONNINFO */ + +struct smc_diag_cursor { + __u16 reserved; + __u16 wrap; + __u32 count; +}; + +struct smc_diag_conninfo { + __u32 token; /* unique connection id */ + __u32 sndbuf_size; /* size of send buffer */ + __u32 rmbe_size; /* size of RMB element */ + __u32 peer_rmbe_size; /* size of peer RMB element */ + /* local RMB element cursors */ + struct smc_diag_cursor rx_prod; /* received producer cursor */ + struct smc_diag_cursor rx_cons; /* received consumer cursor */ + /* peer RMB element cursors */ + struct smc_diag_cursor tx_prod; /* sent producer cursor */ + struct smc_diag_cursor tx_cons; /* sent consumer cursor */ + __u8 rx_prod_flags; /* received producer flags */ + __u8 rx_conn_state_flags; /* recvd connection flags*/ + __u8 tx_prod_flags; /* sent producer flags */ + __u8 tx_conn_state_flags; /* sent connection flags*/ + /* send buffer cursors */ + struct smc_diag_cursor tx_prep; /* prepared to be sent cursor */ + struct smc_diag_cursor tx_sent; /* sent cursor */ + struct smc_diag_cursor tx_fin; /* confirmed sent cursor */ +}; + +/* SMC_DIAG_LINKINFO */ + +struct smc_diag_linkinfo { + __u8 link_id; /* link identifier */ + __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */ + __u8 ibport; /* RDMA device port number */ + __u8 gid[40]; /* local GID */ + __u8 peer_gid[40]; /* peer GID */ +}; + +struct smc_diag_lgrinfo { + struct smc_diag_linkinfo lnk[1]; + __u8 role; +}; +#endif /* _UAPI_SMC_DIAG_H_ */ diff --git a/net/smc/Kconfig b/net/smc/Kconfig index bc029803e728..c717ef0896aa 100644 --- a/net/smc/Kconfig +++ b/net/smc/Kconfig @@ -9,3 +9,12 @@ config SMC a separate socket family SMC. Select this option if you want to run SMC socket applications + +config SMC_DIAG + tristate "SMC: socket monitoring interface" + depends on SMC + ---help--- + Support for SMC socket monitoring interface used by tools such as + smcss. + + if unsure, say Y. 
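To illustrate the monitoring interface added here, a user-space dump of all AF_SMC sockets could look like the sketch below (hypothetical tool code along the lines of smcss, not part of this patch; it assumes the new uapi header <linux/smc_diag.h> added above is installed, and it defines AF_SMC as 43 only in case the libc headers do not provide it yet, matching the value used in the MODULE_ALIAS at the end of smc_diag.c):

/* Sketch only: dump the base info of all AF_SMC sockets via
 * NETLINK_SOCK_DIAG.  Error handling is minimal and only one batch of
 * replies is read; a real tool would loop until NLMSG_DONE.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <linux/smc_diag.h>		/* added by this patch */

#ifndef AF_SMC
#define AF_SMC 43			/* value from the MODULE_ALIAS in smc_diag.c */
#endif

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct smc_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.diag_family = AF_SMC,
		},
	};
	char buf[8192];
	struct nlmsghdr *h;
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0)
		return 1;
	len = recv(fd, buf, sizeof(buf), 0);
	for (h = (struct nlmsghdr *)buf; len > 0 && NLMSG_OK(h, len);
	     h = NLMSG_NEXT(h, len)) {
		struct smc_diag_msg *r = NLMSG_DATA(h);

		if (h->nlmsg_type == NLMSG_DONE || h->nlmsg_type == NLMSG_ERROR)
			break;
		printf("state %u fallback %u inode %llu\n",
		       r->diag_state, r->diag_fallback,
		       (unsigned long long)r->diag_inode);
	}
	close(fd);
	return 0;
}

Note that the dump travels over the existing NETLINK_SOCK_DIAG protocol, since the handler below is registered via sock_diag_register(), not over the newly reserved NETLINK_SMC number.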
diff --git a/net/smc/Makefile b/net/smc/Makefile index 5cf0cafaa208..188104654b54 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_SMC) += smc.o +obj-$(CONFIG_SMC_DIAG) += smc_diag.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 3f543d58bc5c..5d4208ad029e 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "smc.h" #include "smc_clc.h" @@ -59,13 +60,48 @@ static void smc_set_keepalive(struct sock *sk, int val) smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val); } -static struct proto smc_proto = { +static struct smc_hashinfo smc_v4_hashinfo = { + .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock), +}; + +int smc_hash_sk(struct sock *sk) +{ + struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; + struct hlist_head *head; + + head = &h->ht; + + write_lock_bh(&h->lock); + sk_add_node(sk, head); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + write_unlock_bh(&h->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(smc_hash_sk); + +void smc_unhash_sk(struct sock *sk) +{ + struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; + + write_lock_bh(&h->lock); + if (sk_del_node_init(sk)) + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); + write_unlock_bh(&h->lock); +} +EXPORT_SYMBOL_GPL(smc_unhash_sk); + +struct proto smc_proto = { .name = "SMC", .owner = THIS_MODULE, .keepalive = smc_set_keepalive, + .hash = smc_hash_sk, + .unhash = smc_unhash_sk, .obj_size = sizeof(struct smc_sock), + .h.smc_hash = &smc_v4_hashinfo, .slab_flags = SLAB_DESTROY_BY_RCU, }; +EXPORT_SYMBOL_GPL(smc_proto); static int smc_release(struct socket *sock) { @@ -109,6 +145,7 @@ static int smc_release(struct socket *sock) schedule_delayed_work(&smc->sock_put_work, SMC_CLOSE_SOCK_PUT_DELAY); } + sk->sk_prot->unhash(sk); release_sock(sk); sock_put(sk); @@ -144,6 +181,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock) INIT_LIST_HEAD(&smc->accept_q); spin_lock_init(&smc->accept_q_lock); INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work); + sk->sk_prot->hash(sk); sk_refcnt_debug_inc(sk); return sk; @@ -536,6 +574,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) lsmc->sk.sk_err = -rc; new_sk->sk_state = SMC_CLOSED; sock_set_flag(new_sk, SOCK_DEAD); + sk->sk_prot->unhash(new_sk); sock_put(new_sk); *new_smc = NULL; goto out; @@ -545,6 +584,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) sock_release(new_clcsock); new_sk->sk_state = SMC_CLOSED; sock_set_flag(new_sk, SOCK_DEAD); + sk->sk_prot->unhash(new_sk); sock_put(new_sk); *new_smc = NULL; goto out; @@ -1320,6 +1360,7 @@ static int __init smc_init(void) pr_err("%s: sock_register fails with %d\n", __func__, rc); goto out_proto; } + INIT_HLIST_HEAD(&smc_v4_hashinfo.ht); rc = smc_ib_register_client(); if (rc) { diff --git a/net/smc/smc.h b/net/smc/smc.h index 959a5d2014ab..ee5fbea24549 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -21,6 +21,8 @@ #define SMC_MAX_PORTS 2 /* Max # of ports */ +extern struct proto smc_proto; + #ifdef ATOMIC64_INIT #define KERNEL_HAS_ATOMIC64 #endif diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index d70c05b57021..03dfcc6b7661 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -384,6 +384,7 @@ void smc_close_sock_put_work(struct work_struct *work) struct smc_sock, sock_put_work); + 
smc->sk.sk_prot->unhash(&smc->sk); sock_put(&smc->sk); } diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c new file mode 100644 index 000000000000..d2d01cf70224 --- /dev/null +++ b/net/smc/smc_diag.c @@ -0,0 +1,215 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Monitoring SMC transport protocol sockets + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "smc.h" +#include "smc_core.h" + +static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw) +{ + sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", + be16_to_cpu(((__be16 *)gid_raw)[0]), + be16_to_cpu(((__be16 *)gid_raw)[1]), + be16_to_cpu(((__be16 *)gid_raw)[2]), + be16_to_cpu(((__be16 *)gid_raw)[3]), + be16_to_cpu(((__be16 *)gid_raw)[4]), + be16_to_cpu(((__be16 *)gid_raw)[5]), + be16_to_cpu(((__be16 *)gid_raw)[6]), + be16_to_cpu(((__be16 *)gid_raw)[7])); +} + +static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk) +{ + struct smc_sock *smc = smc_sk(sk); + + r->diag_family = sk->sk_family; + if (!smc->clcsock) + return; + r->id.idiag_sport = htons(smc->clcsock->sk->sk_num); + r->id.idiag_dport = smc->clcsock->sk->sk_dport; + r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if; + sock_diag_save_cookie(sk, r->id.idiag_cookie); + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); + r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr; + r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr; +} + +static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, + struct smc_diag_msg *r, + struct user_namespace *user_ns) +{ + if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown)) + return 1; + + r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); + r->diag_inode = sock_i_ino(sk); + return 0; +} + +static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, + struct netlink_callback *cb, + const struct smc_diag_req *req, + struct nlattr *bc) +{ + struct smc_sock *smc = smc_sk(sk); + struct user_namespace *user_ns; + struct smc_diag_msg *r; + struct nlmsghdr *nlh; + + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + r = nlmsg_data(nlh); + smc_diag_msg_common_fill(r, sk); + r->diag_state = sk->sk_state; + r->diag_fallback = smc->use_fallback; + user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk); + if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns)) + goto errout; + + if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.lgr) { + struct smc_connection *conn = &smc->conn; + struct smc_diag_conninfo cinfo = { + .token = conn->alert_token_local, + .sndbuf_size = conn->sndbuf_size, + .rmbe_size = conn->rmbe_size, + .peer_rmbe_size = conn->peer_rmbe_size, + + .rx_prod.wrap = conn->local_rx_ctrl.prod.wrap, + .rx_prod.count = conn->local_rx_ctrl.prod.count, + .rx_cons.wrap = conn->local_rx_ctrl.cons.wrap, + .rx_cons.count = conn->local_rx_ctrl.cons.count, + + .tx_prod.wrap = conn->local_tx_ctrl.prod.wrap, + .tx_prod.count = conn->local_tx_ctrl.prod.count, + .tx_cons.wrap = conn->local_tx_ctrl.cons.wrap, + .tx_cons.count = conn->local_tx_ctrl.cons.count, + + .tx_prod_flags = + *(u8 *)&conn->local_tx_ctrl.prod_flags, + .tx_conn_state_flags = + *(u8 *)&conn->local_tx_ctrl.conn_state_flags, + .rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags, + .rx_conn_state_flags = + *(u8 
*)&conn->local_rx_ctrl.conn_state_flags, + + .tx_prep.wrap = conn->tx_curs_prep.wrap, + .tx_prep.count = conn->tx_curs_prep.count, + .tx_sent.wrap = conn->tx_curs_sent.wrap, + .tx_sent.count = conn->tx_curs_sent.count, + .tx_fin.wrap = conn->tx_curs_fin.wrap, + .tx_fin.count = conn->tx_curs_fin.count, + }; + + if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0) + goto errout; + } + + if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr) { + struct smc_diag_lgrinfo linfo = { + .role = smc->conn.lgr->role, + .lnk[0].ibport = smc->conn.lgr->lnk[0].ibport, + .lnk[0].link_id = smc->conn.lgr->lnk[0].link_id, + }; + + memcpy(linfo.lnk[0].ibname, + smc->conn.lgr->lnk[0].smcibdev->ibdev->name, + sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name)); + smc_gid_be16_convert(linfo.lnk[0].gid, + smc->conn.lgr->lnk[0].gid.raw); + smc_gid_be16_convert(linfo.lnk[0].peer_gid, + smc->conn.lgr->lnk[0].peer_gid); + + if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0) + goto errout; + } + + nlmsg_end(skb, nlh); + return 0; + +errout: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct nlattr *bc = NULL; + struct hlist_head *head; + struct sock *sk; + int rc = 0; + + read_lock(&smc_proto.h.smc_hash->lock); + head = &smc_proto.h.smc_hash->ht; + if (hlist_empty(head)) + goto out; + + sk_for_each(sk, head) { + if (!net_eq(sock_net(sk), net)) + continue; + rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc); + if (rc) + break; + } + +out: + read_unlock(&smc_proto.h.smc_hash->lock); + return rc; +} + +static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) +{ + struct net *net = sock_net(skb->sk); + + if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && + h->nlmsg_flags & NLM_F_DUMP) { + { + struct netlink_dump_control c = { + .dump = smc_diag_dump, + .min_dump_alloc = SKB_WITH_OVERHEAD(32768), + }; + return netlink_dump_start(net->diag_nlsk, skb, h, &c); + } + } + return 0; +} + +static const struct sock_diag_handler smc_diag_handler = { + .family = AF_SMC, + .dump = smc_diag_handler_dump, +}; + +static int __init smc_diag_init(void) +{ + return sock_diag_register(&smc_diag_handler); +} + +static void __exit smc_diag_exit(void) +{ + sock_diag_unregister(&smc_diag_handler); +} + +module_init(smc_diag_init); +module_exit(smc_diag_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */); -- cgit v1.2.3 From dbcfe5f76dd5266b8f308b5a8f9ef52f74b2d6e7 Mon Sep 17 00:00:00 2001 From: Gianluca Borello Date: Mon, 9 Jan 2017 10:19:46 -0800 Subject: bpf: split check_mem_access logic for map values Move the logic to check memory accesses to a PTR_TO_MAP_VALUE_ADJ from check_mem_access() to a separate helper check_map_access_adj(). This enables to use those checks in other parts of the verifier as well, where boundaries on PTR_TO_MAP_VALUE_ADJ might need to be checked, for example when checking helper function arguments. The same thing is already happening for other types such as PTR_TO_PACKET and its check_packet_access() helper. The code has been copied verbatim, with the only difference of removing the "off += reg->max_value" statement and moving the sum into the call statement to check_map_access(), as that was only needed due to the earlier common check_map_access() call. Signed-off-by: Gianluca Borello Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 88 ++++++++++++++++++++++++++++----------------------- 1 file changed, 49 insertions(+), 39 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 83ed2f8f6f22..8333fbcfbfe7 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -635,6 +635,51 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, return 0; } +/* check read/write into an adjusted map element */ +static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno, + int off, int size) +{ + struct bpf_verifier_state *state = &env->cur_state; + struct bpf_reg_state *reg = &state->regs[regno]; + int err; + + /* We adjusted the register to this map value, so we + * need to change off and size to min_value and max_value + * respectively to make sure our theoretical access will be + * safe. + */ + if (log_level) + print_verifier_state(state); + env->varlen_map_value_access = true; + /* The minimum value is only important with signed + * comparisons where we can't assume the floor of a + * value is 0. If we are using signed variables for our + * index'es we need to make sure that whatever we use + * will have a set floor within our range. + */ + if (reg->min_value < 0) { + verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", + regno); + return -EACCES; + } + err = check_map_access(env, regno, reg->min_value + off, size); + if (err) { + verbose("R%d min value is outside of the array range\n", + regno); + return err; + } + + /* If we haven't set a max value then we need to bail + * since we can't be sure we won't do bad things. + */ + if (reg->max_value == BPF_REGISTER_MAX_RANGE) { + verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", + regno); + return -EACCES; + } + return check_map_access(env, regno, reg->max_value + off, size); +} + #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, @@ -775,45 +820,10 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, return -EACCES; } - /* If we adjusted the register to this map value at all then we - * need to change off and size to min_value and max_value - * respectively to make sure our theoretical access will be - * safe. - */ - if (reg->type == PTR_TO_MAP_VALUE_ADJ) { - if (log_level) - print_verifier_state(state); - env->varlen_map_value_access = true; - /* The minimum value is only important with signed - * comparisons where we can't assume the floor of a - * value is 0. If we are using signed variables for our - * index'es we need to make sure that whatever we use - * will have a set floor within our range. - */ - if (reg->min_value < 0) { - verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", - regno); - return -EACCES; - } - err = check_map_access(env, regno, reg->min_value + off, - size); - if (err) { - verbose("R%d min value is outside of the array range\n", - regno); - return err; - } - - /* If we haven't set a max value then we need to bail - * since we can't be sure we won't do bad things. 
- */ - if (reg->max_value == BPF_REGISTER_MAX_RANGE) { - verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", - regno); - return -EACCES; - } - off += reg->max_value; - } - err = check_map_access(env, regno, off, size); + if (reg->type == PTR_TO_MAP_VALUE_ADJ) + err = check_map_access_adj(env, regno, off, size); + else + err = check_map_access(env, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown_value(state->regs, value_regno); -- cgit v1.2.3 From 5722569bb9c3bd922c4f10b5b2912fe88c255312 Mon Sep 17 00:00:00 2001 From: Gianluca Borello Date: Mon, 9 Jan 2017 10:19:47 -0800 Subject: bpf: allow helpers access to map element values Enable helpers to directly access a map element value by passing a register type PTR_TO_MAP_VALUE (or PTR_TO_MAP_VALUE_ADJ) to helper arguments ARG_PTR_TO_STACK or ARG_PTR_TO_RAW_STACK. This enables several use cases. For example, a typical tracing program might want to capture pathnames passed to sys_open() with: struct trace_data { char pathname[PATHLEN]; }; SEC("kprobe/sys_open") void bpf_sys_open(struct pt_regs *ctx) { struct trace_data data; bpf_probe_read(data.pathname, sizeof(data.pathname), ctx->di); /* consume data.pathname, for example via * bpf_trace_printk() or bpf_perf_event_output() */ } Such a program could easily hit the stack limit in case PATHLEN needs to be large or more local variables need to exist, both of which are quite common scenarios. Allowing direct helper access to map element values, one could do: struct bpf_map_def SEC("maps") scratch_map = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, .key_size = sizeof(u32), .value_size = sizeof(struct trace_data), .max_entries = 1, }; SEC("kprobe/sys_open") int bpf_sys_open(struct pt_regs *ctx) { int id = 0; struct trace_data *p = bpf_map_lookup_elem(&scratch_map, &id); if (!p) return; bpf_probe_read(p->pathname, sizeof(p->pathname), ctx->di); /* consume p->pathname, for example via * bpf_trace_printk() or bpf_perf_event_output() */ } And wouldn't risk exhausting the stack. Code changes are loosely modeled after commit 6841de8b0d03 ("bpf: allow helpers access the packet directly"). Unlike with PTR_TO_PACKET, these changes just work with ARG_PTR_TO_STACK and ARG_PTR_TO_RAW_STACK (not ARG_PTR_TO_MAP_KEY, ARG_PTR_TO_MAP_VALUE, ...): adding those would be trivial, but since there is not currently a use case for that, it's reasonable to limit the set of changes. Also, add new tests to make sure accesses to map element values from helpers never go out of boundary, even when adjusted. Signed-off-by: Gianluca Borello Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 9 +- tools/testing/selftests/bpf/test_verifier.c | 491 ++++++++++++++++++++++++++++ 2 files changed, 498 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8333fbcfbfe7..b7014606745b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -627,7 +627,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, { struct bpf_map *map = env->cur_state.regs[regno].map_ptr; - if (off < 0 || off + size > map->value_size) { + if (off < 0 || size <= 0 || off + size > map->value_size) { verbose("invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; @@ -1025,7 +1025,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, */ if (type == CONST_IMM && reg->imm == 0) /* final test in check_stack_boundary() */; - else if (type != PTR_TO_PACKET && type != expected_type) + else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE && + type != PTR_TO_MAP_VALUE_ADJ && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK; } else { @@ -1088,6 +1089,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, } if (regs[regno - 1].type == PTR_TO_PACKET) err = check_packet_access(env, regno - 1, 0, reg->imm); + else if (regs[regno - 1].type == PTR_TO_MAP_VALUE) + err = check_map_access(env, regno - 1, 0, reg->imm); + else if (regs[regno - 1].type == PTR_TO_MAP_VALUE_ADJ) + err = check_map_access_adj(env, regno - 1, 0, reg->imm); else err = check_stack_boundary(env, regno - 1, reg->imm, zero_size_allowed, meta); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 853d7e43434a..b7732e557bf9 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -2905,6 +2905,497 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr = "invalid bpf_context access", }, + { + "helper access to map: full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + 
BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=0 size=0", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: out-of-bound range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=0 size=56", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: negative range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=0 size=-8", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const imm): full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const imm): partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const imm): empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const imm): out-of-bound 
range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo) + 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=52", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const imm): negative range (> adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=-8", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const imm): negative range (< adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const reg): full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, + offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const reg): partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, + offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = 
BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const reg): empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const reg): out-of-bound range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, + offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo) + 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=52", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const reg): negative range (> adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, + offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=-8", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via const reg): negative range (< adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, + offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via variable): full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, + offsetof(struct test_val, foo), 4), + 
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via variable): partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, + offsetof(struct test_val, foo), 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via variable): empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, + offsetof(struct test_val, foo), 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via variable): no max check", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to adjusted map (via variable): wrong max check", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, + offsetof(struct test_val, foo), 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo) + 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=45", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3 
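[Editorial note] For readers who do not parse raw BPF instruction arrays fluently, the "helper access to adjusted map (via const imm): full range" ACCEPT test above corresponds roughly to the restricted-C sketch below. This is illustrative only, not code from the patch: the program, section and map names are invented, ctx->di is borrowed from the commit's sys_open example, and it assumes the struct test_val layout used by test_verifier.c (value_size 48, foo at offset 4) plus samples/bpf-style bpf_helpers.h declarations for SEC(), struct bpf_map_def, bpf_map_lookup_elem() and bpf_probe_read().

/* Hedged sketch of the "adjusted map (via const imm): full range" case;
 * names and the attach point are illustrative, not taken from the patch. */
#include <stddef.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"

#define MAX_ENTRIES 11

/* Mirrors the selftest's value layout: value_size = 48, foo at offset 4. */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct bpf_map_def SEC("maps") test_map = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(struct test_val),
	.max_entries = 1,
};

SEC("kprobe/sys_open")
int adjusted_map_full_range(struct pt_regs *ctx)
{
	int key = 0;
	struct test_val *val = bpf_map_lookup_elem(&test_map, &key);

	if (!val)
		return 0;

	/* val->foo is a map-value pointer plus a constant offset, tracked by
	 * the verifier as PTR_TO_MAP_VALUE_ADJ; with this patch it can be
	 * passed to a helper as long as the constant size stays inside the
	 * remaining 44 bytes of the element. */
	bpf_probe_read(val->foo,
		       sizeof(*val) - offsetof(struct test_val, foo),
		       (void *)ctx->di);
	return 0;
}

The companion REJECT tests vary only the offset and size (zero, negative, past the end of the value) and check that the check_map_access_adj()/check_map_access() paths emit the quoted error strings.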
From f0318d01b694485af9678a4e120328ae3555be6d Mon Sep 17 00:00:00 2001 From: Gianluca Borello Date: Mon, 9 Jan 2017 10:19:48 -0800 Subject: bpf: allow adjusted map element values to spill commit 484611357c19 ("bpf: allow access into map value arrays") introduces the ability to do pointer math inside a map element value via the PTR_TO_MAP_VALUE_ADJ register type. The current support doesn't handle the case where a PTR_TO_MAP_VALUE_ADJ is spilled into the stack, limiting several use cases, especially when generating bpf code from a compiler. Handle this case by explicitly enabling the register type PTR_TO_MAP_VALUE_ADJ to be spilled. Also, make sure that min_value and max_value are reset just for BPF_LDX operations that don't result in a restore of a spilled register from stack. Signed-off-by: Gianluca Borello Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 21 +++++++++---- tools/testing/selftests/bpf/test_verifier.c | 46 +++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b7014606745b..59ed07b9b4ea 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -481,6 +481,13 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) regs[regno].max_value = BPF_REGISTER_MAX_RANGE; } +static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, + u32 regno) +{ + mark_reg_unknown_value(regs, regno); + reset_reg_range_values(regs, regno); +} + enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ @@ -532,6 +539,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type) switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: + case PTR_TO_MAP_VALUE_ADJ: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: @@ -616,7 +624,8 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size, } if (value_regno >= 0) /* have read misc data from the stack */ - mark_reg_unknown_value(state->regs, value_regno); + mark_reg_unknown_value_and_range(state->regs, + value_regno); return 0; } } @@ -825,7 +834,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, else err = check_map_access(env, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) - mark_reg_unknown_value(state->regs, value_regno); + mark_reg_unknown_value_and_range(state->regs, + value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = UNKNOWN_VALUE; @@ -837,7 +847,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, } err = check_ctx_access(env, off, size, t, ®_type); if (!err && t == BPF_READ && value_regno >= 0) { - mark_reg_unknown_value(state->regs, value_regno); + mark_reg_unknown_value_and_range(state->regs, + value_regno); /* note that reg.[id|off|range] == 0 */ state->regs[value_regno].type = reg_type; } @@ -870,7 +881,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, } err = check_packet_access(env, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) - mark_reg_unknown_value(state->regs, value_regno); + mark_reg_unknown_value_and_range(state->regs, + value_regno); } else { verbose("R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -2744,7 +2756,6 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; - reset_reg_range_values(regs, insn->dst_reg); if 
(BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { insn_idx++; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index b7732e557bf9..e7b075819c08 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -3396,6 +3396,52 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, + { + "map element value is preserved across register spilling", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, + }, + { + "map element value (adjusted) is preserved across register spilling", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, + offsetof(struct test_val, foo)), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .result = ACCEPT, + .result_unpriv = REJECT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3 From 06c1c049721a995dee2829ad13b24aaf5d7c5cce Mon Sep 17 00:00:00 2001 From: Gianluca Borello Date: Mon, 9 Jan 2017 10:19:49 -0800 Subject: bpf: allow helpers access to variable memory Currently, helpers that read and write from/to the stack can do so using a pair of arguments of type ARG_PTR_TO_STACK and ARG_CONST_STACK_SIZE. ARG_CONST_STACK_SIZE accepts a constant register of type CONST_IMM, so that the verifier can safely check the memory access. However, requiring the argument to be a constant can be limiting in some circumstances. Since the current logic keeps track of the minimum and maximum value of a register throughout the simulated execution, ARG_CONST_STACK_SIZE can be changed to also accept an UNKNOWN_VALUE register in case its boundaries have been set and the range doesn't cause invalid memory accesses. One common situation when this is useful: int len; char buf[BUFSIZE]; /* BUFSIZE is 128 */ if (some_condition) len = 42; else len = 84; some_helper(..., buf, len & (BUFSIZE - 1)); The compiler can often decide to assign the constant values 42 or 48 into a variable on the stack, instead of keeping it in a register. When the variable is then read back from stack into the register in order to be passed to the helper, the verifier will not be able to recognize the register as constant (the verifier is not currently tracking all constant writes into memory), and the program won't be valid. 
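[Editorial note] Written out as a complete program, the scenario just described might look like the sketch below. This is a hedged illustration, not code from the patch: it reuses the commit's own sys_open/ctx->di example and a BUFSIZE of 128, assumes samples/bpf-style bpf_helpers.h declarations, and adds an explicit zero check plus buffer initialization for the reasons explained in the following paragraphs.

/* Hedged sketch of the variable-length pattern discussed above; the
 * condition on ctx->si and the helper declarations are assumptions. */
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"

#define BUFSIZE 128

SEC("kprobe/sys_open")
int bpf_sys_open(struct pt_regs *ctx)
{
	/* Initialized, because with a variable size the helper no longer runs
	 * in raw mode and the verifier requires fully initialized memory. */
	char buf[BUFSIZE] = { 0 };
	int len;

	if (ctx->si & 1)	/* any data-dependent condition */
		len = 42;
	else
		len = 84;

	/* 'len' typically round-trips through the stack here, so the verifier
	 * sees an UNKNOWN_VALUE register instead of a constant. The mask
	 * bounds it to [0, BUFSIZE); the > 0 check is needed because
	 * bpf_probe_read() declares its size as ARG_CONST_STACK_SIZE, which
	 * does not accept a zero length. */
	len &= BUFSIZE - 1;
	if (len > 0)
		bpf_probe_read(buf, len, (void *)ctx->di);

	return 0;
}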
However, by allowing the helper to accept an UNKNOWN_VALUE register, this program will work because the bitwise AND operation will set the range of possible values for the UNKNOWN_VALUE register to [0, BUFSIZE), so the verifier can guarantee the helper call will be safe (assuming the argument is of type ARG_CONST_STACK_SIZE_OR_ZERO, otherwise one more check against 0 would be needed). Custom ranges can be set not only with ALU operations, but also by explicitly comparing the UNKNOWN_VALUE register with constants. Another very common example happens when intercepting system call arguments and accessing user-provided data of variable size using bpf_probe_read(). One can load at runtime the user-provided length in an UNKNOWN_VALUE register, and then read that exact amount of data up to a compile-time determined limit in order to fit into the proper local storage allocated on the stack, without having to guess a suboptimal access size at compile time. Also, in case the helpers accepting the UNKNOWN_VALUE register operate in raw mode, disable the raw mode so that the program is required to initialize all memory, since there is no guarantee the helper will fill it completely, leaving possibilities for data leak (just relevant when the memory used by the helper is the stack, not when using a pointer to map element value or packet). In other words, ARG_PTR_TO_RAW_STACK will be treated as ARG_PTR_TO_STACK. Signed-off-by: Gianluca Borello Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 74 ++++- tools/testing/selftests/bpf/test_verifier.c | 410 ++++++++++++++++++++++++++++ 2 files changed, 474 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 59ed07b9b4ea..3d4f7bf32aaf 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -980,6 +980,25 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, return 0; } +static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, + int access_size, bool zero_size_allowed, + struct bpf_call_arg_meta *meta) +{ + struct bpf_reg_state *regs = env->cur_state.regs; + + switch (regs[regno].type) { + case PTR_TO_PACKET: + return check_packet_access(env, regno, 0, access_size); + case PTR_TO_MAP_VALUE: + return check_map_access(env, regno, 0, access_size); + case PTR_TO_MAP_VALUE_ADJ: + return check_map_access_adj(env, regno, 0, access_size); + default: /* const_imm|ptr_to_stack or invalid ptr */ + return check_stack_boundary(env, regno, access_size, + zero_size_allowed, meta); + } +} + static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) @@ -1018,7 +1037,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, } else if (arg_type == ARG_CONST_STACK_SIZE || arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { expected_type = CONST_IMM; - if (type != expected_type) + /* One exception. 
Allow UNKNOWN_VALUE registers when the + * boundaries are known and don't cause unsafe memory accesses + */ + if (type != UNKNOWN_VALUE && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; @@ -1099,15 +1121,47 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, verbose("ARG_CONST_STACK_SIZE cannot be first argument\n"); return -EACCES; } - if (regs[regno - 1].type == PTR_TO_PACKET) - err = check_packet_access(env, regno - 1, 0, reg->imm); - else if (regs[regno - 1].type == PTR_TO_MAP_VALUE) - err = check_map_access(env, regno - 1, 0, reg->imm); - else if (regs[regno - 1].type == PTR_TO_MAP_VALUE_ADJ) - err = check_map_access_adj(env, regno - 1, 0, reg->imm); - else - err = check_stack_boundary(env, regno - 1, reg->imm, - zero_size_allowed, meta); + + /* If the register is UNKNOWN_VALUE, the access check happens + * using its boundaries. Otherwise, just use its imm + */ + if (type == UNKNOWN_VALUE) { + /* For unprivileged variable accesses, disable raw + * mode so that the program is required to + * initialize all the memory that the helper could + * just partially fill up. + */ + meta = NULL; + + if (reg->min_value < 0) { + verbose("R%d min value is negative, either use unsigned or 'var &= const'\n", + regno); + return -EACCES; + } + + if (reg->min_value == 0) { + err = check_helper_mem_access(env, regno - 1, 0, + zero_size_allowed, + meta); + if (err) + return err; + } + + if (reg->max_value == BPF_REGISTER_MAX_RANGE) { + verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", + regno); + return -EACCES; + } + err = check_helper_mem_access(env, regno - 1, + reg->max_value, + zero_size_allowed, meta); + if (err) + return err; + } else { + /* register is CONST_IMM */ + err = check_helper_mem_access(env, regno - 1, reg->imm, + zero_size_allowed, meta); + } } return err; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index e7b075819c08..9bb45346dc72 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -3442,6 +3442,416 @@ static struct bpf_test tests[] = { .result = ACCEPT, .result_unpriv = REJECT, }, + { + "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, bitwise AND, zero included", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, 
BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=0", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, bitwise AND + JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=65", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, JMP (signed), correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, JMP, bounds + offset", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3), + 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=65", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=65", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, JMP, no max check", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R2 unbounded memory access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, JMP, no min check", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=0", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: stack, JMP (signed), no min check", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R2 min value is negative", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: map, JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, + sizeof(struct test_val), 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = 
ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: map, JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, + sizeof(struct test_val) + 1, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=0 size=49", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: map adjusted, JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, + sizeof(struct test_val) - 20, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: map adjusted, JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, + sizeof(struct test_val) - 19, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: size > 0 not allowed on NULL", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .errstr = "R1 type=imm expected=fp", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "helper access to variable memory: size = 0 not allowed on != NULL", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_2, 0), + 
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-8 access_size=0", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "helper access to variable memory: 8 bytes leak", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_EXIT_INSN(), + }, + .errstr = "invalid indirect read from stack off -64+32 size 64", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: 8 bytes no leak (init memory)", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3 From 39f19ebbf57b403695f7b5f9cf322fe1ddb5d7fb Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 9 Jan 2017 10:19:50 -0800 Subject: bpf: rename ARG_PTR_TO_STACK since ARG_PTR_TO_STACK is no longer just pointer to stack rename it to ARG_PTR_TO_MEM and adjust comment. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/bpf.h | 12 ++++++------ kernel/bpf/helpers.c | 4 ++-- kernel/bpf/verifier.c | 28 ++++++++++++++-------------- kernel/trace/bpf_trace.c | 20 ++++++++++---------- net/core/filter.c | 40 ++++++++++++++++++++-------------------- 5 files changed, 52 insertions(+), 52 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f74ae68086dc..94ea8d2383e6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -69,14 +69,14 @@ enum bpf_arg_type { /* the following constraints used to prototype bpf_memcmp() and other * functions that access data on eBPF program stack */ - ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ - ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not - * need to be initialized, helper function must fill - * all bytes or clear them in error case. 
+ ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ + ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, + * helper function must fill all bytes or clear + * them in error case. */ - ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ - ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ + ARG_CONST_SIZE, /* number of bytes accessed from memory */ + ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 045cbe673356..3d24e238221e 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -176,6 +176,6 @@ const struct bpf_func_proto bpf_get_current_comm_proto = { .func = bpf_get_current_comm, .gpl_only = false, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_RAW_STACK, - .arg2_type = ARG_CONST_STACK_SIZE, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE, }; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3d4f7bf32aaf..2efdc9128e3c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1034,8 +1034,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, expected_type = PTR_TO_STACK; if (type != PTR_TO_PACKET && type != expected_type) goto err_type; - } else if (arg_type == ARG_CONST_STACK_SIZE || - arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { + } else if (arg_type == ARG_CONST_SIZE || + arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = CONST_IMM; /* One exception. Allow UNKNOWN_VALUE registers when the * boundaries are known and don't cause unsafe memory accesses @@ -1050,8 +1050,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; - } else if (arg_type == ARG_PTR_TO_STACK || - arg_type == ARG_PTR_TO_RAW_STACK) { + } else if (arg_type == ARG_PTR_TO_MEM || + arg_type == ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a CONST_IMM type. Final test @@ -1062,7 +1062,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE && type != PTR_TO_MAP_VALUE_ADJ && type != expected_type) goto err_type; - meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK; + meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose("unsupported arg_type %d\n", arg_type); return -EFAULT; @@ -1108,9 +1108,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, err = check_stack_boundary(env, regno, meta->map_ptr->value_size, false, NULL); - } else if (arg_type == ARG_CONST_STACK_SIZE || - arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { - bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO); + } else if (arg_type == ARG_CONST_SIZE || + arg_type == ARG_CONST_SIZE_OR_ZERO) { + bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* bpf_xxx(..., buf, len) call will access 'len' bytes * from stack pointer 'buf'. 
Check it @@ -1118,7 +1118,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, */ if (regno == 0) { /* kernel subsystem misconfigured verifier */ - verbose("ARG_CONST_STACK_SIZE cannot be first argument\n"); + verbose("ARG_CONST_SIZE cannot be first argument\n"); return -EACCES; } @@ -1235,15 +1235,15 @@ static int check_raw_mode(const struct bpf_func_proto *fn) { int count = 0; - if (fn->arg1_type == ARG_PTR_TO_RAW_STACK) + if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; - if (fn->arg2_type == ARG_PTR_TO_RAW_STACK) + if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; - if (fn->arg3_type == ARG_PTR_TO_RAW_STACK) + if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; - if (fn->arg4_type == ARG_PTR_TO_RAW_STACK) + if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; - if (fn->arg5_type == ARG_PTR_TO_RAW_STACK) + if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; return count > 1 ? -EINVAL : 0; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index fa77311dadb2..f883c43c96f3 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -76,8 +76,8 @@ static const struct bpf_func_proto bpf_probe_read_proto = { .func = bpf_probe_read, .gpl_only = true, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_RAW_STACK, - .arg2_type = ARG_CONST_STACK_SIZE, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, }; @@ -109,8 +109,8 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = { .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, - .arg2_type = ARG_PTR_TO_STACK, - .arg3_type = ARG_CONST_STACK_SIZE, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, }; static const struct bpf_func_proto *bpf_get_probe_write_proto(void) @@ -213,8 +213,8 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { .func = bpf_trace_printk, .gpl_only = true, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_STACK, - .arg2_type = ARG_CONST_STACK_SIZE, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE, }; const struct bpf_func_proto *bpf_get_trace_printk_proto(void) @@ -329,8 +329,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_STACK, - .arg5_type = ARG_CONST_STACK_SIZE, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE, }; static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); @@ -492,8 +492,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_STACK, - .arg5_type = ARG_CONST_STACK_SIZE, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE, }; BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, diff --git a/net/core/filter.c b/net/core/filter.c index 1969b3f118c1..f4d16a905754 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1416,8 +1416,8 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_STACK, - .arg4_type = ARG_CONST_STACK_SIZE, + .arg3_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; @@ -1447,8 +1447,8 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_RAW_STACK, - 
.arg4_type = ARG_CONST_STACK_SIZE, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, }; BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) @@ -1601,10 +1601,10 @@ static const struct bpf_func_proto bpf_csum_diff_proto = { .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_STACK, - .arg2_type = ARG_CONST_STACK_SIZE_OR_ZERO, - .arg3_type = ARG_PTR_TO_STACK, - .arg4_type = ARG_CONST_STACK_SIZE_OR_ZERO, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_ANYTHING, }; @@ -2306,8 +2306,8 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_STACK, - .arg5_type = ARG_CONST_STACK_SIZE, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE, }; static unsigned short bpf_tunnel_key_af(u64 flags) @@ -2377,8 +2377,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_RAW_STACK, - .arg3_type = ARG_CONST_STACK_SIZE, + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; @@ -2412,8 +2412,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_RAW_STACK, - .arg3_type = ARG_CONST_STACK_SIZE, + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE, }; static struct metadata_dst __percpu *md_dst; @@ -2483,8 +2483,8 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_STACK, - .arg3_type = ARG_CONST_STACK_SIZE, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; @@ -2509,8 +2509,8 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_STACK, - .arg3_type = ARG_CONST_STACK_SIZE, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, }; static const struct bpf_func_proto * @@ -2593,8 +2593,8 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_STACK, - .arg5_type = ARG_CONST_STACK_SIZE, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE, }; static const struct bpf_func_proto * -- cgit v1.2.3 From b369e7fd41f7cdbe2488cb736ef4f958bb94b5e2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 9 Jan 2017 10:29:27 -0800 Subject: tcp: make TCP_INFO more consistent tcp_get_info() has to lock the socket, so lets lock it for an extended critical section, so that various fields have consistent values. This solves an annoying issue that some applications reported when multiple counters are updated during one particular rx/rx event, and TCP_INFO was called from another cpu. Signed-off-by: Eric Dumazet Cc: Yuchung Cheng Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index ec97e4b4a62f..c8d46c140b4a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2766,6 +2766,9 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_sacked = sk->sk_max_ack_backlog; return; } + + slow = lock_sock_fast(sk); + info->tcpi_ca_state = icsk->icsk_ca_state; info->tcpi_retransmits = icsk->icsk_retransmits; info->tcpi_probes = icsk->icsk_probes_out; @@ -2816,15 +2819,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_total_retrans = tp->total_retrans; - slow = lock_sock_fast(sk); - info->tcpi_bytes_acked = tp->bytes_acked; info->tcpi_bytes_received = tp->bytes_received; info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); tcp_get_info_chrono_stats(tp, info); - unlock_sock_fast(sk, slow); - info->tcpi_segs_out = tp->segs_out; info->tcpi_segs_in = tp->segs_in; @@ -2840,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) do_div(rate64, intv); info->tcpi_delivery_rate = rate64; } + unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); -- cgit v1.2.3 From 3a89eaa65db68bf53bf92dedc60084f810e1779a Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 Jan 2017 16:49:26 -0500 Subject: net: dsa: select NET_SWITCHDEV The support for DSA Ethernet switch chips depends on TCP/IP networking, thus explicit that HAVE_NET_DSA depends on INET. DSA uses SWITCHDEV, thus select it instead of depending on it. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Tested-by: Randy Dunlap Signed-off-by: David S. Miller --- net/dsa/Kconfig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 96e47c539bee..39bb5b3a82f2 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -1,12 +1,13 @@ config HAVE_NET_DSA def_bool y - depends on NETDEVICES && !S390 + depends on INET && NETDEVICES && !S390 # Drivers must select NET_DSA and the appropriate tagging format config NET_DSA tristate "Distributed Switch Architecture" - depends on HAVE_NET_DSA && NET_SWITCHDEV + depends on HAVE_NET_DSA + select NET_SWITCHDEV select PHYLIB ---help--- Say Y if you want to enable support for the hardware switches supported -- cgit v1.2.3 From 961a9c0a4a9783e47f8cb76134b2303c4872380b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 Jan 2017 14:20:45 +0100 Subject: xfrm: remove unused function Has been ifdef'd out for more than 10 years, remove it. Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/ipv4/xfrm4_state.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 542074c00c78..d6660a8c0ea5 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c @@ -90,11 +90,3 @@ void __init xfrm4_state_init(void) { xfrm_state_register_afinfo(&xfrm4_state_afinfo); } - -#if 0 -void __exit xfrm4_state_fini(void) -{ - xfrm_state_unregister_afinfo(&xfrm4_state_afinfo); -} -#endif /* 0 */ - -- cgit v1.2.3 From 423826a7b104724a16676a75d597a35d3bdb18b8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 Jan 2017 14:20:46 +0100 Subject: xfrm: avoid rcu sparse warning xfrm/xfrm_state.c:1973:21: error: incompatible types in comparison expression (different address spaces) Harmless, but lets fix it to reduce the noise. 
While at it, get rid of unneeded NULL check, its never hit: net/ipv4/xfrm4_state.c: xfrm_state_register_afinfo(&xfrm4_state_afinfo); net/ipv6/xfrm6_state.c: return xfrm_state_register_afinfo(&xfrm6_state_afinfo); net/ipv6/xfrm6_state.c: xfrm_state_unregister_afinfo(&xfrm6_state_afinfo); ... are the only callsites. Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_state.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 6b3366fef019..57e9578c35e2 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1932,10 +1932,10 @@ EXPORT_SYMBOL(xfrm_unregister_km); int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) { int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) + + if (WARN_ON(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; + spin_lock_bh(&xfrm_state_afinfo_lock); if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) err = -EEXIST; @@ -1948,14 +1948,14 @@ EXPORT_SYMBOL(xfrm_state_register_afinfo); int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) { - int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) + int err = 0, family = afinfo->family; + + if (WARN_ON(family >= NPROTO)) return -EAFNOSUPPORT; + spin_lock_bh(&xfrm_state_afinfo_lock); if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { - if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo)) + if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo) err = -EINVAL; else RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL); -- cgit v1.2.3 From af5d27c4e12b804c065c0e7c87507fea5683dab4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 Jan 2017 14:20:47 +0100 Subject: xfrm: remove xfrm_state_put_afinfo commit 44abdc3047aecafc141dfbaf1ed ("xfrm: replace rwlock on xfrm_state_afinfo with rcu") made xfrm_state_put_afinfo equivalent to rcu_read_unlock. 
Use spatch to replace it with direct calls to rcu_read_unlock: @@ struct xfrm_state_afinfo *a; @@ - xfrm_state_put_afinfo(a); + rcu_read_unlock(); old: text data bss dec hex filename 22570 72 424 23066 5a1a xfrm_state.o 1612 0 0 1612 64c xfrm_output.o new: 22554 72 424 23050 5a0a xfrm_state.o 1596 0 0 1596 63c xfrm_output.o Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 - net/xfrm/xfrm_output.c | 8 +++----- net/xfrm/xfrm_state.c | 31 +++++++++++++------------------ 3 files changed, 16 insertions(+), 24 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 31947b9c21d6..957d0cc30691 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -343,7 +343,6 @@ struct xfrm_state_afinfo { int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); -void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); struct xfrm_input_afinfo { unsigned int family; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 637387bbaaea..8ba29fe58352 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -246,10 +246,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) return; afinfo = xfrm_state_get_afinfo(proto); - if (!afinfo) - return; - - afinfo->local_error(skb, mtu); - xfrm_state_put_afinfo(afinfo); + if (afinfo) + afinfo->local_error(skb, mtu); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(xfrm_local_error); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 57e9578c35e2..783084484582 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -192,7 +192,7 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family) else err = -EEXIST; spin_unlock_bh(&xfrm_type_lock); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(xfrm_register_type); @@ -213,7 +213,7 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family) else typemap[type->proto] = NULL; spin_unlock_bh(&xfrm_type_lock); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(xfrm_unregister_type); @@ -235,13 +235,13 @@ retry: if (unlikely(type && !try_module_get(type->owner))) type = NULL; if (!type && !modload_attempted) { - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); request_module("xfrm-type-%d-%d", family, proto); modload_attempted = 1; goto retry; } - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return type; } @@ -280,7 +280,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family) out: spin_unlock_bh(&xfrm_mode_lock); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(xfrm_register_mode); @@ -308,7 +308,7 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family) } spin_unlock_bh(&xfrm_mode_lock); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(xfrm_unregister_mode); @@ -331,13 +331,13 @@ retry: if (unlikely(mode && !try_module_get(mode->owner))) mode = NULL; if (!mode && !modload_attempted) { - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); request_module("xfrm-mode-%d-%d", family, encap); modload_attempted = 1; goto retry; } - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return mode; } @@ -651,13 +651,13 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl, afinfo->init_tempsel(&x->sel, fl); if (family != tmpl->encap_family) { - xfrm_state_put_afinfo(afinfo); + 
rcu_read_unlock(); afinfo = xfrm_state_get_afinfo(tmpl->encap_family); if (!afinfo) return -1; } afinfo->init_temprop(x, tmpl, daddr, saddr); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return 0; } @@ -1474,7 +1474,7 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, if (afinfo->tmpl_sort) err = afinfo->tmpl_sort(dst, src, n); spin_unlock_bh(&net->xfrm.xfrm_state_lock); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(xfrm_tmpl_sort); @@ -1494,7 +1494,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, if (afinfo->state_sort) err = afinfo->state_sort(dst, src, n); spin_unlock_bh(&net->xfrm.xfrm_state_lock); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(xfrm_state_sort); @@ -1978,11 +1978,6 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) return afinfo; } -void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) -{ - rcu_read_unlock(); -} - /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ void xfrm_state_delete_tunnel(struct xfrm_state *x) { @@ -2025,7 +2020,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay) if (afinfo->init_flags) err = afinfo->init_flags(x); - xfrm_state_put_afinfo(afinfo); + rcu_read_unlock(); if (err) goto error; -- cgit v1.2.3 From 711059b9752ad09ae6bcd4be8e48d30e5db483d8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 Jan 2017 14:20:48 +0100 Subject: xfrm: add and use xfrm_state_afinfo_get_rcu xfrm_init_tempstate is always called from within rcu read side section. We can thus use a simpler function that doesn't call rcu_read_lock again. While at it, also make xfrm_init_tempstate return value void, the return value was never tested. A followup patch will replace remaining callers of xfrm_state_get_afinfo with xfrm_state_afinfo_get_rcu variant and then remove the 'old' get_afinfo interface. 
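A brief usage sketch may help here; it is illustrative only, and the enclosing function and pr_debug() message are hypothetical. It shows that a caller already inside an RCU read-side critical section can use the new accessor without nesting another rcu_read_lock(), since the accessor only performs an rcu_dereference():

        static void example_report_family(unsigned int family)
        {
                struct xfrm_state_afinfo *afinfo;

                rcu_read_lock();
                /* Only an rcu_dereference() inside: no extra locking to pair. */
                afinfo = xfrm_state_afinfo_get_rcu(family);
                if (afinfo)
                        pr_debug("xfrm state afinfo registered for family %u\n",
                                 family);
                rcu_read_unlock();
        }
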
Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/xfrm/xfrm_state.c | 25 +++++++++++++++---------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 957d0cc30691..c52197cf51dc 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -343,6 +343,7 @@ struct xfrm_state_afinfo { int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); +struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family); struct xfrm_input_afinfo { unsigned int family; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 783084484582..b5dad899fb0e 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -639,26 +639,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) } EXPORT_SYMBOL(xfrm_sad_getinfo); -static int +static void xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl, const struct xfrm_tmpl *tmpl, const xfrm_address_t *daddr, const xfrm_address_t *saddr, unsigned short family) { - struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); - if (!afinfo) - return -1; - afinfo->init_tempsel(&x->sel, fl); + struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family); + + if (afinfo) + afinfo->init_tempsel(&x->sel, fl); if (family != tmpl->encap_family) { - rcu_read_unlock(); - afinfo = xfrm_state_get_afinfo(tmpl->encap_family); + afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family); if (!afinfo) - return -1; + return; } afinfo->init_temprop(x, tmpl, daddr, saddr); - rcu_read_unlock(); - return 0; } static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, @@ -1966,6 +1963,14 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_state_unregister_afinfo); +struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family) +{ + if (unlikely(family >= NPROTO)) + return NULL; + + return rcu_dereference(xfrm_state_afinfo[family]); +} + struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) { struct xfrm_state_afinfo *afinfo; -- cgit v1.2.3 From 75cda62d9ca2cd3fab0412d438fcd1bfa0580b3d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 Jan 2017 14:20:49 +0100 Subject: xfrm: state: simplify rcu_read_unlock handling in two spots Instead of: if (foo) { unlock(); return bar(); } unlock(); do: unlock(); if (foo) return bar(); This is ok because rcu protected structure is only dereferenced before the conditional. 
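Condensed into one hypothetical function (mirroring the hunks below, with the surrounding lookup details elided), the resulting shape is: the RCU-protected entry is read and pinned with a module reference while the read-side section is still held, the section is closed, and only then does the slow module-load retry path run.

        static const struct xfrm_type *example_get_type(unsigned short family, u8 proto)
        {
                const struct xfrm_type *type;
                struct xfrm_state_afinfo *afinfo;
                int modload_attempted = 0;

        retry:
                afinfo = xfrm_state_get_afinfo(family); /* takes rcu_read_lock() */
                if (!afinfo)
                        return NULL;

                type = READ_ONCE(afinfo->type_map[proto]);
                if (type && !try_module_get(type->owner)) /* pin before unlocking */
                        type = NULL;

                rcu_read_unlock(); /* nothing RCU-protected is touched after this */

                if (!type && !modload_attempted) {
                        request_module("xfrm-type-%d-%d", family, proto);
                        modload_attempted = 1;
                        goto retry;
                }

                return type;
        }
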
Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_state.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index b5dad899fb0e..a62097e640b5 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -231,17 +231,18 @@ retry: return NULL; typemap = afinfo->type_map; - type = typemap[proto]; + type = READ_ONCE(typemap[proto]); if (unlikely(type && !try_module_get(type->owner))) type = NULL; + + rcu_read_unlock(); + if (!type && !modload_attempted) { - rcu_read_unlock(); request_module("xfrm-type-%d-%d", family, proto); modload_attempted = 1; goto retry; } - rcu_read_unlock(); return type; } @@ -327,17 +328,17 @@ retry: if (unlikely(afinfo == NULL)) return NULL; - mode = afinfo->mode_map[encap]; + mode = READ_ONCE(afinfo->mode_map[encap]); if (unlikely(mode && !try_module_get(mode->owner))) mode = NULL; + + rcu_read_unlock(); if (!mode && !modload_attempted) { - rcu_read_unlock(); request_module("xfrm-mode-%d-%d", family, encap); modload_attempted = 1; goto retry; } - rcu_read_unlock(); return mode; } -- cgit v1.2.3 From 79f664edc1cf51a87d6681514ec89b6da5fef8e1 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Mon, 9 Jan 2017 12:03:12 -0600 Subject: net: qcom/emac: add ethtool support Add support for some ethtool methods: get/set link settings, get/set message level, get statistics, get link status, get ring params, get pause params, and restart autonegotiation. The code to collect the hardware statistics is moved into its own function so that it can be used by "get statistics" method. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/Makefile | 2 +- drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 185 ++++++++++++++++++++++ drivers/net/ethernet/qualcomm/emac/emac.c | 52 +++--- drivers/net/ethernet/qualcomm/emac/emac.h | 3 + 4 files changed, 221 insertions(+), 21 deletions(-) create mode 100644 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile index 7a6687982dae..fc57cedf4c0c 100644 --- a/drivers/net/ethernet/qualcomm/emac/Makefile +++ b/drivers/net/ethernet/qualcomm/emac/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o -qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o \ +qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \ emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \ emac-sgmii-qdf2400.o diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c new file mode 100644 index 000000000000..cfc57d2c64f9 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -0,0 +1,185 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#include "emac.h" + +static const char * const emac_ethtool_stat_strings[] = { + "rx_ok", + "rx_bcast", + "rx_mcast", + "rx_pause", + "rx_ctrl", + "rx_fcs_err", + "rx_len_err", + "rx_byte_cnt", + "rx_runt", + "rx_frag", + "rx_sz_64", + "rx_sz_65_127", + "rx_sz_128_255", + "rx_sz_256_511", + "rx_sz_512_1023", + "rx_sz_1024_1518", + "rx_sz_1519_max", + "rx_sz_ov", + "rx_rxf_ov", + "rx_align_err", + "rx_bcast_byte_cnt", + "rx_mcast_byte_cnt", + "rx_err_addr", + "rx_crc_align", + "rx_jabbers", + "tx_ok", + "tx_bcast", + "tx_mcast", + "tx_pause", + "tx_exc_defer", + "tx_ctrl", + "tx_defer", + "tx_byte_cnt", + "tx_sz_64", + "tx_sz_65_127", + "tx_sz_128_255", + "tx_sz_256_511", + "tx_sz_512_1023", + "tx_sz_1024_1518", + "tx_sz_1519_max", + "tx_1_col", + "tx_2_col", + "tx_late_col", + "tx_abort_col", + "tx_underrun", + "tx_rd_eop", + "tx_len_err", + "tx_trunc", + "tx_bcast_byte", + "tx_mcast_byte", + "tx_col", +}; + +#define EMAC_STATS_LEN ARRAY_SIZE(emac_ethtool_stat_strings) + +static u32 emac_get_msglevel(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + return adpt->msg_enable; +} + +static void emac_set_msglevel(struct net_device *netdev, u32 data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->msg_enable = data; +} + +static int emac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return EMAC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < EMAC_STATS_LEN; i++) { + strlcpy(data, emac_ethtool_stat_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static void emac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + spin_lock(&adpt->stats.lock); + + emac_update_hw_stats(adpt); + memcpy(data, &adpt->stats, EMAC_STATS_LEN * sizeof(u64)); + + spin_unlock(&adpt->stats.lock); +} + +static int emac_nway_reset(struct net_device *netdev) +{ + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + +static void emac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + ring->rx_max_pending = EMAC_MAX_RX_DESCS; + ring->tx_max_pending = EMAC_MAX_TX_DESCS; + ring->rx_pending = adpt->rx_desc_cnt; + ring->tx_pending = adpt->tx_desc_cnt; +} + +static void emac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct phy_device *phydev = netdev->phydev; + + if (phydev) { + if (phydev->autoneg) + pause->autoneg = 1; + if (phydev->pause) + pause->rx_pause = 1; + if (phydev->pause != phydev->asym_pause) + pause->tx_pause = 1; + } +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + + .get_msglevel = emac_get_msglevel, + .set_msglevel = emac_set_msglevel, + + .get_sset_count = emac_get_sset_count, + .get_strings = emac_get_strings, + .get_ethtool_stats = emac_get_ethtool_stats, + + .get_ringparam = emac_get_ringparam, + .get_pauseparam = emac_get_pauseparam, + + .nway_reset = emac_nway_reset, + + .get_link = ethtool_op_get_link, +}; + +void emac_set_ethtool_ops(struct net_device *netdev) +{ + 
netdev->ethtool_ops = &emac_ethtool_ops; +} diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 40ebe010b06f..6ffe1920b6dd 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -311,45 +311,56 @@ static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return phy_mii_ioctl(netdev->phydev, ifr, cmd); } -/* Provide network statistics info for the interface */ -static void emac_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *net_stats) +/** + * emac_update_hw_stats - read the EMAC stat registers + * + * Reads the stats registers and write the values to adpt->stats. + * + * adpt->stats.lock must be held while calling this function, + * and while reading from adpt->stats. + */ +void emac_update_hw_stats(struct emac_adapter *adpt) { - struct emac_adapter *adpt = netdev_priv(netdev); - unsigned int addr = REG_MAC_RX_STATUS_BIN; struct emac_stats *stats = &adpt->stats; u64 *stats_itr = &adpt->stats.rx_ok; - u32 val; - - spin_lock(&stats->lock); + void __iomem *base = adpt->base; + unsigned int addr; + addr = REG_MAC_RX_STATUS_BIN; while (addr <= REG_MAC_RX_STATUS_END) { - val = readl_relaxed(adpt->base + addr); - *stats_itr += val; + *stats_itr += readl_relaxed(base + addr); stats_itr++; addr += sizeof(u32); } /* additional rx status */ - val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23); - adpt->stats.rx_crc_align += val; - val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24); - adpt->stats.rx_jabbers += val; + stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23); + stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24); /* update tx status */ addr = REG_MAC_TX_STATUS_BIN; - stats_itr = &adpt->stats.tx_ok; + stats_itr = &stats->tx_ok; while (addr <= REG_MAC_TX_STATUS_END) { - val = readl_relaxed(adpt->base + addr); - *stats_itr += val; - ++stats_itr; + *stats_itr += readl_relaxed(base + addr); + stats_itr++; addr += sizeof(u32); } /* additional tx status */ - val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25); - adpt->stats.tx_col += val; + stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25); +} + +/* Provide network statistics info for the interface */ +static void emac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_stats *stats = &adpt->stats; + + spin_lock(&stats->lock); + + emac_update_hw_stats(adpt); /* return parsed statistics */ net_stats->rx_packets = stats->rx_ok; @@ -618,6 +629,7 @@ static int emac_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); + emac_set_ethtool_ops(netdev); adpt = netdev_priv(netdev); adpt->netdev = netdev; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 0c76e6cb8c9e..4b8483cc2c7f 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -332,4 +332,7 @@ int emac_reinit_locked(struct emac_adapter *adpt); void emac_reg_update32(void __iomem *addr, u32 mask, u32 val); irqreturn_t emac_isr(int irq, void *data); +void emac_set_ethtool_ops(struct net_device *netdev); +void emac_update_hw_stats(struct emac_adapter *adpt); + #endif /* _EMAC_H_ */ -- cgit v1.2.3 From 08a7b29be9734adab7f64a7675764a39ab7d29e1 Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Tue, 10 Jan 2017 16:23:33 +0000 Subject: sfc: support 
ndo_get_phys_port_id even when !CONFIG_SFC_SRIOV There's no good reason why this should be an SRIOV-only thing. Thus, also move it out of SRIOV-specific files. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 17 ++++++++++++++++- drivers/net/ethernet/sfc/ef10_sriov.c | 14 -------------- drivers/net/ethernet/sfc/ef10_sriov.h | 3 --- drivers/net/ethernet/sfc/efx.c | 13 ++++++++++++- drivers/net/ethernet/sfc/net_driver.h | 5 +++-- drivers/net/ethernet/sfc/sriov.c | 11 ----------- drivers/net/ethernet/sfc/sriov.h | 3 --- 7 files changed, 31 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 5eb0e684fd76..4f3c965f5997 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5540,6 +5540,20 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, } } +static int efx_ef10_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} + static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) { if (proto != htons(ETH_P_8021Q)) @@ -5647,11 +5661,11 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .vswitching_probe = efx_ef10_vswitching_probe_vf, .vswitching_restore = efx_ef10_vswitching_restore_vf, .vswitching_remove = efx_ef10_vswitching_remove_vf, - .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, #endif .get_mac_address = efx_ef10_get_mac_address_vf, .set_mac_address = efx_ef10_set_mac_address, + .get_phys_port_id = efx_ef10_get_phys_port_id, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, @@ -5776,6 +5790,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .set_mac_address = efx_ef10_set_mac_address, .tso_versions = efx_ef10_tso_versions, + .get_phys_port_id = efx_ef10_get_phys_port_id, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index a949b9d27329..a55c53d6f559 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -760,17 +760,3 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, return 0; } - -int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, - struct netdev_phys_item_id *ppid) -{ - struct efx_ef10_nic_data *nic_data = efx->nic_data; - - if (!is_valid_ether_addr(nic_data->port_id)) - return -EOPNOTSUPP; - - ppid->id_len = ETH_ALEN; - memcpy(ppid->id, nic_data->port_id, ppid->id_len); - - return 0; -} diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index 9ceb7ef0a210..2aa444ed42de 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -56,9 +56,6 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, int link_state); -int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, - struct netdev_phys_item_id *ppid); - int efx_ef10_vswitching_probe_pf(struct efx_nic *efx); int efx_ef10_vswitching_probe_vf(struct efx_nic *efx); int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); diff --git 
a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index ebeecb8fed45..eaae42fa109c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2334,6 +2334,17 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return 0; } +int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_phys_port_id) + return efx->type->get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} + static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) { struct efx_nic *efx = netdev_priv(net_dev); @@ -2374,8 +2385,8 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, .ndo_get_vf_config = efx_sriov_get_vf_config, .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, - .ndo_get_phys_port_id = efx_sriov_get_phys_port_id, #endif + .ndo_get_phys_port_id = efx_get_phys_port_id, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, #endif diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 1c62c1a00fca..49db9e833c96 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1220,6 +1220,7 @@ struct efx_mtd_partition { * @ptp_set_ts_config: Set hardware timestamp configuration. The flags * and tx_type will already have been validated but this operation * must validate and update rx_filter. + * @get_phys_port_id: Get the underlying physical port id. * @set_mac_address: Set the MAC address of the device * @tso_versions: Returns mask of firmware-assisted TSO versions supported. * If %NULL, then device does not support any TSO version. 
@@ -1358,6 +1359,8 @@ struct efx_nic_type { int (*sriov_configure)(struct efx_nic *efx, int num_vfs); int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); @@ -1372,8 +1375,6 @@ struct efx_nic_type { struct ifla_vf_info *ivi); int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, int link_state); - int (*sriov_get_phys_port_id)(struct efx_nic *efx, - struct netdev_phys_item_id *ppid); int (*vswitching_probe)(struct efx_nic *efx); int (*vswitching_restore)(struct efx_nic *efx); void (*vswitching_remove)(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c index 9abcf4aded30..0b766fdbcddb 100644 --- a/drivers/net/ethernet/sfc/sriov.c +++ b/drivers/net/ethernet/sfc/sriov.c @@ -73,14 +73,3 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, else return -EOPNOTSUPP; } - -int efx_sriov_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_get_phys_port_id) - return efx->type->sriov_get_phys_port_id(efx, ppid); - else - return -EOPNOTSUPP; -} diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h index ba1762e7f216..84c7984edcaf 100644 --- a/drivers/net/ethernet/sfc/sriov.h +++ b/drivers/net/ethernet/sfc/sriov.h @@ -23,9 +23,6 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, struct ifla_vf_info *ivi); int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, int link_state); -int efx_sriov_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid); - #endif /* CONFIG_SFC_SRIOV */ #endif /* EFX_SRIOV_H */ -- cgit v1.2.3 From ac019f08953278a280cb30849c3ebab9a7df31de Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Tue, 10 Jan 2017 16:23:56 +0000 Subject: sfc: implement ndo_get_phys_port_name Output is of the form p. Note that the port numbers don't necessarily map one-to-one to physical cages, partly because of 4x10G port modes on QSFP+ and partly because of hw/fw implementation details. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index eaae42fa109c..543fa488d049 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2345,6 +2345,16 @@ int efx_get_phys_port_id(struct net_device *net_dev, return -EOPNOTSUPP; } +static int efx_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (snprintf(name, len, "p%u", efx->port_num) >= len) + return -EINVAL; + return 0; +} + static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) { struct efx_nic *efx = netdev_priv(net_dev); @@ -2387,6 +2397,7 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, #endif .ndo_get_phys_port_id = efx_get_phys_port_id, + .ndo_get_phys_port_name = efx_get_phys_port_name, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, #endif -- cgit v1.2.3 From 0d71a84c7448a9666a99e0aa1293cecc095f2e5d Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Tue, 10 Jan 2017 16:24:31 +0000 Subject: sfc: stop setting dev_port Setting dev_port changes the device names allocated by systemd. Any devices with a dev_port >0 will (in default distro configurations) have a suffix of "d" appended. This is not something done by other drivers, and causes confusion for users. Fixes: 8be41320f346 ("sfc: Add code to export port_num in netdev->dev_port") Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 4f3c965f5997..208e0048709b 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -547,7 +547,6 @@ static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); static int efx_ef10_probe(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data; - struct net_device *net_dev = efx->net_dev; int i, rc; /* We can have one VI for each 8K region. However, until we @@ -637,7 +636,6 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc < 0) goto fail5; efx->port_num = rc; - net_dev->dev_port = rc; rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); if (rc) -- cgit v1.2.3 From de28c99d71d91251713b67c545fa05b2b5e0d232 Mon Sep 17 00:00:00 2001 From: Prasad Kanneganti Date: Mon, 9 Jan 2017 14:42:40 -0800 Subject: liquidio: store the L4 hash of rx packets in skb Store the L4 hash of received packets in the skb; the hash is computed in the NIC firmware. Signed-off-by: Prasad Kanneganti Signed-off-by: Felix Manlunas Signed-off-by: Derek Chickles Signed-off-by: Satanand Burla Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 18 ++++++++++++++++-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 16 +++++++++++++++- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 2 ++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 13f67a32cc4d..b8b579d8043f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2263,6 +2263,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), struct skb_shared_hwtstamps *shhwtstamps; u64 ns; u16 vtag = 0; + u32 r_dh_off; struct net_device *netdev = (struct net_device *)arg; struct octeon_droq *droq = container_of(param, struct octeon_droq, napi); @@ -2308,6 +2309,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), put_page(pg_info->page); } + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + if (((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX)) && ptp_enable) { @@ -2320,16 +2323,27 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), /* Nanoseconds are in the first 64-bits * of the packet. */ - memcpy(&ns, (skb->data), sizeof(ns)); + memcpy(&ns, (skb->data + r_dh_off), + sizeof(ns)); + r_dh_off -= BYTES_PER_DHLEN_UNIT; shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); } - skb_pull(skb, sizeof(ns)); } } + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); if ((netdev->features & NETIF_F_RXCSUM) && (((rh->r_dh.encap_on) && diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 55846f2d7e46..ad2e72d7d522 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1497,6 +1497,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), struct net_device *netdev = (struct net_device *)arg; struct sk_buff *skb = (struct sk_buff *)skbuff; u16 vtag = 0; + u32 r_dh_off; if (netdev) { struct lio *lio = GET_LIO(netdev); @@ -1540,7 +1541,20 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), put_page(pg_info->page); } - skb_pull(skb, rh->r_dh.len * 8); + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + + if (rh->r_dh.has_hwtstamp) + r_dh_off -= BYTES_PER_DHLEN_UNIT; + + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); skb->protocol = eth_type_trans(skb, skb->dev); if ((netdev->features & NETIF_F_RXCSUM) && diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index ba329f6ca779..bc0af8af1a0c 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -98,6 +98,8 @@ enum octeon_tag_type { #define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2) #define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1) +#define BYTES_PER_DHLEN_UNIT 8 + static inline u32 incr_index(u32 index, u32 count, u32 max) { if ((index + count) 
>= max) -- cgit v1.2.3 From da36e13cf653541e385a8d2ec2637fff6ea3461a Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Mon, 9 Jan 2017 15:05:54 -0800 Subject: ipvlan: improvise dev_id generation logic in IPvlan The patch 009146d117b ("ipvlan: assign unique dev-id for each slave device.") used ida_simple_get() to generate dev_ids assigned to the slave devices. However (Eric has pointed out that) there is a shortcoming with that approach as it always uses the first available ID. This becomes a problem when a slave gets deleted and a new slave gets added. The ID gets reassigned causing the new slave to get the same link-local address. This side-effect is undesirable. This patch adds a per-port variable that keeps track of the IDs assigned and used as the stat-base for the IDR api. This base will be wrapped around when it reaches the MAX (0xFFFE) value possibly on a busy system where slaves are added and deleted routinely. Fixes: 009146d117b ("ipvlan: assign unique dev-id for each slave device.") Signed-off-by: Mahesh Bandewar CC: Eric Dumazet CC: David Miller Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan.h | 1 + drivers/net/ipvlan/ipvlan_main.c | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 0a9068fdee0f..406ae4ff0ae8 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -94,6 +94,7 @@ struct ipvl_port { struct hlist_head hlhead[IPVLAN_HASH_SIZE]; struct list_head ipvlans; u16 mode; + u16 dev_id_start; struct work_struct wq; struct sk_buff_head backlog; int count; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1cdb8c5ec403..92b221a03350 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -120,6 +120,7 @@ static int ipvlan_port_create(struct net_device *dev) skb_queue_head_init(&port->backlog); INIT_WORK(&port->wq, ipvlan_process_multicast); ida_init(&port->ida); + port->dev_id_start = 1; err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port); if (err) @@ -534,15 +535,25 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); + /* If the port-id base is at the MAX value, then wrap it around and + * begin from 0x1 again. This may be due to a busy system where lots + * of slaves are getting created and deleted. + */ + if (port->dev_id_start == 0xFFFE) + port->dev_id_start = 0x1; + /* Since L2 address is shared among all IPvlan slaves including * master, use unique 16 bit dev-ids to diffentiate among them. * Assign IDs between 0x1 and 0xFFFE (used by the master) to each * slave link [see addrconf_ifid_eui48()]. 
*/ - err = ida_simple_get(&port->ida, 1, 0xFFFE, GFP_KERNEL); + err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE, + GFP_KERNEL); if (err < 0) goto destroy_ipvlan_port; dev->dev_id = err; + /* Increment id-base to the next slot for the future assignment */ + port->dev_id_start = err + 1; /* TODO Probably put random address here to be presented to the * world but keep using the physical-dev address for the outgoing -- cgit v1.2.3 From a5ef01aaac245d37edf113d65b0c146e96d841d1 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 10 Jan 2017 15:02:07 +0100 Subject: bpf: Remove unused but set variable in __bpf_lru_list_shrink_inactive() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the unused but set variable 'first_node' in __bpf_lru_list_shrink_inactive() to fix the following GCC warning when building with 'W=1': kernel/bpf/bpf_lru_list.c:216:41: warning: variable ‘first_node’ set but not used [-Wunused-but-set-variable] Cc: Martin KaFai Lau Signed-off-by: Tobias Klauser Acked-by: Martin KaFai Lau Signed-off-by: David S. Miller --- kernel/bpf/bpf_lru_list.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c index 89b7ef41c86b..d78501ee0609 100644 --- a/kernel/bpf/bpf_lru_list.c +++ b/kernel/bpf/bpf_lru_list.c @@ -213,11 +213,10 @@ __bpf_lru_list_shrink_inactive(struct bpf_lru *lru, enum bpf_lru_list_type tgt_free_type) { struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; - struct bpf_lru_node *node, *tmp_node, *first_node; + struct bpf_lru_node *node, *tmp_node; unsigned int nshrinked = 0; unsigned int i = 0; - first_node = list_first_entry(inactive, struct bpf_lru_node, list); list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) { if (bpf_lru_node_is_ref(node)) { __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); -- cgit v1.2.3 From 3bf003335ba356aac5a43e28640159d4ae8a2a60 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 10 Jan 2017 15:02:16 +0100 Subject: bpf: Make unnecessarily global functions static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the functions __local_list_pop_free(), __local_list_pop_pending(), bpf_common_lru_populate() and bpf_percpu_lru_populate() static as they are not used outide of bpf_lru_list.c This fixes the following GCC warnings when building with 'W=1': kernel/bpf/bpf_lru_list.c:363:22: warning: no previous prototype for ‘__local_list_pop_free’ [-Wmissing-prototypes] kernel/bpf/bpf_lru_list.c:376:22: warning: no previous prototype for ‘__local_list_pop_pending’ [-Wmissing-prototypes] kernel/bpf/bpf_lru_list.c:560:6: warning: no previous prototype for ‘bpf_common_lru_populate’ [-Wmissing-prototypes] kernel/bpf/bpf_lru_list.c:577:6: warning: no previous prototype for ‘bpf_percpu_lru_populate’ [-Wmissing-prototypes] Cc: Martin KaFai Lau Signed-off-by: Tobias Klauser Acked-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Signed-off-by: David S. 
Miller --- kernel/bpf/bpf_lru_list.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c index d78501ee0609..f62d1d56f41d 100644 --- a/kernel/bpf/bpf_lru_list.c +++ b/kernel/bpf/bpf_lru_list.c @@ -360,7 +360,8 @@ static void __local_list_add_pending(struct bpf_lru *lru, list_add(&node->list, local_pending_list(loc_l)); } -struct bpf_lru_node *__local_list_pop_free(struct bpf_lru_locallist *loc_l) +static struct bpf_lru_node * +__local_list_pop_free(struct bpf_lru_locallist *loc_l) { struct bpf_lru_node *node; @@ -373,8 +374,8 @@ struct bpf_lru_node *__local_list_pop_free(struct bpf_lru_locallist *loc_l) return node; } -struct bpf_lru_node *__local_list_pop_pending(struct bpf_lru *lru, - struct bpf_lru_locallist *loc_l) +static struct bpf_lru_node * +__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l) { struct bpf_lru_node *node; bool force = false; @@ -557,8 +558,9 @@ void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node) bpf_common_lru_push_free(lru, node); } -void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, - u32 elem_size, u32 nr_elems) +static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, + u32 node_offset, u32 elem_size, + u32 nr_elems) { struct bpf_lru_list *l = &lru->common_lru.lru_list; u32 i; @@ -574,8 +576,9 @@ void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, } } -void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, - u32 elem_size, u32 nr_elems) +static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, + u32 node_offset, u32 elem_size, + u32 nr_elems) { u32 i, pcpu_entries; int cpu; -- cgit v1.2.3 From a505e58252715bbc18a0ee1abae23615fe2586db Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Tue, 10 Jan 2017 07:47:15 -0800 Subject: packet: pdiag_put_ring() should return TX_RING info for TPACKET_V3 Commit 7f953ab2ba46 ("af_packet: TX_RING support for TPACKET_V3") now makes it possible to use TX_RING with TPACKET_V3, so make the the relevant information available via 'ss -e -a --packet' Signed-off-by: Sowmini Varadhan Signed-off-by: David S. Miller --- net/packet/diag.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/packet/diag.c b/net/packet/diag.c index 0ed68f0238bf..7ef1c881ae74 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c @@ -73,8 +73,7 @@ static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type, { struct packet_diag_ring pdr; - if (!ring->pg_vec || ((ver > TPACKET_V2) && - (nl_type == PACKET_DIAG_TX_RING))) + if (!ring->pg_vec) return 0; pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT; -- cgit v1.2.3 From 7acec26cec5aa75e0dc54da1fb0b1f8acf575273 Mon Sep 17 00:00:00 2001 From: Jorge Ramirez-Ortiz Date: Mon, 9 Jan 2017 15:25:49 +0100 Subject: cfg80211: wext does not need to set monitor channel in managed mode There is not a valid reason to attempt setting the monitor channel while in managed mode. Since this code path only deals with this mode, remove the code block. Johannes: I'll note that the comment indicated it was for backward compatibility, but the code wasn't functional since switching the monitor channel isn't supported (any more?) when in managed mode, as that mode owns the channel configuration. 
Additionally, since monitor can't be done on a managed mode interface, this would only have had any effect to start with if a separate monitor interface is present, in which case it's better to change the channel through that anyway, if even possible. Signed-off-by: Jorge Ramirez-Ortiz Signed-off-by: Johannes Berg --- net/wireless/wext-sme.c | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 995163830a61..c434f193f39a 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -105,30 +105,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, goto out; } - wdev->wext.connect.channel = chan; - - /* - * SSID is not set, we just want to switch monitor channel, - * this is really just backward compatibility, if the SSID - * is set then we use the channel to select the BSS to use - * to connect to instead. If we were connected on another - * channel we disconnected above and reconnect below. - */ - if (chan && !wdev->wext.connect.ssid_len) { - struct cfg80211_chan_def chandef = { - .width = NL80211_CHAN_WIDTH_20_NOHT, - .center_freq1 = freq, - }; - - chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); - if (chandef.chan) - err = cfg80211_set_monitor_channel(rdev, &chandef); - else - err = -EINVAL; - goto out; - } - err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); -- cgit v1.2.3 From 9f91484f6fcc28f9b5ebe11755e7488e39ea75e4 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 Jan 2017 18:13:51 -0500 Subject: net: dsa: make "label" property optional for dsa2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the new DTS bindings for DSA (dsa2), the "ethernet" and "link" phandles are respectively mandatory and exclusive to CPU port and DSA link device tree nodes. Simplify dsa2.c a bit by checking the presence of such phandle instead of checking the redundant "label" property. Then the Linux philosophy for Ethernet switch ports is to expose them to userspace as standard NICs by default. Thus use the standard enumerated "eth%d" device name if no "label" property is provided for a user port. This allows to save DTS files from subjective net device names. If one wants to rename an interface, udev rules can be used as usual. Of course the current behavior is unchanged, and the optional "label" property for user ports has precedence over the enumerated name. Signed-off-by: Vivien Didelot Acked-by: Uwe Kleine-König Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/dsa/dsa.txt | 20 ++++++++----------- net/dsa/dsa2.c | 24 ++++------------------- 2 files changed, 12 insertions(+), 32 deletions(-) diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index a4a570fb2494..cfe8f64eca4f 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -34,13 +34,9 @@ Required properties: Each port children node must have the following mandatory properties: - reg : Describes the port address in the switch -- label : Describes the label associated with this port, which - will become the netdev name. Special labels are - "cpu" to indicate a CPU port and "dsa" to - indicate an uplink/downlink port between switches in - the cluster. 
-A port labelled "dsa" has the following mandatory property: +An uplink/downlink port between switches in the cluster has the following +mandatory property: - link : Should be a list of phandles to other switch's DSA port. This port is used as the outgoing port @@ -48,12 +44,17 @@ A port labelled "dsa" has the following mandatory property: information must be given, not just the one hop routes to neighbouring switches. -A port labelled "cpu" has the following mandatory property: +A CPU port has the following mandatory property: - ethernet : Should be a phandle to a valid Ethernet device node. This host device is what the switch port is connected to. +A user port has the following optional property: + +- label : Describes the label associated with this port, which + will become the netdev name. + Port child nodes may also contain the following optional standardised properties, described in binding documents: @@ -107,7 +108,6 @@ linked into one DSA cluster. switch0port5: port@5 { reg = <5>; - label = "dsa"; phy-mode = "rgmii-txid"; link = <&switch1port6 &switch2port9>; @@ -119,7 +119,6 @@ linked into one DSA cluster. port@6 { reg = <6>; - label = "cpu"; ethernet = <&fec1>; fixed-link { speed = <100>; @@ -165,7 +164,6 @@ linked into one DSA cluster. switch1port5: port@5 { reg = <5>; - label = "dsa"; link = <&switch2port9>; phy-mode = "rgmii-txid"; fixed-link { @@ -176,7 +174,6 @@ linked into one DSA cluster. switch1port6: port@6 { reg = <6>; - label = "dsa"; phy-mode = "rgmii-txid"; link = <&switch0port5>; fixed-link { @@ -255,7 +252,6 @@ linked into one DSA cluster. switch2port9: port@9 { reg = <9>; - label = "dsa"; phy-mode = "rgmii-txid"; link = <&switch1port5 &switch0port5>; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index bad119cee2a3..9526bdf2a34a 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -81,30 +81,12 @@ static void dsa_dst_del_ds(struct dsa_switch_tree *dst, static bool dsa_port_is_dsa(struct device_node *port) { - const char *name; - - name = of_get_property(port, "label", NULL); - if (!name) - return false; - - if (!strcmp(name, "dsa")) - return true; - - return false; + return !!of_parse_phandle(port, "link", 0); } static bool dsa_port_is_cpu(struct device_node *port) { - const char *name; - - name = of_get_property(port, "label", NULL); - if (!name) - return false; - - if (!strcmp(name, "cpu")) - return true; - - return false; + return !!of_parse_phandle(port, "ethernet", 0); } static bool dsa_ds_find_port(struct dsa_switch *ds, @@ -268,6 +250,8 @@ static int dsa_user_port_apply(struct device_node *port, u32 index, int err; name = of_get_property(port, "label", NULL); + if (!name) + name = "eth%d"; err = dsa_slave_create(ds, ds->dev, index, name); if (err) { -- cgit v1.2.3 From 44bb765cf07ab6622e6fdf4bce546b43bd20faee Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 10 Jan 2017 12:32:36 -0800 Subject: net: dsa: Implement ndo_get_phys_port_name() Return the physical port number of a DSA created network device using ndo_get_phys_port_name(). Signed-off-by: Florian Fainelli Tested-by: Vivien Didelot Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/dsa/slave.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 5cd5b8137c08..fed3fbd403cb 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -990,6 +990,15 @@ static int dsa_slave_get_phys_port_id(struct net_device *dev, ppid->id_len = sizeof(p->port); memcpy(ppid->id, &p->port, ppid->id_len); +} + +static int dsa_slave_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + + if (snprintf(name, len, "p%d", p->port) >= len) + return -EINVAL; return 0; } @@ -1042,6 +1051,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, .ndo_get_phys_port_id = dsa_slave_get_phys_port_id, + .ndo_get_phys_port_name = dsa_slave_get_phys_port_name, }; static const struct switchdev_ops dsa_slave_switchdev_ops = { -- cgit v1.2.3 From 592050b2541407d033da18226d3644644832d082 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 10 Jan 2017 12:32:37 -0800 Subject: Revert "net: dsa: Implement ndo_get_phys_port_id" This reverts commit 3a543ef479868e36c95935de320608a7e41466ca ("net: dsa: Implement ndo_get_phys_port_id") since it misuses the purpose of ndo_get_phys_port_id(). We have ndo_get_phys_port_name() to do the correct thing for us now. Signed-off-by: Florian Fainelli Reviewed-by: Vivien Didelot Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- net/dsa/slave.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index fed3fbd403cb..0cdcaf526987 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -983,15 +983,6 @@ static void dsa_slave_poll_controller(struct net_device *dev) } #endif -static int dsa_slave_get_phys_port_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - - ppid->id_len = sizeof(p->port); - memcpy(ppid->id, &p->port, ppid->id_len); -} - static int dsa_slave_get_phys_port_name(struct net_device *dev, char *name, size_t len) { @@ -1050,7 +1041,6 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, - .ndo_get_phys_port_id = dsa_slave_get_phys_port_id, .ndo_get_phys_port_name = dsa_slave_get_phys_port_name, }; -- cgit v1.2.3 From 380043b9dee4579174027f45d4554459d6bb4c38 Mon Sep 17 00:00:00 2001 From: Keerthy Date: Wed, 11 Jan 2017 09:03:29 +0530 Subject: net: netcp: correct netcp_get_stats function signature Commit: bc1f44709cf2 - net: make ndo_get_stats64 a void function and Commit: 6a8162e99ef3 - net: netcp: store network statistics in 64 bits. The commit 6a8162e99ef3 adds ndo_get_stats64 function as per old signature which causes compilation error: drivers/net/ethernet/ti/netcp_core.c:1951:28: error: initialization from incompatible pointer type .ndo_get_stats64 = netcp_get_stats, Hence correct netcp_get_stats function signature as per the latest definition. Signed-off-by: Keerthy Fixes: 6a8162e99ef344fc ("net: netcp: store network statistics in 64 bits") Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 68a75cc0731c..2b56eed249f7 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1909,7 +1909,7 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return 0; } -static struct rtnl_link_stats64 * +static void netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) { struct netcp_intf *netcp = netdev_priv(ndev); @@ -1938,8 +1938,6 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) stats->rx_errors = p->rx_errors; stats->rx_dropped = p->rx_dropped; stats->tx_dropped = p->tx_dropped; - - return stats; } static const struct net_device_ops netcp_netdev_ops = { -- cgit v1.2.3 From 55733350e5e8b70c5e54a30dbf98148c695f21f5 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 11 Jan 2017 14:05:42 +0100 Subject: flow disector: ARP support Allow dissection of (R)ARP operation hardware and protocol addresses for Ethernet hardware and IPv4 protocol addresses. There are currently no users of FLOW_DISSECTOR_KEY_ARP. A follow-up patch will allow FLOW_DISSECTOR_KEY_ARP to be used by the flower classifier. Signed-off-by: Simon Horman Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/flow_dissector.h | 19 +++++++++++++++ net/core/flow_dissector.c | 57 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index d896a33e00d4..ac9703018a3a 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -88,6 +88,24 @@ struct flow_dissector_key_addrs { }; }; +/** + * flow_dissector_key_arp: + * @ports: Operation, source and target addresses for an ARP header + * for Ethernet hardware addresses and IPv4 protocol addresses + * sip: Sender IP address + * tip: Target IP address + * op: Operation + * sha: Sender hardware address + * tpa: Target hardware address + */ +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[ETH_ALEN]; + unsigned char tha[ETH_ALEN]; +}; + /** * flow_dissector_key_tp_ports: * @ports: port numbers of Transport header @@ -141,6 +159,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ + FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index fe4e1531976c..5b3800fe20f3 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -138,6 +138,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; + struct flow_dissector_key_arp *key_arp; struct flow_dissector_key_ports *key_ports; struct flow_dissector_key_icmp *key_icmp; struct flow_dissector_key_tags *key_tags; @@ -379,6 +380,62 @@ mpls: nhoff += FCOE_HEADER_LEN; goto out_good; + + case htons(ETH_P_ARP): + case htons(ETH_P_RARP): { + struct { + unsigned char 
ar_sha[ETH_ALEN]; + unsigned char ar_sip[4]; + unsigned char ar_tha[ETH_ALEN]; + unsigned char ar_tip[4]; + } *arp_eth, _arp_eth; + const struct arphdr *arp; + struct arphdr _arp; + + arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data, + hlen, &_arp); + if (!arp) + goto out_bad; + + if (arp->ar_hrd != htons(ARPHRD_ETHER) || + arp->ar_pro != htons(ETH_P_IP) || + arp->ar_hln != ETH_ALEN || + arp->ar_pln != 4 || + (arp->ar_op != htons(ARPOP_REPLY) && + arp->ar_op != htons(ARPOP_REQUEST))) + goto out_bad; + + arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp), + sizeof(_arp_eth), data, + hlen - sizeof(_arp), + &_arp_eth); + if (!arp_eth) + goto out_bad; + + if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_ARP)) { + + key_arp = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_ARP, + target_container); + + memcpy(&key_arp->sip, arp_eth->ar_sip, + sizeof(key_arp->sip)); + memcpy(&key_arp->tip, arp_eth->ar_tip, + sizeof(key_arp->tip)); + + /* Only store the lower byte of the opcode; + * this covers ARPOP_REPLY and ARPOP_REQUEST. + */ + key_arp->op = ntohs(arp->ar_op) & 0xff; + + ether_addr_copy(key_arp->sha, arp_eth->ar_sha); + ether_addr_copy(key_arp->tha, arp_eth->ar_tha); + } + + goto out_good; + } + default: goto out_bad; } -- cgit v1.2.3 From 99d31326cbe6951872af5c8a6bc2679388a4d9ef Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 11 Jan 2017 14:05:43 +0100 Subject: net/sched: cls_flower: Support matching on ARP Support matching on ARP operation, and hardware and protocol addresses for Ethernet hardware and IPv4 protocol addresses. Example usage: tc qdisc add dev eth0 ingress tc filter add dev eth0 protocol arp parent ffff: flower indev eth0 \ arp_op request arp_sip 10.0.0.1 action drop tc filter add dev eth0 protocol rarp parent ffff: flower indev eth0 \ arp_op reply arp_tha 52:54:3f:00:00:00/24 action drop Signed-off-by: Simon Horman Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_cls.h | 11 ++++++++++ net/sched/cls_flower.c | 51 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index a081efbd61a2..1e5e1ddfdaca 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -416,6 +416,17 @@ enum { TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */ TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */ + TCA_FLOWER_KEY_ARP_SIP, /* be32 */ + TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */ + TCA_FLOWER_KEY_ARP_TIP, /* be32 */ + TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */ + TCA_FLOWER_KEY_ARP_OP, /* u8 */ + TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 */ + TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 970db7a41684..a3bfda3091a4 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -40,6 +40,7 @@ struct fl_flow_key { }; struct flow_dissector_key_ports tp; struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_arp arp; struct flow_dissector_key_keyid enc_key_id; union { struct flow_dissector_key_ipv4_addrs enc_ipv4; @@ -401,6 +402,16 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN }, + [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, + [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, + [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, }; static void fl_set_key_val(struct nlattr **tb, @@ -572,6 +583,23 @@ static int fl_set_key(struct net *net, struct nlattr **tb, &mask->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE_MASK, sizeof(key->icmp.code)); + } else if (key->basic.n_proto == htons(ETH_P_ARP) || + key->basic.n_proto == htons(ETH_P_RARP)) { + fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, + &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, + sizeof(key->arp.sip)); + fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, + &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, + sizeof(key->arp.tip)); + fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, + &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, + sizeof(key->arp.op)); + fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, + mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, + sizeof(key->arp.sha)); + fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, + mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, + sizeof(key->arp.tha)); } if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || @@ -688,6 +716,8 @@ static void fl_init_dissector(struct cls_fl_head *head, FLOW_DISSECTOR_KEY_PORTS, tp); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_ICMP, icmp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_ARP, arp); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_VLAN, vlan); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, @@ -1112,6 +1142,27 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, 
TCA_FLOWER_KEY_ICMPV6_CODE_MASK, sizeof(key->icmp.code)))) goto nla_put_failure; + else if ((key->basic.n_proto == htons(ETH_P_ARP) || + key->basic.n_proto == htons(ETH_P_RARP)) && + (fl_dump_key_val(skb, &key->arp.sip, + TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, + TCA_FLOWER_KEY_ARP_SIP_MASK, + sizeof(key->arp.sip)) || + fl_dump_key_val(skb, &key->arp.tip, + TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, + TCA_FLOWER_KEY_ARP_TIP_MASK, + sizeof(key->arp.tip)) || + fl_dump_key_val(skb, &key->arp.op, + TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, + TCA_FLOWER_KEY_ARP_OP_MASK, + sizeof(key->arp.op)) || + fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, + mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, + sizeof(key->arp.sha)) || + fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, + mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, + sizeof(key->arp.tha)))) + goto nla_put_failure; if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && (fl_dump_key_val(skb, &key->enc_ipv4.src, -- cgit v1.2.3 From b40296fc73f58f98b595164d52d9bfe99216efa6 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 11 Jan 2017 16:16:12 +0000 Subject: sfc: efx_get_phys_port_id() can be static Fixes the following sparse warning: drivers/net/ethernet/sfc/efx.c:2337:5: warning: symbol 'efx_get_phys_port_id' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 543fa488d049..f2ec853e356d 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2334,8 +2334,8 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return 0; } -int efx_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid) +static int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) { struct efx_nic *efx = netdev_priv(net_dev); -- cgit v1.2.3 From 60dce04b81424940a3183c8e7e81e1234a27e906 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 11 Jan 2017 16:32:51 +0000 Subject: net: thunderx: Fix error return code in nicvf_open() Fix to return a negative error code from the error handling case instead of 0, as done elsewhere in this function. Fixes: 712c31853440 ("net: thunderx: Program LMAC credits based on MTU") Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 273eafdb1c57..a25bb6ea2ff4 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev) /* Configure receive side scaling and MTU */ if (!nic->sqs_mode) { nicvf_rss_init(nic); - if (nicvf_update_hw_max_frs(nic, netdev->mtu)) + err = nicvf_update_hw_max_frs(nic, netdev->mtu); + if (err) goto cleanup; /* Clear percpu stats */ -- cgit v1.2.3 From 171d87aca0da1fab6a15b96ad8e298216a5951b0 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 11 Jan 2017 18:04:32 +0100 Subject: net: thunderx: Make hfunc variable const type in nicvf_set_rxfh() >From struct ethtool_ops: int (*set_rxfh)(struct net_device *, const u32 *indir, const u8 *key, const u8 hfunc); Change function arg of hfunc to const type. V2: Fixed indentation. 
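As an illustration (not part of this patch), a driver callback matching the const-qualified prototype quoted above could look like the sketch below; the "foo" prefix and the choice of supported hash functions are hypothetical:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_set_rxfh(struct net_device *dev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	/* Reject hash functions the hardware cannot provide. */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	/* indir and key may be NULL when userspace leaves them unchanged;
	 * otherwise program the indirection table and hash key here. */
	return 0;
}
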
Signed-off-by: Robert Richter Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 2e74bbaa38e1..5ac474683c98 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -635,7 +635,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, } static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *hkey, u8 hfunc) + const u8 *hkey, const u8 hfunc) { struct nicvf *nic = netdev_priv(dev); struct nicvf_rss_info *rss = &nic->rss_info; -- cgit v1.2.3 From 93be2b74279c15c2844684b1a027fdc71dd5d9bf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Jan 2017 15:35:25 +0100 Subject: wext: handle NULL extra data in iwe_stream_add_point better gcc-7 complains that wl3501_cs passes NULL into a function that then uses the argument as the input for memcpy: drivers/net/wireless/wl3501_cs.c: In function 'wl3501_get_scan': include/net/iw_handler.h:559:3: error: argument 2 null where non-null expected [-Werror=nonnull] memcpy(stream + point_len, extra, iwe->u.data.length); This works fine here because iwe->u.data.length is guaranteed to be 0 and the memcpy doesn't actually have an effect. Making the length check explicit avoids the warning and should have no other effect here. Also check the pointer itself, since otherwise we get warnings elsewhere in the code. Signed-off-by: Arnd Bergmann Signed-off-by: Johannes Berg --- include/net/iw_handler.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index e0f4109e64c6..c2aa73e5e6bb 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, memcpy(stream + lcp_len, ((char *) &iwe->u) + IW_EV_POINT_OFF, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - memcpy(stream + point_len, extra, iwe->u.data.length); + if (iwe->u.data.length && extra) + memcpy(stream + point_len, extra, iwe->u.data.length); stream += event_len; } return stream; -- cgit v1.2.3 From cef0acd4d7d4811d2d19cd0195031bf0dfe41249 Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Mon, 21 Nov 2016 16:58:40 +0200 Subject: mac80211: Add RX flag to indicate ICV stripped Add a flag that indicates that the WEP ICV was stripped from an RX packet, allowing the device to not transfer that if it's already checked. Signed-off-by: David Spinadel Signed-off-by: Johannes Berg --- include/net/mac80211.h | 5 ++++- net/mac80211/wep.c | 3 ++- net/mac80211/wpa.c | 3 ++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5f5cb194cd78..86967b85dfd0 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1017,7 +1017,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware. * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame, * verification has been done by the hardware. - * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. + * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame. * If this flag is set, the stack cannot do any replay detection * hence the driver or hardware will have to do that. 
* @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this @@ -1088,6 +1088,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before. * This is used for AMSDU subframes which can have the same PN as * the first subframe. + * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must + * be done in the hardware. */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1123,6 +1125,7 @@ enum mac80211_rx_flags { RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), RX_FLAG_MIC_STRIPPED = BIT_ULL(32), RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33), + RX_FLAG_ICV_STRIPPED = BIT_ULL(34), }; #define RX_FLAG_STBC_SHIFT 26 diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index efa3f48f1ec5..73e8f347802e 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -293,7 +293,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); /* remove ICV */ - if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) + if (!(status->flag & RX_FLAG_ICV_STRIPPED) && + pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) return RX_DROP_UNUSABLE; } diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 8af6dd388d11..c1ef22df865f 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -294,7 +294,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; /* Trim ICV */ - skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); + if (!(status->flag & RX_FLAG_ICV_STRIPPED)) + skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); /* Remove IV */ memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); -- cgit v1.2.3 From 2cc6f5a715cf9f7d5ca8fd2bde62130fcfd90b94 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 19 Oct 2016 15:02:32 +0200 Subject: mac80211: set wifi_acked[_valid] bits for transmitted SKBs There may be situations in which the in-kernel originator of an SKB cares about its wifi transmission status. To have that, set the wifi_acked[_valid] bits before freeing/orphaning the SKB if the destructor is set. The originator can then use it in there. Signed-off-by: Johannes Berg --- net/mac80211/status.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/mac80211/status.c b/net/mac80211/status.c index f7c5ae597639..d6a1bfaa7a81 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -541,6 +541,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, } else if (info->ack_frame_id) { ieee80211_report_ack_skb(local, info, acked, dropped); } + + if (!dropped && skb->destructor) { + skb->wifi_acked_valid = 1; + skb->wifi_acked = acked; + } } /* -- cgit v1.2.3 From 6c0b2e833f1439ebcbd7258b655623c1aef23ffb Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 11 Jan 2017 16:32:17 +0200 Subject: soc: qcom: smem_state: Fix include for ERR_PTR() The correct include file for getting errno constants and ERR_PTR() is linux/err.h, rather than linux/errno.h, so fix the include. 
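For illustration (not part of this patch), the stubbed API is typically consumed with the error-pointer helpers that only linux/err.h declares; the function name below is hypothetical, while qcom_smem_state_get() and the "tx-enable" con_id follow the wcn36xx usage later in this series:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/smem_state.h>

static int example_get_tx_enable(struct device *dev,
				 struct qcom_smem_state **state)
{
	unsigned bit;

	*state = qcom_smem_state_get(dev, "tx-enable", &bit);
	if (IS_ERR(*state))	/* IS_ERR()/PTR_ERR() come from linux/err.h */
		return PTR_ERR(*state);

	return 0;
}
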
Fixes: e8b123e60084 ("soc: qcom: smem_state: Add stubs for disabled smem_state") Acked-by: Andy Gross Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- include/linux/soc/qcom/smem_state.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index 7b88697929e9..b8478ee7a71f 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -1,7 +1,7 @@ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ -#include <linux/errno.h> +#include <linux/err.h> struct device_node; struct qcom_smem_state; -- cgit v1.2.3 From f303a931106580acebca909b635347a37269ba9f Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 11 Jan 2017 16:32:18 +0200 Subject: wcn36xx: Transition driver to SMD client The wcn36xx wifi driver follows the life cycle of the WLAN_CTRL SMD channel, as such it should be a SMD client. This patch makes this transition, now that we have the necessary frameworks available. Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/Kconfig | 2 + drivers/net/wireless/ath/wcn36xx/dxe.c | 16 +++--- drivers/net/wireless/ath/wcn36xx/main.c | 79 ++++++++++++++++++++---------- drivers/net/wireless/ath/wcn36xx/smd.c | 31 +++++------- drivers/net/wireless/ath/wcn36xx/smd.h | 5 ++ drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 21 +++----- 6 files changed, 88 insertions(+), 66 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig index 591ebaea8265..4b83e87f0b94 100644 --- a/drivers/net/wireless/ath/wcn36xx/Kconfig +++ b/drivers/net/wireless/ath/wcn36xx/Kconfig @@ -1,6 +1,8 @@ config WCN36XX tristate "Qualcomm Atheros WCN3660/3680 support" depends on MAC80211 && HAS_DMA + depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n + depends on QCOM_SMD || QCOM_SMD=n ---help--- This module adds support for wireless adapters based on Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets. diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 231fd022f0f5..87dfdaf9044c 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include "wcn36xx.h" #include "txrx.h" @@ -151,9 +152,12 @@ int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn) goto out_err; /* Initialize SMSM state Clear TX Enable RING EMPTY STATE */ - ret = wcn->ctrl_ops->smsm_change_state( - WCN36XX_SMSM_WLAN_TX_ENABLE, - WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY); + ret = qcom_smem_state_update_bits(wcn->tx_enable_state, + WCN36XX_SMSM_WLAN_TX_ENABLE | + WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY, + WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY); + if (ret) + goto out_err; return 0; @@ -678,9 +682,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, * notify chip about new frame through SMSM bus. */ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) { - wcn->ctrl_ops->smsm_change_state( - 0, - WCN36XX_SMSM_WLAN_TX_ENABLE); + qcom_smem_state_update_bits(wcn->tx_rings_empty_state, + WCN36XX_SMSM_WLAN_TX_ENABLE, + WCN36XX_SMSM_WLAN_TX_ENABLE); } else { /* indicate End Of Packet and generate interrupt on descriptor * done. 
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e1d59da2ad20..3c2522b07c90 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -21,6 +21,10 @@ #include #include #include +#include +#include +#include +#include #include "wcn36xx.h" unsigned int wcn36xx_dbg_mask; @@ -1058,8 +1062,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, int ret; /* Set TX IRQ */ - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - "wcnss_wlantx_irq"); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx"); if (!res) { wcn36xx_err("failed to get tx_irq\n"); return -ENOENT; @@ -1067,14 +1070,29 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, wcn->tx_irq = res->start; /* Set RX IRQ */ - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - "wcnss_wlanrx_irq"); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx"); if (!res) { wcn36xx_err("failed to get rx_irq\n"); return -ENOENT; } wcn->rx_irq = res->start; + /* Acquire SMSM tx enable handle */ + wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev, + "tx-enable", &wcn->tx_enable_state_bit); + if (IS_ERR(wcn->tx_enable_state)) { + wcn36xx_err("failed to get tx-enable state\n"); + return PTR_ERR(wcn->tx_enable_state); + } + + /* Acquire SMSM tx rings empty handle */ + wcn->tx_rings_empty_state = qcom_smem_state_get(&pdev->dev, + "tx-rings-empty", &wcn->tx_rings_empty_state_bit); + if (IS_ERR(wcn->tx_rings_empty_state)) { + wcn36xx_err("failed to get tx-rings-empty state\n"); + return PTR_ERR(wcn->tx_rings_empty_state); + } + mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0); if (!mmio_node) { wcn36xx_err("failed to acquire qcom,mmio reference\n"); @@ -1115,11 +1133,14 @@ static int wcn36xx_probe(struct platform_device *pdev) { struct ieee80211_hw *hw; struct wcn36xx *wcn; + void *wcnss; int ret; - u8 addr[ETH_ALEN]; + const u8 *addr; wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n"); + wcnss = dev_get_drvdata(pdev->dev.parent); + hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops); if (!hw) { wcn36xx_err("failed to alloc hw\n"); @@ -1130,11 +1151,23 @@ static int wcn36xx_probe(struct platform_device *pdev) wcn = hw->priv; wcn->hw = hw; wcn->dev = &pdev->dev; - wcn->ctrl_ops = pdev->dev.platform_data; - mutex_init(&wcn->hal_mutex); - if (!wcn->ctrl_ops->get_hw_mac(addr)) { + wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process); + if (IS_ERR(wcn->smd_channel)) { + wcn36xx_err("failed to open WLAN_CTRL channel\n"); + ret = PTR_ERR(wcn->smd_channel); + goto out_wq; + } + + qcom_smd_set_drvdata(wcn->smd_channel, hw); + + addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret); + if (addr && ret != ETH_ALEN) { + wcn36xx_err("invalid local-mac-address\n"); + ret = -EINVAL; + goto out_wq; + } else if (addr) { wcn36xx_info("mac address: %pM\n", addr); SET_IEEE80211_PERM_ADDR(wcn->hw, addr); } @@ -1158,6 +1191,7 @@ out_wq: out_err: return ret; } + static int wcn36xx_remove(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); @@ -1168,42 +1202,33 @@ static int wcn36xx_remove(struct platform_device *pdev) mutex_destroy(&wcn->hal_mutex); ieee80211_unregister_hw(hw); + + qcom_smem_state_put(wcn->tx_enable_state); + qcom_smem_state_put(wcn->tx_rings_empty_state); + iounmap(wcn->dxe_base); iounmap(wcn->ccu_base); ieee80211_free_hw(hw); return 0; } -static const struct platform_device_id 
wcn36xx_platform_id_table[] = { - { - .name = "wcn36xx", - .driver_data = 0 - }, + +static const struct of_device_id wcn36xx_of_match[] = { + { .compatible = "qcom,wcnss-wlan" }, {} }; -MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table); +MODULE_DEVICE_TABLE(of, wcn36xx_of_match); static struct platform_driver wcn36xx_driver = { .probe = wcn36xx_probe, .remove = wcn36xx_remove, .driver = { .name = "wcn36xx", + .of_match_table = wcn36xx_of_match, }, - .id_table = wcn36xx_platform_id_table, }; -static int __init wcn36xx_init(void) -{ - platform_driver_register(&wcn36xx_driver); - return 0; -} -module_init(wcn36xx_init); - -static void __exit wcn36xx_exit(void) -{ - platform_driver_unregister(&wcn36xx_driver); -} -module_exit(wcn36xx_exit); +module_platform_driver(wcn36xx_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com"); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index a443992320f2..af0260add841 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "smd.h" struct wcn36xx_cfg_val { @@ -253,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) init_completion(&wcn->hal_rsp_compl); start = jiffies; - ret = wcn->ctrl_ops->tx(wcn->hal_buf, len); + ret = qcom_smd_send(wcn->smd_channel, wcn->hal_buf, len); if (ret) { wcn36xx_err("HAL TX failed\n"); goto out; @@ -2180,9 +2181,12 @@ out: return ret; } -static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) +int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel, + const void *buf, size_t len) { - struct wcn36xx_hal_msg_header *msg_header = buf; + const struct wcn36xx_hal_msg_header *msg_header = buf; + struct ieee80211_hw *hw = qcom_smd_get_drvdata(channel); + struct wcn36xx *wcn = hw->priv; struct wcn36xx_hal_ind_msg *msg_ind; wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len); @@ -2233,15 +2237,11 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_OTA_TX_COMPL_IND: case WCN36XX_HAL_MISSED_BEACON_IND: case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: - msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL); + msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC); if (!msg_ind) { - /* - * FIXME: Do something smarter then just - * printing an error. 
- */ wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", msg_header->msg_type); - break; + return -ENOMEM; } msg_ind->msg_len = len; @@ -2257,6 +2257,8 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) wcn36xx_err("SMD_EVENT (%d) not supported\n", msg_header->msg_type); } + + return 0; } static void wcn36xx_ind_smd_work(struct work_struct *work) { @@ -2315,22 +2317,13 @@ int wcn36xx_smd_open(struct wcn36xx *wcn) INIT_LIST_HEAD(&wcn->hal_ind_queue); spin_lock_init(&wcn->hal_ind_lock); - ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process); - if (ret) { - wcn36xx_err("failed to open control channel\n"); - goto free_wq; - } - - return ret; + return 0; -free_wq: - destroy_workqueue(wcn->hal_ind_wq); out: return ret; } void wcn36xx_smd_close(struct wcn36xx *wcn) { - wcn->ctrl_ops->close(); destroy_workqueue(wcn->hal_ind_wq); } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index df80cbbd9d1b..40d829563c2b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -51,6 +51,7 @@ struct wcn36xx_hal_ind_msg { }; struct wcn36xx; +struct qcom_smd_channel; int wcn36xx_smd_open(struct wcn36xx *wcn); void wcn36xx_smd_close(struct wcn36xx *wcn); @@ -127,6 +128,10 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index); int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); + +int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel, + const void *buf, size_t len); + int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 22242d18e1fe..68cc06cf9bc0 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -103,19 +103,6 @@ struct nv_data { u8 table; }; -/* Interface for platform control path - * - * @open: hook must be called when wcn36xx wants to open control channel. - * @tx: sends a buffer. - */ -struct wcn36xx_platform_ctrl_ops { - int (*open)(void *drv_priv, void *rsp_cb); - void (*close)(void); - int (*tx)(char *buf, size_t len); - int (*get_hw_mac)(u8 *addr); - int (*smsm_change_state)(u32 clear_mask, u32 set_mask); -}; - /** * struct wcn36xx_vif - holds VIF related fields * @@ -205,7 +192,13 @@ struct wcn36xx { void __iomem *ccu_base; void __iomem *dxe_base; - struct wcn36xx_platform_ctrl_ops *ctrl_ops; + struct qcom_smd_channel *smd_channel; + + struct qcom_smem_state *tx_enable_state; + unsigned tx_enable_state_bit; + struct qcom_smem_state *tx_rings_empty_state; + unsigned tx_rings_empty_state_bit; + /* * smd_buf must be protected with smd_mutex to garantee * that all messages are sent one after another -- cgit v1.2.3 From 886039036c2004154f03c963c65cb94bdc300a92 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 11 Jan 2017 16:32:19 +0200 Subject: wcn36xx: Implement firmware assisted scan Using the software based channel scan mechanism from mac80211 keeps us offline for 10-15 second, we should instead issue a start_scan/end_scan on each channel reducing this time. 
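Condensed, the sequence implemented below amounts to the following sketch (error handling, locking and the update_scan_params step are omitted; the function name is illustrative only):

static void hw_scan_outline(struct wcn36xx *wcn,
			    struct cfg80211_scan_request *req)
{
	struct cfg80211_scan_info info = { .aborted = false };
	int i;

	wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
	for (i = 0; i < req->n_channels; i++) {
		wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value);
		msleep(30);	/* dwell on the channel */
		wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value);
	}
	wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);

	ieee80211_scan_completed(wcn->hw, &info);
}
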
Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 64 +++++++++++++++++++++++++----- drivers/net/wireless/ath/wcn36xx/smd.c | 8 ++-- drivers/net/wireless/ath/wcn36xx/smd.h | 4 +- drivers/net/wireless/ath/wcn36xx/txrx.c | 19 ++++++--- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 9 +++++ 5 files changed, 81 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 3c2522b07c90..96a9584edcbb 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -568,23 +568,59 @@ out: return ret; } -static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *mac_addr) +static void wcn36xx_hw_scan_worker(struct work_struct *work) { - struct wcn36xx *wcn = hw->priv; + struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work); + struct cfg80211_scan_request *req = wcn->scan_req; + u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX]; + struct cfg80211_scan_info scan_info = {}; + int i; + + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels); + + for (i = 0; i < req->n_channels; i++) + channels[i] = req->channels[i]->hw_value; + + wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels); wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN); - wcn36xx_smd_start_scan(wcn); + for (i = 0; i < req->n_channels; i++) { + wcn->scan_freq = req->channels[i]->center_freq; + wcn->scan_band = req->channels[i]->band; + + wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value); + msleep(30); + wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value); + + wcn->scan_freq = 0; + } + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN); + + scan_info.aborted = false; + ieee80211_scan_completed(wcn->hw, &scan_info); + + mutex_lock(&wcn->scan_lock); + wcn->scan_req = NULL; + mutex_unlock(&wcn->scan_lock); } -static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int wcn36xx_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) { struct wcn36xx *wcn = hw->priv; - wcn36xx_smd_end_scan(wcn); - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN); + mutex_lock(&wcn->scan_lock); + if (wcn->scan_req) { + mutex_unlock(&wcn->scan_lock); + return -EBUSY; + } + wcn->scan_req = &hw_req->req; + mutex_unlock(&wcn->scan_lock); + + schedule_work(&wcn->scan_work); + + return 0; } static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, @@ -997,8 +1033,7 @@ static const struct ieee80211_ops wcn36xx_ops = { .configure_filter = wcn36xx_configure_filter, .tx = wcn36xx_tx, .set_key = wcn36xx_set_key, - .sw_scan_start = wcn36xx_sw_scan_start, - .sw_scan_complete = wcn36xx_sw_scan_complete, + .hw_scan = wcn36xx_hw_scan, .bss_info_changed = wcn36xx_bss_info_changed, .set_rts_threshold = wcn36xx_set_rts_threshold, .sta_add = wcn36xx_sta_add, @@ -1023,6 +1058,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) ieee80211_hw_set(wcn->hw, SUPPORTS_PS); ieee80211_hw_set(wcn->hw, SIGNAL_DBM); ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL); + ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS); wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -1032,6 +1068,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz; wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz; + wcn->hw->wiphy->max_scan_ssids = 
WCN36XX_MAX_SCAN_SSIDS; + wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN; + wcn->hw->wiphy->cipher_suites = cipher_suites; wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); @@ -1152,6 +1191,9 @@ static int wcn36xx_probe(struct platform_device *pdev) wcn->hw = hw; wcn->dev = &pdev->dev; mutex_init(&wcn->hal_mutex); + mutex_init(&wcn->scan_lock); + + INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker); wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process); if (IS_ERR(wcn->smd_channel)) { diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index af0260add841..be5e5ea1e5c3 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -522,7 +522,7 @@ out: return ret; } -int wcn36xx_smd_start_scan(struct wcn36xx *wcn) +int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_start_scan_req_msg msg_body; int ret = 0; @@ -530,7 +530,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn) mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ); - msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn); + msg_body.scan_channel = scan_channel; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -552,7 +552,7 @@ out: return ret; } -int wcn36xx_smd_end_scan(struct wcn36xx *wcn) +int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_end_scan_req_msg msg_body; int ret = 0; @@ -560,7 +560,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn) mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ); - msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn); + msg_body.scan_channel = scan_channel; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 40d829563c2b..8892ccd67b14 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -60,8 +60,8 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn); int wcn36xx_smd_start(struct wcn36xx *wcn); int wcn36xx_smd_stop(struct wcn36xx *wcn); int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode); -int wcn36xx_smd_start_scan(struct wcn36xx *wcn); -int wcn36xx_smd_end_scan(struct wcn36xx *wcn); +int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel); +int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel); int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode); int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count); diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 1f34c2e912d7..8c387a0a3c09 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -45,9 +45,20 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len); skb_pull(skb, bd->pdu.mpdu_header_off); + hdr = (struct ieee80211_hdr *) skb->data; + fc = __le16_to_cpu(hdr->frame_control); + sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); + + /* When scanning associate beacons to this */ + if (ieee80211_is_beacon(hdr->frame_control) && wcn->scan_freq) { + status.freq = wcn->scan_freq; + status.band = wcn->scan_band; + } else { + status.freq = WCN36XX_CENTER_FREQ(wcn); + status.band = WCN36XX_BAND(wcn); + } + status.mactime = 10; - status.freq = WCN36XX_CENTER_FREQ(wcn); - status.band = WCN36XX_BAND(wcn); status.signal = 
-get_rssi0(bd); status.antenna = 1; status.rate_idx = 1; @@ -61,10 +72,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); - hdr = (struct ieee80211_hdr *) skb->data; - fc = __le16_to_cpu(hdr->frame_control); - sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); - if (ieee80211_is_beacon(hdr->frame_control)) { wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n", skb, skb->len, fc, sn); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 68cc06cf9bc0..35a6590c3ee5 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -35,6 +35,9 @@ /* How many frames until we start a-mpdu TX session */ #define WCN36XX_AMPDU_START_THRESH 20 +#define WCN36XX_MAX_SCAN_SSIDS 9 +#define WCN36XX_MAX_SCAN_IE_LEN 500 + extern unsigned int wcn36xx_dbg_mask; enum wcn36xx_debug_mask { @@ -212,6 +215,12 @@ struct wcn36xx { spinlock_t hal_ind_lock; struct list_head hal_ind_queue; + struct work_struct scan_work; + struct cfg80211_scan_request *scan_req; + int scan_freq; + int scan_band; + struct mutex scan_lock; + /* DXE channels */ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */ struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */ -- cgit v1.2.3 From 43efa3c0f241e04862be8e6a68ff765d36cde1ba Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 11 Jan 2017 16:32:20 +0200 Subject: wcn36xx: Implement print_reg indication Some firmware versions sends a "print register indication", handle this by printing out the content. Cc: Nicolas Dechesne Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/hal.h | 16 ++++++++++++++++ drivers/net/wireless/ath/wcn36xx/smd.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 4f87ef1e1eb8..b765c647319d 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -350,6 +350,8 @@ enum wcn36xx_hal_host_msg_type { WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233, + WCN36XX_HAL_PRINT_REG_INFO_IND = 259, + WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE }; @@ -4703,4 +4705,18 @@ struct stats_class_b_ind { u32 rx_time_total; }; +/* WCN36XX_HAL_PRINT_REG_INFO_IND */ +struct wcn36xx_hal_print_reg_info_ind { + struct wcn36xx_hal_msg_header header; + + u32 count; + u32 scenario; + u32 reason; + + struct { + u32 addr; + u32 value; + } regs[]; +} __packed; + #endif /* _HAL_H_ */ diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index be5e5ea1e5c3..1c2966f7db7a 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -2109,6 +2109,30 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn, return -ENOENT; } +static int wcn36xx_smd_print_reg_info_ind(struct wcn36xx *wcn, + void *buf, + size_t len) +{ + struct wcn36xx_hal_print_reg_info_ind *rsp = buf; + int i; + + if (len < sizeof(*rsp)) { + wcn36xx_warn("Corrupted print reg info indication\n"); + return -EIO; + } + + wcn36xx_dbg(WCN36XX_DBG_HAL, + "reginfo indication, scenario: 0x%x reason: 0x%x\n", + rsp->scenario, rsp->reason); + + for (i = 0; i < rsp->count; i++) { + wcn36xx_dbg(WCN36XX_DBG_HAL, "\t0x%x: 0x%x\n", + rsp->regs[i].addr, rsp->regs[i].value); + } + + return 0; +} + int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value) { struct 
wcn36xx_hal_update_cfg_req_msg msg_body, *body; @@ -2237,6 +2261,7 @@ int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel, case WCN36XX_HAL_OTA_TX_COMPL_IND: case WCN36XX_HAL_MISSED_BEACON_IND: case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: + case WCN36XX_HAL_PRINT_REG_INFO_IND: msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC); if (!msg_ind) { wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", @@ -2296,6 +2321,11 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) hal_ind_msg->msg, hal_ind_msg->msg_len); break; + case WCN36XX_HAL_PRINT_REG_INFO_IND: + wcn36xx_smd_print_reg_info_ind(wcn, + hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; default: wcn36xx_err("SMD_EVENT (%d) not supported\n", msg_header->msg_type); -- cgit v1.2.3 From d53628882255481b710641dd0118fbd80af6e983 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 11 Jan 2017 16:32:21 +0200 Subject: wcn36xx: Don't use the destroyed hal_mutex ieee80211_unregister_hw() might invoke operations to stop the interface, that uses the hal_mutex. So don't destroy it until after we're done using it. Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 96a9584edcbb..0002190c9041 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1241,7 +1241,6 @@ static int wcn36xx_remove(struct platform_device *pdev) wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n"); release_firmware(wcn->nv); - mutex_destroy(&wcn->hal_mutex); ieee80211_unregister_hw(hw); @@ -1250,6 +1249,8 @@ static int wcn36xx_remove(struct platform_device *pdev) iounmap(wcn->dxe_base); iounmap(wcn->ccu_base); + + mutex_destroy(&wcn->hal_mutex); ieee80211_free_hw(hw); return 0; -- cgit v1.2.3 From 7bc7441e4da3868042d01eed8a1f61625d35a356 Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Wed, 11 Jan 2017 16:32:09 +0200 Subject: ath10k: htc: removal of unused struct members Removed tx_credits_per_max_message and tx_credit_size from struct ath10k_htc_ep since they are not used anywhere in the code. They are just written, never read. 
Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htc.c | 6 ------ drivers/net/wireless/ath/ath10k/htc.h | 2 -- 2 files changed, 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 175aae38c375..f2e065958c87 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -726,12 +726,6 @@ setup: ep->max_tx_queue_depth = conn_req->max_send_queue_depth; ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size); ep->tx_credits = tx_alloc; - ep->tx_credit_size = htc->target_credit_size; - ep->tx_credits_per_max_message = ep->max_ep_message_len / - htc->target_credit_size; - - if (ep->max_ep_message_len % htc->target_credit_size) - ep->tx_credits_per_max_message++; /* copy all the callbacks */ ep->ep_ops = conn_req->ep_ops; diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 0c55cd92a951..ca150c91f181 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -314,8 +314,6 @@ struct ath10k_htc_ep { u8 seq_no; /* for debugging */ int tx_credits; - int tx_credit_size; - int tx_credits_per_max_message; bool tx_credit_flow_enabled; }; -- cgit v1.2.3 From d48b62ceeea2c229c637efda0c1a846dd33cf26a Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Wed, 11 Jan 2017 16:32:10 +0200 Subject: ath10k: htc: simplified credit distribution Simplified transmit credit distribution code somewhat. Since the WMI control service will get assigned all credits there is no need for having a credit_allocation array in struct ath10k_htc. Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htc.c | 29 +++++------------------------ drivers/net/wireless/ath/ath10k/htc.h | 1 - 2 files changed, 5 insertions(+), 25 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index f2e065958c87..9f6a915f91bf 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -474,33 +474,16 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc) } } -static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc) -{ - struct ath10k_htc_svc_tx_credits *entry; - - entry = &htc->service_tx_alloc[0]; - - /* - * for PCIE allocate all credists/HTC buffers to WMI. - * no buffers are used/required for data. data always - * remains on host. - */ - entry++; - entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; - entry->credit_allocation = htc->total_transmit_credits; -} - static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc, u16 service_id) { u8 allocation = 0; - int i; - for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) { - if (htc->service_tx_alloc[i].service_id == service_id) - allocation = - htc->service_tx_alloc[i].credit_allocation; - } + /* The WMI control service is the only service with flow control. + * Let it have all transmit credits. 
+ */ + if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL) + allocation = htc->total_transmit_credits; return allocation; } @@ -574,8 +557,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) return -ECOMM; } - ath10k_htc_setup_target_buffer_assignments(htc); - /* setup our pseudo HTC control endpoint connection */ memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index ca150c91f181..6ababa345e2b 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -337,7 +337,6 @@ struct ath10k_htc { struct completion ctl_resp; int total_transmit_credits; - struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; int target_credit_size; }; -- cgit v1.2.3 From b59eb96181e7fe4abe844985362882aeb39a0593 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Wed, 11 Jan 2017 16:32:12 +0200 Subject: wil6210: constify cfg80211_ops structures cfg80211_ops structures are only passed as an argument to the function wiphy_new. This argument is of type const, so cfg80211_ops strutures having this property can be declared as const. Done using Coccinelle @r1 disable optional_qualifier @ identifier i; position p; @@ static struct cfg80211_ops i@p = {...}; @ok1@ identifier r1.i; position p; @@ wiphy_new(&i@p,...) @bad@ position p!={r1.p,ok1.p}; identifier r1.i; @@ i@p @depends on !bad disable optional_qualifier@ identifier r1.i; @@ +const struct cfg80211_ops i; File size before: text data bss dec hex filename 18133 6632 0 24765 60bd wireless/ath/wil6210/cfg80211.o File size after: text data bss dec hex filename 18933 5832 0 24765 60bd wireless/ath/wil6210/cfg80211.o Signed-off-by: Bhumika Goyal Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 6aa3ff4240a9..54dd11670fdb 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1499,7 +1499,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy, return rc; } -static struct cfg80211_ops wil_cfg80211_ops = { +static const struct cfg80211_ops wil_cfg80211_ops = { .add_virtual_intf = wil_cfg80211_add_iface, .del_virtual_intf = wil_cfg80211_del_iface, .scan = wil_cfg80211_scan, -- cgit v1.2.3 From a70e1d6fd6b5e1a81fa6171600942bee34f5128f Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 11 Jan 2017 16:32:13 +0200 Subject: ath5k: drop bogus warning on drv_set_key with unsupported cipher Simply return -EOPNOTSUPP instead. 
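An error return from set_key keeps the key in software crypto rather than offloading it to hardware; as a purely hypothetical sketch (not part of this patch), a driver with no cipher offload at all could decline every key the same way:

static int foo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	/* No hardware crypto: let mac80211 fall back to software encryption. */
	return -EOPNOTSUPP;
}
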
Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath5k/mac80211-ops.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index dc44cfef7517..16e052d02c94 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -502,8 +502,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; return -EOPNOTSUPP; default: - WARN_ON(1); - return -EINVAL; + return -EOPNOTSUPP; } mutex_lock(&ah->lock); -- cgit v1.2.3 From d5a3a76a9cb814b377d12fe4ebe5cff20b63f390 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 11 Jan 2017 16:32:14 +0200 Subject: ath9k: ar9002_mac: kill off ACCESS_ONCE() For several reasons, it is desirable to use {READ,WRITE}_ONCE() in preference to ACCESS_ONCE(), and new code is expected to use one of the former. So far, there's been no reason to change most existing uses of ACCESS_ONCE(), as these aren't currently harmful. However, for some new features (e.g. KTSAN / Kernel Thread Sanitizer), it is necessary to instrument reads and writes separately, which is not possible with ACCESS_ONCE(). This distinction is critical to correct operation. It's possible to transform the bulk of kernel code using the Coccinelle script below. However, for some files (including the ath9k ar9002 mac driver), this mangles the formatting. As a preparatory step, this patch converts the driver to use {READ,WRITE}_ONCE() without said mangling. ---- virtual patch @ depends on patch @ expression E1, E2; @@ - ACCESS_ONCE(E1) = E2 + WRITE_ONCE(E1, E2) @ depends on patch @ expression E; @@ - ACCESS_ONCE(E) + READ_ONCE(E) ---- Signed-off-by: Mark Rutland Cc: ath9k-devel@qca.qualcomm.com Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: ath9k-devel@lists.ath9k.org Cc: netdev@vger.kernel.org Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9002_mac.c | 64 ++++++++++++++--------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index f816909d9474..4b3c9b108197 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ads->ds_txstatus6 = ads->ds_txstatus7 = 0; ads->ds_txstatus8 = ads->ds_txstatus9 = 0; - ACCESS_ONCE(ads->ds_link) = i->link; - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; + WRITE_ONCE(ads->ds_link, i->link); + WRITE_ONCE(ads->ds_data, i->buf_addr[0]); ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); ctl6 = SM(i->keytype, AR_EncrType); @@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) if ((i->is_first || i->is_last) && i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) + WRITE_ONCE(ads->ds_ctl2, set11nTries(i->rates, 0) | set11nTries(i->rates, 1) | set11nTries(i->rates, 2) | set11nTries(i->rates, 3) | (i->dur_update ? 
AR_DurUpdateEna : 0) - | SM(0, AR_BurstDur); + | SM(0, AR_BurstDur)); - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) + WRITE_ONCE(ads->ds_ctl3, set11nRate(i->rates, 0) | set11nRate(i->rates, 1) | set11nRate(i->rates, 2) - | set11nRate(i->rates, 3); + | set11nRate(i->rates, 3)); } else { - ACCESS_ONCE(ads->ds_ctl2) = 0; - ACCESS_ONCE(ads->ds_ctl3) = 0; + WRITE_ONCE(ads->ds_ctl2, 0); + WRITE_ONCE(ads->ds_ctl3, 0); } if (!i->is_first) { - ACCESS_ONCE(ads->ds_ctl0) = 0; - ACCESS_ONCE(ads->ds_ctl1) = ctl1; - ACCESS_ONCE(ads->ds_ctl6) = ctl6; + WRITE_ONCE(ads->ds_ctl0, 0); + WRITE_ONCE(ads->ds_ctl1, ctl1); + WRITE_ONCE(ads->ds_ctl6, ctl6); return; } @@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) break; } - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + WRITE_ONCE(ads->ds_ctl0, (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) @@ -287,29 +287,29 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0) | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : - (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)); + (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0))); - ACCESS_ONCE(ads->ds_ctl1) = ctl1; - ACCESS_ONCE(ads->ds_ctl6) = ctl6; + WRITE_ONCE(ads->ds_ctl1, ctl1); + WRITE_ONCE(ads->ds_ctl6, ctl6); if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) return; - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) - | set11nPktDurRTSCTS(i->rates, 1); + WRITE_ONCE(ads->ds_ctl4, set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1)); - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) - | set11nPktDurRTSCTS(i->rates, 3); + WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3)); - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0) | set11nRateFlags(i->rates, 1) | set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 3) - | SM(i->rtscts_rate, AR_RTSCTSRate); + | SM(i->rtscts_rate, AR_RTSCTSRate)); - ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1); - ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2); - ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3); + WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1)); + WRITE_ONCE(ads->ds_ctl10, SM(i->txpower[2], AR_XmitPower2)); + WRITE_ONCE(ads->ds_ctl11, SM(i->txpower[3], AR_XmitPower3)); } static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, @@ -318,7 +318,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, struct ar5416_desc *ads = AR5416DESC(ds); u32 status; - status = ACCESS_ONCE(ads->ds_txstatus9); + status = READ_ONCE(ads->ds_txstatus9); if ((status & AR_TxDone) == 0) return -EINPROGRESS; @@ -332,7 +332,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ts_rateindex = MS(status, AR_FinalTxIdx); ts->ts_seqnum = MS(status, AR_SeqNum); - status = ACCESS_ONCE(ads->ds_txstatus0); + status = READ_ONCE(ads->ds_txstatus0); ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00); ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01); ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02); @@ -342,7 +342,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ba_high = ads->AR_BaBitmapHigh; } - status = ACCESS_ONCE(ads->ds_txstatus1); + status = 
READ_ONCE(ads->ds_txstatus1); if (status & AR_FrmXmitOK) ts->ts_status |= ATH9K_TX_ACKED; else { @@ -371,7 +371,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ts_longretry = MS(status, AR_DataFailCnt); ts->ts_virtcol = MS(status, AR_VirtRetryCnt); - status = ACCESS_ONCE(ads->ds_txstatus5); + status = READ_ONCE(ads->ds_txstatus5); ts->ts_rssi = MS(status, AR_TxRSSICombined); ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10); ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11); @@ -390,13 +390,13 @@ static int ar9002_hw_get_duration(struct ath_hw *ah, const void *ds, int index) switch (index) { case 0: - return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur0); + return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur0); case 1: - return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur1); + return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur1); case 2: - return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur2); + return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur2); case 3: - return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur3); + return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur3); default: return -1; } -- cgit v1.2.3 From 50f3818196f5d6a87867772b40637af1d51e11f3 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 11 Jan 2017 16:32:15 +0200 Subject: ath9k: ar9003_mac: kill off ACCESS_ONCE() For several reasons, it is desirable to use {READ,WRITE}_ONCE() in preference to ACCESS_ONCE(), and new code is expected to use one of the former. So far, there's been no reason to change most existing uses of ACCESS_ONCE(), as these aren't currently harmful. However, for some new features (e.g. KTSAN / Kernel Thread Sanitizer), it is necessary to instrument reads and writes separately, which is not possible with ACCESS_ONCE(). This distinction is critical to correct operation. It's possible to transform the bulk of kernel code using the Coccinelle script below. However, for some files (including the ath9k ar9003 mac driver), this mangles the formatting. As a preparatory step, this patch converts the driver to use {READ,WRITE}_ONCE() without said mangling. 
---- virtual patch @ depends on patch @ expression E1, E2; @@ - ACCESS_ONCE(E1) = E2 + WRITE_ONCE(E1, E2) @ depends on patch @ expression E; @@ - ACCESS_ONCE(E) + READ_ONCE(E) ---- Signed-off-by: Mark Rutland Cc: ath9k-devel@qca.qualcomm.com Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: ath9k-devel@lists.ath9k.org Cc: netdev@vger.kernel.org Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_mac.c | 92 ++++++++++++++--------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index da84b705cbcd..cc5bb0a76baf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) (i->qcu << AR_TxQcuNum_S) | desc_len; checksum += val; - ACCESS_ONCE(ads->info) = val; + WRITE_ONCE(ads->info, val); checksum += i->link; - ACCESS_ONCE(ads->link) = i->link; + WRITE_ONCE(ads->link, i->link); checksum += i->buf_addr[0]; - ACCESS_ONCE(ads->data0) = i->buf_addr[0]; + WRITE_ONCE(ads->data0, i->buf_addr[0]); checksum += i->buf_addr[1]; - ACCESS_ONCE(ads->data1) = i->buf_addr[1]; + WRITE_ONCE(ads->data1, i->buf_addr[1]); checksum += i->buf_addr[2]; - ACCESS_ONCE(ads->data2) = i->buf_addr[2]; + WRITE_ONCE(ads->data2, i->buf_addr[2]); checksum += i->buf_addr[3]; - ACCESS_ONCE(ads->data3) = i->buf_addr[3]; + WRITE_ONCE(ads->data3, i->buf_addr[3]); checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl3) = val; + WRITE_ONCE(ads->ctl3, val); checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl5) = val; + WRITE_ONCE(ads->ctl5, val); checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl7) = val; + WRITE_ONCE(ads->ctl7, val); checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl9) = val; + WRITE_ONCE(ads->ctl9, val); checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); - ACCESS_ONCE(ads->ctl10) = checksum; + WRITE_ONCE(ads->ctl10, checksum); if (i->is_first || i->is_last) { - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) + WRITE_ONCE(ads->ctl13, set11nTries(i->rates, 0) | set11nTries(i->rates, 1) | set11nTries(i->rates, 2) | set11nTries(i->rates, 3) | (i->dur_update ? AR_DurUpdateEna : 0) - | SM(0, AR_BurstDur); + | SM(0, AR_BurstDur)); - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) + WRITE_ONCE(ads->ctl14, set11nRate(i->rates, 0) | set11nRate(i->rates, 1) | set11nRate(i->rates, 2) - | set11nRate(i->rates, 3); + | set11nRate(i->rates, 3)); } else { - ACCESS_ONCE(ads->ctl13) = 0; - ACCESS_ONCE(ads->ctl14) = 0; + WRITE_ONCE(ads->ctl13, 0); + WRITE_ONCE(ads->ctl14, 0); } ads->ctl20 = 0; @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ctl17 = SM(i->keytype, AR_EncrType); if (!i->is_first) { - ACCESS_ONCE(ads->ctl11) = 0; - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore; - ACCESS_ONCE(ads->ctl15) = 0; - ACCESS_ONCE(ads->ctl16) = 0; - ACCESS_ONCE(ads->ctl17) = ctl17; - ACCESS_ONCE(ads->ctl18) = 0; - ACCESS_ONCE(ads->ctl19) = 0; + WRITE_ONCE(ads->ctl11, 0); + WRITE_ONCE(ads->ctl12, i->is_last ? 
0 : AR_TxMore); + WRITE_ONCE(ads->ctl15, 0); + WRITE_ONCE(ads->ctl16, 0); + WRITE_ONCE(ads->ctl17, ctl17); + WRITE_ONCE(ads->ctl18, 0); + WRITE_ONCE(ads->ctl19, 0); return; } - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) + WRITE_ONCE(ads->ctl11, (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) @@ -107,7 +107,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) | (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0) | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : - (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)); + (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0))); ctl12 = (i->keyix != ATH9K_TXKEYIX_INVALID ? SM(i->keyix, AR_DestIdx) : 0) @@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; ctl12 |= SM(val, AR_PAPRDChainMask); - ACCESS_ONCE(ads->ctl12) = ctl12; - ACCESS_ONCE(ads->ctl17) = ctl17; + WRITE_ONCE(ads->ctl12, ctl12); + WRITE_ONCE(ads->ctl17, ctl17); - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) - | set11nPktDurRTSCTS(i->rates, 1); + WRITE_ONCE(ads->ctl15, set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1)); - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) - | set11nPktDurRTSCTS(i->rates, 3); + WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3)); - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) + WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0) | set11nRateFlags(i->rates, 1) | set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 3) - | SM(i->rtscts_rate, AR_RTSCTSRate); + | SM(i->rtscts_rate, AR_RTSCTSRate)); - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; + WRITE_ONCE(ads->ctl19, AR_Not_Sounding); - ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1); - ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2); - ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3); + WRITE_ONCE(ads->ctl20, SM(i->txpower[1], AR_XmitPower1)); + WRITE_ONCE(ads->ctl21, SM(i->txpower[2], AR_XmitPower2)); + WRITE_ONCE(ads->ctl22, SM(i->txpower[3], AR_XmitPower3)); } static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) @@ -359,7 +359,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, ads = &ah->ts_ring[ah->ts_tail]; - status = ACCESS_ONCE(ads->status8); + status = READ_ONCE(ads->status8); if ((status & AR_TxDone) == 0) return -EINPROGRESS; @@ -385,7 +385,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, if (status & AR_TxOpExceeded) ts->ts_status |= ATH9K_TXERR_XTXOP; - status = ACCESS_ONCE(ads->status2); + status = READ_ONCE(ads->status2); ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00); ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01); ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02); @@ -395,7 +395,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ba_high = ads->status6; } - status = ACCESS_ONCE(ads->status3); + status = READ_ONCE(ads->status3); if (status & AR_ExcessiveRetries) ts->ts_status |= ATH9K_TXERR_XRETRY; if (status & AR_Filtered) @@ -420,7 +420,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ts_longretry = MS(status, AR_DataFailCnt); ts->ts_virtcol = MS(status, AR_VirtRetryCnt); - status = ACCESS_ONCE(ads->status7); + status = READ_ONCE(ads->status7); ts->ts_rssi = MS(status, AR_TxRSSICombined); 
ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10); ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11); @@ -437,13 +437,13 @@ static int ar9003_hw_get_duration(struct ath_hw *ah, const void *ds, int index) switch (index) { case 0: - return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur0); + return MS(READ_ONCE(adc->ctl15), AR_PacketDur0); case 1: - return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur1); + return MS(READ_ONCE(adc->ctl15), AR_PacketDur1); case 2: - return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur2); + return MS(READ_ONCE(adc->ctl16), AR_PacketDur2); case 3: - return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur3); + return MS(READ_ONCE(adc->ctl16), AR_PacketDur3); default: return 0; } -- cgit v1.2.3 From 714ee339ff90df37a41485497aca27b2421803f5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 Jan 2017 16:32:16 +0200 Subject: ath9k: fix spelling mistake: "meaurement" -> "measurement" Trivial fix to spelling mistake in ath_err message Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index a35f78be8dec..ac36873d6da4 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -731,7 +731,7 @@ u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) udelay(100); if (WARN_ON_ONCE(i >= 100)) { - ath_err(common, "PLL4 meaurement not done\n"); + ath_err(common, "PLL4 measurement not done\n"); break; } -- cgit v1.2.3 From 738b35ccee1bcd7cf4af147edd76e7880533ad9f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 11 Jan 2017 21:13:02 -0800 Subject: net: core: Make netif_wake_subqueue a wrapper netif_wake_subqueue() is duplicating the same thing that netif_tx_wake_queue() does, so make it call it directly after looking up the queue from the index. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/netdevice.h | 14 +++++++++++++- net/core/dev.c | 22 ---------------------- 2 files changed, 13 insertions(+), 23 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ebd9e2c12f44..97ae0ac513ee 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3106,7 +3106,19 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev, return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); } -void netif_wake_subqueue(struct net_device *dev, u16 queue_index); +/** + * netif_wake_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Resume individual transmit queue of a device with multiple transmit queues. + */ +static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + netif_tx_wake_queue(txq); +} #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, diff --git a/net/core/dev.c b/net/core/dev.c index e98cc06d2577..ad5959e56116 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2408,28 +2408,6 @@ void netif_schedule_queue(struct netdev_queue *txq) } EXPORT_SYMBOL(netif_schedule_queue); -/** - * netif_wake_subqueue - allow sending packets on subqueue - * @dev: network device - * @queue_index: sub queue index - * - * Resume individual transmit queue of a device with multiple transmit queues. 
- */ -void netif_wake_subqueue(struct net_device *dev, u16 queue_index) -{ - struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); - - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) { - struct Qdisc *q; - - rcu_read_lock(); - q = rcu_dereference(txq->qdisc); - __netif_schedule(q); - rcu_read_unlock(); - } -} -EXPORT_SYMBOL(netif_wake_subqueue); - void netif_tx_wake_queue(struct netdev_queue *dev_queue) { if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { -- cgit v1.2.3 From 038c35a86d213c69faf7aa9500ab9001c3774e15 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 12 Jan 2017 12:23:21 +0530 Subject: cxgb4: Initialize mbox lock and list for mgmt dev Initialize mbox lock and list for mgmt dev to avoid NULL pointer dereference when cxgb_set_vf_mac is called. And also allocate memory for private data while allocating mgmt netdev. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3349e1f376c3..e95bb6a0eca8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4516,7 +4516,8 @@ static int config_mgmt_dev(struct pci_dev *pdev) int err; snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); - netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup); + netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN, + dummy_setup); if (!netdev) return -ENOMEM; @@ -4990,6 +4991,8 @@ sriov: err = -ENOMEM; goto free_adapter; } + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); pci_set_drvdata(pdev, adapter); return 0; -- cgit v1.2.3 From 5bf15e3fb85d8c3957afc35a572ee5476f491c12 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 16 Nov 2016 18:39:05 +0530 Subject: mwifiex: don't wait for main_process in shutdown_drv main_process is not expected to be running when shutdown_drv function is called. currently we wait for main_process completion in the function. Actually the caller has already made sure main_process is completed by performing below actions. (1) disable interrupts in if_ops->disable_int. (2) set adapter->surprise_removed = true, main_process wont be queued. (3) mwifiex_terminate_workqueue(adapter), wait for workqueue to be completed. This patch removes redundant wait code and takes care of related cleanup. 
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/init.c | 19 ++-------- drivers/net/wireless/marvell/mwifiex/main.c | 55 ++++++++++------------------- drivers/net/wireless/marvell/mwifiex/main.h | 5 +-- drivers/net/wireless/marvell/mwifiex/util.c | 15 -------- 4 files changed, 22 insertions(+), 72 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 0e89ccfa244e..4af6ce4e388d 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -657,10 +657,9 @@ void mwifiex_free_priv(struct mwifiex_private *priv) * - Free the adapter * - Notify completion */ -int +void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) { - int ret = -EINPROGRESS; struct mwifiex_private *priv; s32 i; unsigned long flags; @@ -668,15 +667,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) /* mwifiex already shutdown */ if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) - return 0; - - adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING; - /* wait for mwifiex_process to complete */ - if (adapter->mwifiex_processing) { - mwifiex_dbg(adapter, WARN, - "main process is still running\n"); - return ret; - } + return; /* cancel current command */ if (adapter->curr_cmd) { @@ -727,11 +718,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) mwifiex_adapter_cleanup(adapter); spin_unlock(&adapter->mwifiex_lock); - - /* Notify completion */ - ret = mwifiex_shutdown_fw_complete(adapter); - - return ret; + adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index e5c3a8aa3929..1ed17f816af6 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -248,15 +248,14 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) if (adapter->mwifiex_processing || adapter->main_locked) { adapter->more_task_flag = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); - goto exit_main_proc; + return 0; } else { adapter->mwifiex_processing = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); } process_start: do { - if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) || - (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)) + if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) break; /* For non-USB interfaces, If we process interrupts first, it @@ -464,9 +463,6 @@ process_start: adapter->mwifiex_processing = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); -exit_main_proc: - if (adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) - mwifiex_shutdown_drv(adapter); return ret; } EXPORT_SYMBOL_GPL(mwifiex_main_process); @@ -645,16 +641,14 @@ err_dnld_fw: if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); + adapter->surprise_removed = true; + mwifiex_terminate_workqueue(adapter); + if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); - adapter->init_wait_q_woken = false; - - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); } - adapter->surprise_removed = true; - mwifiex_terminate_workqueue(adapter); + init_failed = true; done: if (adapter->cal_data) { @@ -1399,11 +1393,8 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) } mwifiex_dbg(adapter, CMD, "cmd: 
calling mwifiex_shutdown_drv...\n"); - adapter->init_wait_q_woken = false; - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); if (adapter->if_ops.down_dev) adapter->if_ops.down_dev(adapter); @@ -1509,19 +1500,16 @@ err_init_fw: mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); + +err_kmalloc: + adapter->surprise_removed = true; + mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { mwifiex_dbg(adapter, ERROR, "info: %s: shutdown mwifiex\n", __func__); - adapter->init_wait_q_woken = false; - - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); } -err_kmalloc: - mwifiex_terminate_workqueue(adapter); - adapter->surprise_removed = true; complete_all(adapter->fw_done); mwifiex_dbg(adapter, INFO, "%s, error\n", __func__); @@ -1681,17 +1669,13 @@ err_init_fw: pr_debug("info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); - if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { - pr_debug("info: %s: shutdown mwifiex\n", __func__); - adapter->init_wait_q_woken = false; - - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); - } err_registerdev: adapter->surprise_removed = true; mwifiex_terminate_workqueue(adapter); + if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { + pr_debug("info: %s: shutdown mwifiex\n", __func__); + mwifiex_shutdown_drv(adapter); + } err_kmalloc: mwifiex_free_adapter(adapter); @@ -1741,11 +1725,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n"); - adapter->init_wait_q_woken = false; - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n"); if (atomic_read(&adapter->rx_pending) || diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index d552a9d104d0..e915ea046a7f 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -248,7 +248,6 @@ enum MWIFIEX_HARDWARE_STATUS { MWIFIEX_HW_STATUS_INITIALIZING, MWIFIEX_HW_STATUS_INIT_DONE, MWIFIEX_HW_STATUS_RESET, - MWIFIEX_HW_STATUS_CLOSING, MWIFIEX_HW_STATUS_NOT_READY }; @@ -1041,9 +1040,7 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter); int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter); -int mwifiex_shutdown_drv(struct mwifiex_adapter *adapter); - -int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter); +void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter); int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *); diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 18fbb96a46e9..b1ab8da121dd 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -145,21 +145,6 @@ int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter) return 0; } -/* - * Firmware shutdown complete callback handler. 
- * - * This function sets the hardware status to not ready and wakes up - * the function waiting on the init wait queue for the firmware - * shutdown to complete. - */ -int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter) -{ - adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY; - adapter->init_wait_q_woken = true; - wake_up_interruptible(&adapter->init_wait_q); - return 0; -} - /* * This function sends init/shutdown command * to firmware. -- cgit v1.2.3 From fb45bd0c6d6b25536e03290d9e0449b3e4c35e45 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 16 Nov 2016 18:39:06 +0530 Subject: mwifiex: do not free firmware dump memory in shutdown_drv mwifiex_upload_device_dump() already takes care of freeing firmware dump memory. Doing the same thing in mwifiex_shutdown_drv() is redundant. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/init.c | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 4af6ce4e388d..756948385b60 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -409,8 +409,6 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) static void mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) { - int idx; - if (!adapter) { pr_err("%s: adapter is NULL\n", __func__); return; @@ -428,23 +426,6 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n"); mwifiex_free_cmd_buffer(adapter); - for (idx = 0; idx < adapter->num_mem_types; idx++) { - struct memory_type_mapping *entry = - &adapter->mem_type_mapping_tbl[idx]; - - if (entry->mem_ptr) { - vfree(entry->mem_ptr); - entry->mem_ptr = NULL; - } - entry->mem_size = 0; - } - - if (adapter->drv_info_dump) { - vfree(adapter->drv_info_dump); - adapter->drv_info_dump = NULL; - adapter->drv_info_size = 0; - } - if (adapter->sleep_cfm) dev_kfree_skb_any(adapter->sleep_cfm); } -- cgit v1.2.3 From d27121fca12957036077ef56ee0146c99b8a92f7 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 16 Nov 2016 18:39:07 +0530 Subject: mwifiex: get rid of drv_info* adapter variables We can avoid drv_info_dump and drv_info_size adapter variables. 
This info can be passed to mwifiex_upload_device_dump() as parameters Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 42 ++++++++++++----------------- drivers/net/wireless/marvell/mwifiex/main.h | 7 +++-- drivers/net/wireless/marvell/mwifiex/pcie.c | 7 +++-- drivers/net/wireless/marvell/mwifiex/sdio.c | 6 +++-- 4 files changed, 29 insertions(+), 33 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 1ed17f816af6..c1821aa3ef3c 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1026,7 +1026,7 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter) } EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync); -void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) +int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info) { void *p; char drv_version[64]; @@ -1036,21 +1036,17 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) int i, idx; struct netdev_queue *txq; struct mwifiex_debug_info *debug_info; - - if (adapter->drv_info_dump) { - vfree(adapter->drv_info_dump); - adapter->drv_info_dump = NULL; - adapter->drv_info_size = 0; - } + void *drv_info_dump; mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n"); - adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); + /* memory allocate here should be free in mwifiex_upload_device_dump*/ + drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); - if (!adapter->drv_info_dump) - return; + if (!drv_info_dump) + return 0; - p = (char *)(adapter->drv_info_dump); + p = (char *)(drv_info_dump); p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); mwifiex_drv_get_driver_version(adapter, drv_version, @@ -1134,18 +1130,20 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) kfree(debug_info); } - adapter->drv_info_size = p - adapter->drv_info_dump; mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n"); + *drv_info = drv_info_dump; + return p - drv_info_dump; } EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump); -void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, + int drv_info_size) { u8 idx, *dump_data, *fw_dump_ptr; u32 dump_len; dump_len = (strlen("========Start dump driverinfo========\n") + - adapter->drv_info_size + + drv_info_size + strlen("\n========End dump========\n")); for (idx = 0; idx < adapter->num_mem_types; idx++) { @@ -1175,8 +1173,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) strcpy(fw_dump_ptr, "========Start dump driverinfo========\n"); fw_dump_ptr += strlen("========Start dump driverinfo========\n"); - memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size); - fw_dump_ptr += adapter->drv_info_size; + memcpy(fw_dump_ptr, drv_info, drv_info_size); + fw_dump_ptr += drv_info_size; strcpy(fw_dump_ptr, "\n========End dump========\n"); fw_dump_ptr += strlen("\n========End dump========\n"); @@ -1214,18 +1212,12 @@ done: struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; - if (entry->mem_ptr) { - vfree(entry->mem_ptr); - entry->mem_ptr = NULL; - } + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; entry->mem_size = 0; } - if (adapter->drv_info_dump) { - vfree(adapter->drv_info_dump); - adapter->drv_info_dump = NULL; - adapter->drv_info_size = 0; - } + vfree(drv_info); } EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump); diff --git 
a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index e915ea046a7f..cf51e5dfc619 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -994,8 +994,6 @@ struct mwifiex_adapter { u8 key_api_major_ver, key_api_minor_ver; struct memory_type_mapping *mem_type_mapping_tbl; u8 num_mem_types; - void *drv_info_dump; - u32 drv_info_size; bool scan_chan_gap_enabled; struct sk_buff_head rx_data_q; bool mfg_mode; @@ -1641,8 +1639,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv, u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); -void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); -void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); +int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info); +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, + int drv_info_size); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 4db07da81d8d..fb9808a52927 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2715,9 +2715,12 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) { - mwifiex_drv_info_dump(adapter); + int drv_info_size; + void *drv_info; + + drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info); mwifiex_pcie_fw_dump(adapter); - mwifiex_upload_device_dump(adapter); + mwifiex_upload_device_dump(adapter, drv_info, drv_info_size); } static unsigned long iface_work_flags; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 43facba641bf..3abc1dfb0ec4 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2548,13 +2548,15 @@ done: static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; + int drv_info_size; + void *drv_info; - mwifiex_drv_info_dump(adapter); + drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info); if (card->fw_dump_enh) mwifiex_sdio_generic_fw_dump(adapter); else mwifiex_sdio_fw_dump(adapter); - mwifiex_upload_device_dump(adapter); + mwifiex_upload_device_dump(adapter, drv_info, drv_info_size); } static void mwifiex_sdio_work(struct work_struct *work) -- cgit v1.2.3 From 41efaf5824e7cb16c54bbec1273d86d80cdac283 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 16 Nov 2016 18:39:08 +0530 Subject: mwifiex: wait firmware dump complete during card remove process Wait for firmware dump complete in card remove function. For sdio interface, there are two diffenrent cases, card reset trigger sdio_work and firmware dump trigger sdio_work. Do code rearrangement for distinguish between these two cases. 
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Reviewed-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 6 +++++- drivers/net/wireless/marvell/mwifiex/sdio.c | 15 ++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index fb9808a52927..d2f615463f24 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -51,6 +51,9 @@ static int mwifiex_pcie_probe_of(struct device *dev) return 0; } +static void mwifiex_pcie_work(struct work_struct *work); +static DECLARE_WORK(pcie_work, mwifiex_pcie_work); + static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, size_t size, int flags) @@ -254,6 +257,8 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) if (!adapter || !adapter->priv_num) return; + cancel_work_sync(&pcie_work); + if (user_rmmod && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); @@ -2732,7 +2737,6 @@ static void mwifiex_pcie_work(struct work_struct *work) mwifiex_pcie_device_dump_work(save_adapter); } -static DECLARE_WORK(pcie_work, mwifiex_pcie_work); /* This function dumps FW information */ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) { diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 3abc1dfb0ec4..159ac8037cdc 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -46,6 +46,9 @@ */ static u8 user_rmmod; +static void mwifiex_sdio_work(struct work_struct *work); +static DECLARE_WORK(sdio_work, mwifiex_sdio_work); + static struct mwifiex_if_ops sdio_ops; static unsigned long iface_work_flags; @@ -218,7 +221,7 @@ static int mwifiex_sdio_resume(struct device *dev) * This function removes the interface and frees up the card structure. */ static void -mwifiex_sdio_remove(struct sdio_func *func) +__mwifiex_sdio_remove(struct sdio_func *func) { struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; @@ -247,6 +250,13 @@ mwifiex_sdio_remove(struct sdio_func *func) mwifiex_remove_card(adapter); } +static void +mwifiex_sdio_remove(struct sdio_func *func) +{ + cancel_work_sync(&sdio_work); + __mwifiex_sdio_remove(func); +} + /* * SDIO suspend. * @@ -2222,7 +2232,7 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card) * discovered and initializes them from scratch. */ - mwifiex_sdio_remove(func); + __mwifiex_sdio_remove(func); /* * Normally, we would let the driver core take care of releasing these. @@ -2569,7 +2579,6 @@ static void mwifiex_sdio_work(struct work_struct *work) mwifiex_sdio_card_reset_work(save_adapter); } -static DECLARE_WORK(sdio_work, mwifiex_sdio_work); /* This function resets the card */ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) { -- cgit v1.2.3 From 3860e5e39532050c010a0c61c3a980f4a7d4542c Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Wed, 16 Nov 2016 18:39:09 +0530 Subject: mwifiex: move pcie_work and related variables inside card Currently pcie_work and related variables are global. It may create problem while supporting multiple devices simultaneously. Let's move it inside card structure so that separate instance will be created/ cancelled in init/teardown threads of each connected devices. 
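The per-device work pattern this patch introduces can be shown as a standalone illustration (not part of this patch; demo_card, demo_work_fn and DEMO_WORK_DUMP are invented names, not mwifiex code): the work_struct lives inside the card's private structure, the handler recovers the owning card with container_of(), and teardown cancels exactly that instance.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/slab.h>

struct demo_card {
	struct work_struct work;	/* one instance per probed device */
	unsigned long work_flags;
	int id;
};

#define DEMO_WORK_DUMP	0

static struct demo_card *demo;

static void demo_work_fn(struct work_struct *work)
{
	/* Recover the owning card from the embedded work_struct. */
	struct demo_card *card = container_of(work, struct demo_card, work);

	if (test_and_clear_bit(DEMO_WORK_DUMP, &card->work_flags))
		pr_info("demo: device dump for card %d\n", card->id);
}

static int __init demo_init(void)
{
	demo = kzalloc(sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;

	INIT_WORK(&demo->work, demo_work_fn);	/* per device, as in probe() */
	set_bit(DEMO_WORK_DUMP, &demo->work_flags);
	schedule_work(&demo->work);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo->work);		/* what remove() must guarantee */
	kfree(demo);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With one work item per card, two adapters can schedule and cancel their dump work independently, which is exactly the multi-device concern described above.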
Signed-off-by: Ganapathi Bhat Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 23 ++++++++++++----------- drivers/net/wireless/marvell/mwifiex/pcie.h | 2 ++ 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index d2f615463f24..837c2cba6b55 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -52,7 +52,6 @@ static int mwifiex_pcie_probe_of(struct device *dev) } static void mwifiex_pcie_work(struct work_struct *work); -static DECLARE_WORK(pcie_work, mwifiex_pcie_work); static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, @@ -222,6 +221,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl; card->pcie.num_mem_types = data->num_mem_types; card->pcie.can_ext_scan = data->can_ext_scan; + INIT_WORK(&card->work, mwifiex_pcie_work); } /* device tree node parsing and platform specific configuration*/ @@ -257,7 +257,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) if (!adapter || !adapter->priv_num) return; - cancel_work_sync(&pcie_work); + cancel_work_sync(&card->work); if (user_rmmod && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); @@ -2728,25 +2728,27 @@ static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) mwifiex_upload_device_dump(adapter, drv_info, drv_info_size); } -static unsigned long iface_work_flags; -static struct mwifiex_adapter *save_adapter; static void mwifiex_pcie_work(struct work_struct *work) { + struct pcie_service_card *card = + container_of(work, struct pcie_service_card, work); + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, - &iface_work_flags)) - mwifiex_pcie_device_dump_work(save_adapter); + &card->work_flags)) + mwifiex_pcie_device_dump_work(card->adapter); } /* This function dumps FW information */ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) { - save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) + struct pcie_service_card *card = adapter->card; + + if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); - schedule_work(&pcie_work); + schedule_work(&card->work); } /* @@ -3212,7 +3214,6 @@ static void mwifiex_pcie_cleanup_module(void) /* Set the flag as user is removing this module. */ user_rmmod = 1; - cancel_work_sync(&pcie_work); pci_unregister_driver(&mwifiex_pcie); } diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index ae3365d1c34e..21ba5e6a4c2a 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -386,6 +386,8 @@ struct pcie_service_card { #endif struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS]; struct mwifiex_msix_context share_irq_ctx; + struct work_struct work; + unsigned long work_flags; }; static inline int -- cgit v1.2.3 From 90ff71f9557591218b5fce1ce7b67afd4cf83894 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 30 Nov 2016 20:22:16 +0530 Subject: mwifiex: code rearrangement in pcie.c and sdio.c Next patch in this series is going to use mwifiex_read_reg() in remove handlers. The changes here are prerequisites to avoid forward declarations. 
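As an aside on the rationale, the C rule at work is easy to demonstrate in isolation (the demo_* names below are invented, not driver code): a static function defined above its first caller needs no prototype, so hoisting the register helpers lets the next patch call them from the remove handlers without adding forward declarations.

#include <stdio.h>

/* Before: the helper is defined after its caller, so a prototype is required. */
static int demo_read_reg(int reg);		/* forward declaration */

static int demo_remove(void)
{
	return demo_read_reg(0x30);
}

/* Moving this definition above demo_remove() lets the forward declaration
 * be dropped; that is all the rearrangement in this patch does. */
static int demo_read_reg(int reg)
{
	return reg + 1;				/* stand-in for real register I/O */
}

int main(void)
{
	printf("%d\n", demo_remove());
	return 0;
}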
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 73 +++--- drivers/net/wireless/marvell/mwifiex/sdio.c | 336 ++++++++++++++-------------- 2 files changed, 201 insertions(+), 208 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 837c2cba6b55..ed86c129c9ac 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -80,6 +80,42 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter, pci_unmap_single(card->dev, mapping.addr, mapping.len, flags); } +/* + * This function writes data into PCIE card register. + */ +static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) +{ + struct pcie_service_card *card = adapter->card; + + iowrite32(data, card->pci_mmap1 + reg); + + return 0; +} + +/* This function reads data from PCIE card register. + */ +static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) +{ + struct pcie_service_card *card = adapter->card; + + *data = ioread32(card->pci_mmap1 + reg); + if (*data == 0xffffffff) + return 0xffffffff; + + return 0; +} + +/* This function reads u8 data from PCIE card register. */ +static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter, + int reg, u8 *data) +{ + struct pcie_service_card *card = adapter->card; + + *data = ioread8(card->pci_mmap1 + reg); + + return 0; +} + /* * This function reads sleep cookie and checks if FW is ready */ @@ -373,43 +409,6 @@ static struct pci_driver __refdata mwifiex_pcie = { .err_handler = mwifiex_pcie_err_handler, }; -/* - * This function writes data into PCIE card register. - */ -static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) -{ - struct pcie_service_card *card = adapter->card; - - iowrite32(data, card->pci_mmap1 + reg); - - return 0; -} - -/* - * This function reads data from PCIE card register. - */ -static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) -{ - struct pcie_service_card *card = adapter->card; - - *data = ioread32(card->pci_mmap1 + reg); - if (*data == 0xffffffff) - return 0xffffffff; - - return 0; -} - -/* This function reads u8 data from PCIE card register. */ -static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter, - int reg, u8 *data) -{ - struct pcie_service_card *card = adapter->card; - - *data = ioread8(card->pci_mmap1 + reg); - - return 0; -} - /* * This function adds delay loop to ensure FW is awake before proceeding. */ diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 159ac8037cdc..ba3bc95a2d35 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -215,6 +215,171 @@ static int mwifiex_sdio_resume(struct device *dev) return 0; } +/* Write data into SDIO card register. Caller claims SDIO device. */ +static int +mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data) +{ + int ret = -1; + + sdio_writeb(func, data, reg, &ret); + return ret; +} + +/* This function writes data into SDIO card register. 
+ */ +static int +mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + + sdio_claim_host(card->func); + ret = mwifiex_write_reg_locked(card->func, reg, data); + sdio_release_host(card->func); + + return ret; +} + +/* This function reads data from SDIO card register. + */ +static int +mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data) +{ + struct sdio_mmc_card *card = adapter->card; + int ret = -1; + u8 val; + + sdio_claim_host(card->func); + val = sdio_readb(card->func, reg, &ret); + sdio_release_host(card->func); + + *data = val; + + return ret; +} + +/* This function writes multiple data into SDIO card memory. + * + * This does not work in suspended mode. + */ +static int +mwifiex_write_data_sync(struct mwifiex_adapter *adapter, + u8 *buffer, u32 pkt_len, u32 port) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + u8 blk_mode = + (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE; + u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; + u32 blk_cnt = + (blk_mode == + BLOCK_MODE) ? (pkt_len / + MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len; + u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); + + if (adapter->is_suspended) { + mwifiex_dbg(adapter, ERROR, + "%s: not allowed while suspended\n", __func__); + return -1; + } + + sdio_claim_host(card->func); + + ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size); + + sdio_release_host(card->func); + + return ret; +} + +/* This function reads multiple data from SDIO card memory. + */ +static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, + u32 len, u32 port, u8 claim) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE + : BLOCK_MODE; + u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; + u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE) + : len; + u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); + + if (claim) + sdio_claim_host(card->func); + + ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size); + + if (claim) + sdio_release_host(card->func); + + return ret; +} + +/* This function reads the firmware status. + */ +static int +mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) +{ + struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u8 fws0, fws1; + + if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0)) + return -1; + + if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1)) + return -1; + + *dat = (u16)((fws1 << 8) | fws0); + return 0; +} + +/* This function checks the firmware status in card. + */ +static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, + u32 poll_num) +{ + int ret = 0; + u16 firmware_stat; + u32 tries; + + for (tries = 0; tries < poll_num; tries++) { + ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); + if (ret) + continue; + if (firmware_stat == FIRMWARE_READY_SDIO) { + ret = 0; + break; + } + + msleep(100); + ret = -1; + } + + return ret; +} + +/* This function checks if WLAN is the winner. + */ +static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) +{ + int ret = 0; + u8 winner = 0; + struct sdio_mmc_card *card = adapter->card; + + if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner)) + return -1; + + if (winner) + adapter->winner = 0; + else + adapter->winner = 1; + + return ret; +} + /* * SDIO remove. 
* @@ -374,111 +539,6 @@ static struct sdio_driver mwifiex_sdio = { } }; -/* Write data into SDIO card register. Caller claims SDIO device. */ -static int -mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data) -{ - int ret = -1; - sdio_writeb(func, data, reg, &ret); - return ret; -} - -/* - * This function writes data into SDIO card register. - */ -static int -mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) -{ - struct sdio_mmc_card *card = adapter->card; - int ret; - - sdio_claim_host(card->func); - ret = mwifiex_write_reg_locked(card->func, reg, data); - sdio_release_host(card->func); - - return ret; -} - -/* - * This function reads data from SDIO card register. - */ -static int -mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data) -{ - struct sdio_mmc_card *card = adapter->card; - int ret = -1; - u8 val; - - sdio_claim_host(card->func); - val = sdio_readb(card->func, reg, &ret); - sdio_release_host(card->func); - - *data = val; - - return ret; -} - -/* - * This function writes multiple data into SDIO card memory. - * - * This does not work in suspended mode. - */ -static int -mwifiex_write_data_sync(struct mwifiex_adapter *adapter, - u8 *buffer, u32 pkt_len, u32 port) -{ - struct sdio_mmc_card *card = adapter->card; - int ret; - u8 blk_mode = - (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE; - u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; - u32 blk_cnt = - (blk_mode == - BLOCK_MODE) ? (pkt_len / - MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len; - u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); - - if (adapter->is_suspended) { - mwifiex_dbg(adapter, ERROR, - "%s: not allowed while suspended\n", __func__); - return -1; - } - - sdio_claim_host(card->func); - - ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size); - - sdio_release_host(card->func); - - return ret; -} - -/* - * This function reads multiple data from SDIO card memory. - */ -static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, - u32 len, u32 port, u8 claim) -{ - struct sdio_mmc_card *card = adapter->card; - int ret; - u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE - : BLOCK_MODE; - u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; - u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE) - : len; - u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); - - if (claim) - sdio_claim_host(card->func); - - ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size); - - if (claim) - sdio_release_host(card->func); - - return ret; -} - /* * This function wakes up the card. * @@ -764,27 +824,6 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) return -1; } -/* - * This function reads the firmware status. - */ -static int -mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) -{ - struct sdio_mmc_card *card = adapter->card; - const struct mwifiex_sdio_card_reg *reg = card->reg; - u8 fws0, fws1; - - if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0)) - return -1; - - if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1)) - return -1; - - *dat = (u16) ((fws1 << 8) | fws0); - - return 0; -} - /* * This function disables the host interrupt. * @@ -1089,51 +1128,6 @@ done: return ret; } -/* - * This function checks the firmware status in card. 
- */ -static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, - u32 poll_num) -{ - int ret = 0; - u16 firmware_stat; - u32 tries; - - for (tries = 0; tries < poll_num; tries++) { - ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); - if (ret) - continue; - if (firmware_stat == FIRMWARE_READY_SDIO) { - ret = 0; - break; - } else { - msleep(100); - ret = -1; - } - } - - return ret; -} - -/* This function checks if WLAN is the winner. - */ -static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) -{ - int ret = 0; - u8 winner = 0; - struct sdio_mmc_card *card = adapter->card; - - if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner)) - return -1; - - if (winner) - adapter->winner = 0; - else - adapter->winner = 1; - - return ret; -} - /* * This function decode sdio aggreation pkt. * -- cgit v1.2.3 From 045f0c1b5e26818e28a401e623a581dfbd6b371e Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 30 Nov 2016 20:22:17 +0530 Subject: mwifiex: get rid of global user_rmmod flag bus.remove() callback function is called when user removes this module from kernel space or ejects the card from the slot. The driver handles these 2 cases differently. Few commands (FUNC_SHUTDOWN etc.) are sent to the firmware only for module unload case. The variable 'user_rmmod' is used to distinguish between these two scenarios. This patch checks hardware status and get rid of global variable user_rmmod. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 23 ++++++++++++----------- drivers/net/wireless/marvell/mwifiex/sdio.c | 27 ++++----------------------- drivers/net/wireless/marvell/mwifiex/usb.c | 6 +----- 3 files changed, 17 insertions(+), 39 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index ed86c129c9ac..ea79e3470ef2 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -31,8 +31,6 @@ #define PCIE_VERSION "1.0" #define DRV_NAME "Marvell mwifiex PCIe" -static u8 user_rmmod; - static struct mwifiex_if_ops pcie_ops; static const struct of_device_id mwifiex_pcie_of_match_table[] = { @@ -284,6 +282,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) struct pcie_service_card *card; struct mwifiex_adapter *adapter; struct mwifiex_private *priv; + const struct mwifiex_pcie_card_reg *reg; + u32 fw_status; + int ret; card = pci_get_drvdata(pdev); @@ -295,7 +296,11 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) cancel_work_sync(&card->work); - if (user_rmmod && !adapter->mfg_mode) { + reg = card->pcie.reg; + if (reg) + ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + + if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -310,7 +315,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) static void mwifiex_pcie_shutdown(struct pci_dev *pdev) { - user_rmmod = 1; mwifiex_pcie_remove(pdev); return; @@ -2874,8 +2878,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + int ret; + u32 fw_status; - if (user_rmmod) { + ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + if (fw_status == FIRMWARE_READY_PCIE) { mwifiex_dbg(adapter, INFO, "Clearing driver ready signature\n"); if 
(mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) @@ -3187,9 +3194,6 @@ static int mwifiex_pcie_init_module(void) pr_debug("Marvell PCIe Driver\n"); - /* Clear the flag in case user removes the card. */ - user_rmmod = 0; - ret = pci_register_driver(&mwifiex_pcie); if (ret) pr_err("Driver register failed!\n"); @@ -3210,9 +3214,6 @@ static int mwifiex_pcie_init_module(void) */ static void mwifiex_pcie_cleanup_module(void) { - /* Set the flag as user is removing this module. */ - user_rmmod = 1; - pci_unregister_driver(&mwifiex_pcie); } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index ba3bc95a2d35..0b6ed3fab085 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -31,21 +31,6 @@ #define SDIO_VERSION "1.0" -/* The mwifiex_sdio_remove() callback function is called when - * user removes this module from kernel space or ejects - * the card from the slot. The driver handles these 2 cases - * differently. - * If the user is removing the module, the few commands (FUNC_SHUTDOWN, - * HS_CANCEL etc.) are sent to the firmware. - * If the card is removed, there is no need to send these command. - * - * The variable 'user_rmmod' is used to distinguish these two - * scenarios. This flag is initialized as FALSE in case the card - * is removed, and will be set to TRUE for module removal when - * module_exit function is called. - */ -static u8 user_rmmod; - static void mwifiex_sdio_work(struct work_struct *work); static DECLARE_WORK(sdio_work, mwifiex_sdio_work); @@ -391,6 +376,8 @@ __mwifiex_sdio_remove(struct sdio_func *func) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; struct mwifiex_private *priv; + int ret = 0; + u16 firmware_stat; card = sdio_get_drvdata(func); if (!card) @@ -404,7 +391,8 @@ __mwifiex_sdio_remove(struct sdio_func *func) mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); - if (user_rmmod && !adapter->mfg_mode) { + ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); + if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -2724,9 +2712,6 @@ static struct mwifiex_if_ops sdio_ops = { static int mwifiex_sdio_init_module(void) { - /* Clear the flag in case user removes the card. */ - user_rmmod = 0; - return sdio_register_driver(&mwifiex_sdio); } @@ -2742,10 +2727,6 @@ mwifiex_sdio_init_module(void) static void mwifiex_sdio_cleanup_module(void) { - /* Set the flag as user is removing this module. 
*/ - user_rmmod = 1; - cancel_work_sync(&sdio_work); - sdio_unregister_driver(&mwifiex_sdio); } diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index c563160b3b6b..53881c4be55d 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -22,7 +22,6 @@ #define USB_VERSION "1.0" -static u8 user_rmmod; static struct mwifiex_if_ops usb_ops; static struct usb_device_id mwifiex_usb_table[] = { @@ -618,7 +617,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) if (!adapter || !adapter->priv_num) return; - if (user_rmmod && !adapter->mfg_mode) { + if (card->udev->state != USB_STATE_NOTATTACHED && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter, @@ -1230,9 +1229,6 @@ static int mwifiex_usb_init_module(void) */ static void mwifiex_usb_cleanup_module(void) { - /* set the flag as user is removing this module */ - user_rmmod = 1; - usb_deregister(&mwifiex_usb_driver); } -- cgit v1.2.3 From 526735ddc0ae8c7a7751e9ad3a57ae76cd3c35d5 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Thu, 12 Jan 2017 14:57:14 +0100 Subject: net: fix AF_SMC related typo When introducing the new socket family AF_SMC in commit ac7138746e14 ("smc: establish new socket family"), a typo in af_family_clock_key_strings has slipped in. This patch repairs it. Signed-off-by: Ursula Braun Fixes: ac7138746e14 ("smc: establish new socket family") Reported-by: Andrew Morton Signed-off-by: David S. Miller --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sock.c b/net/core/sock.c index dbbdc4f16789..8b35debfe454 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" , - "clock-AF_QIPCRTR", "closck-AF_smc" , "clock-AF_MAX" + "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX" }; /* -- cgit v1.2.3 From 143c017108f8a695e4e1c1f5ec57ee89be9cad70 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Thu, 12 Jan 2017 14:57:15 +0100 Subject: smc: ETH_ALEN as memcpy length for mac addresses When creating an SMC connection, there is a CLC (connection layer control) handshake to prepare for RDMA traffic. The corresponding code is part of commit 0cfdd8f92cac ("smc: connection and link group creation"). Mac addresses to be exchanged in the handshake are copied with a wrong length of 12 instead of 6 bytes. Following code overwrites the wrongly copied code, but nevertheless the correct length should already be used for the preceding mac address copying. Use ETH_ALEN for the memcpy length with mac addresses. Signed-off-by: Ursula Braun Fixes: 0cfdd8f92cac ("smc: connection and link group creation") Reported-by: Dan Carpenter Signed-off-by: David S. 
Miller --- net/smc/smc_clc.c | 10 ++++------ net/smc/smc_ib.h | 4 +++- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index e1e684c562b8..cc6b6f8651eb 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -10,6 +10,7 @@ */ #include +#include #include #include @@ -151,8 +152,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, pclc.hdr.version = SMC_CLC_V1; /* SMC version */ memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE); - memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], - sizeof(smcibdev->mac[ibport - 1])); + memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN); /* determine subnet and mask from internal TCP socket */ rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet, @@ -199,8 +199,7 @@ int smc_clc_send_confirm(struct smc_sock *smc) memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], SMC_GID_SIZE); - memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], - sizeof(link->smcibdev->mac)); + memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(cclc.qpn, link->roce_qp->qp_num); cclc.rmb_rkey = htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); @@ -252,8 +251,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], SMC_GID_SIZE); - memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], - sizeof(link->smcibdev->mac[link->ibport - 1])); + memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(aclc.qpn, link->roce_qp->qp_num); aclc.rmb_rkey = htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 3fe2d55c6051..a95f74bb5569 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -11,6 +11,7 @@ #ifndef _SMC_IB_H #define _SMC_IB_H +#include #include #define SMC_MAX_PORTS 2 /* Max # of ports */ @@ -34,7 +35,8 @@ struct smc_ib_device { /* ib-device infos for smc */ struct ib_cq *roce_cq_recv; /* recv completion queue */ struct tasklet_struct send_tasklet; /* called by send cq handler */ struct tasklet_struct recv_tasklet; /* called by recv cq handler */ - char mac[SMC_MAX_PORTS][6]; /* mac address per port*/ + char mac[SMC_MAX_PORTS][ETH_ALEN]; + /* mac address per port*/ union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */ u8 initialized : 1; /* ib dev CQ, evthdl done */ struct work_struct port_event_work; -- cgit v1.2.3 From c0e6aa42682311611a7bfa40f7c8a91452eb030f Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Thu, 1 Dec 2016 18:38:20 +0530 Subject: mwifiex: use module_*_driver helper macros After user_rmmod global flag removal, *_init_module() and *_cleanup_module() have become just a wrapper functions. We will get rid of them with the help of module_*_driver() macros. For pcie, existing ".init_if" handler has same name as what module_pcie_driver() macro will create. Let's rename it to avoid conflict. 
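The init/exit boilerplate removed across the three buses has one shape, sketched below as a minimal hypothetical PCI module (demo_* names are invented, and .probe/.remove/.id_table are assumed to be filled in for a real driver); module_pci_driver() simply generates the same pair of functions.

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver demo_pci_driver = {
	.name = "demo_pci",
	/* .id_table, .probe, .remove would be set in a real driver */
};

/* Hand-written wrappers of the kind this patch deletes. */
static int __init demo_init(void)
{
	return pci_register_driver(&demo_pci_driver);
}

static void __exit demo_exit(void)
{
	pci_unregister_driver(&demo_pci_driver);
}

module_init(demo_init);
module_exit(demo_exit);

/*
 * Once the wrappers do nothing else, the single line
 *
 *	module_pci_driver(demo_pci_driver);
 *
 * replaces all of the above; module_driver(drv, sdio_register_driver,
 * sdio_unregister_driver) plays the same role for sdio, as used in this patch.
 */
MODULE_LICENSE("GPL");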
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 47 ++++------------------------- drivers/net/wireless/marvell/mwifiex/sdio.c | 29 +----------------- drivers/net/wireless/marvell/mwifiex/usb.c | 35 +-------------------- 3 files changed, 8 insertions(+), 103 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index ea79e3470ef2..55c79e3811d7 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2764,7 +2764,7 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) * - Allocate command response ring buffer * - Allocate sleep cookie buffer */ -static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) +static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; int ret; @@ -2873,7 +2873,7 @@ err_enable_dev: * - Command response ring buffer * - Sleep cookie buffer */ -static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) +static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; @@ -3073,7 +3073,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) * - Allocate event BD ring buffers * - Allocate command response ring buffer * - Allocate sleep cookie buffer - * Part of mwifiex_pcie_init(), not reset the PCIE registers + * Part of mwifiex_init_pcie(), not reset the PCIE registers */ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) { @@ -3156,8 +3156,8 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) } static struct mwifiex_if_ops pcie_ops = { - .init_if = mwifiex_pcie_init, - .cleanup_if = mwifiex_pcie_cleanup, + .init_if = mwifiex_init_pcie, + .cleanup_if = mwifiex_cleanup_pcie, .check_fw_status = mwifiex_check_fw_status, .check_winner_status = mwifiex_check_winner_status, .prog_fw = mwifiex_prog_fw_w_helper, @@ -3183,42 +3183,7 @@ static struct mwifiex_if_ops pcie_ops = { .up_dev = mwifiex_pcie_up_dev, }; -/* - * This function initializes the PCIE driver module. - * - * This registers the device with PCIE bus. - */ -static int mwifiex_pcie_init_module(void) -{ - int ret; - - pr_debug("Marvell PCIe Driver\n"); - - ret = pci_register_driver(&mwifiex_pcie); - if (ret) - pr_err("Driver register failed!\n"); - else - pr_debug("info: Driver registered successfully!\n"); - - return ret; -} - -/* - * This function cleans up the PCIE driver. - * - * The following major steps are followed for cleanup - - * - Resume the device if its suspended - * - Disconnect the device if connected - * - Shutdown the firmware - * - Unregister the device from PCIE bus. - */ -static void mwifiex_pcie_cleanup_module(void) -{ - pci_unregister_driver(&mwifiex_pcie); -} - -module_init(mwifiex_pcie_init_module); -module_exit(mwifiex_pcie_cleanup_module); +module_pci_driver(mwifiex_pcie); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 0b6ed3fab085..44eb65acafdd 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2704,34 +2704,7 @@ static struct mwifiex_if_ops sdio_ops = { .deaggr_pkt = mwifiex_deaggr_sdio_pkt, }; -/* - * This function initializes the SDIO driver. 
- * - * This registers the device with SDIO bus. - */ -static int -mwifiex_sdio_init_module(void) -{ - return sdio_register_driver(&mwifiex_sdio); -} - -/* - * This function cleans up the SDIO driver. - * - * The following major steps are followed for cleanup - - * - Resume the device if its suspended - * - Disconnect the device if connected - * - Shutdown the firmware - * - Unregister the device from SDIO bus. - */ -static void -mwifiex_sdio_cleanup_module(void) -{ - sdio_unregister_driver(&mwifiex_sdio); -} - -module_init(mwifiex_sdio_init_module); -module_exit(mwifiex_sdio_cleanup_module); +module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 53881c4be55d..9cf3334adf4d 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -1200,40 +1200,7 @@ static struct mwifiex_if_ops usb_ops = { .is_port_ready = mwifiex_usb_is_port_ready, }; -/* This function initializes the USB driver module. - * - * This registers the device with USB bus. - */ -static int mwifiex_usb_init_module(void) -{ - int ret; - - pr_debug("Marvell USB8797 Driver\n"); - - ret = usb_register(&mwifiex_usb_driver); - if (ret) - pr_err("Driver register failed!\n"); - else - pr_debug("info: Driver registered successfully!\n"); - - return ret; -} - -/* This function cleans up the USB driver. - * - * The following major steps are followed in .disconnect for cleanup: - * - Resume the device if its suspended - * - Disconnect the device if connected - * - Shutdown the firmware - * - Unregister the device from USB bus. - */ -static void mwifiex_usb_cleanup_module(void) -{ - usb_deregister(&mwifiex_usb_driver); -} - -module_init(mwifiex_usb_init_module); -module_exit(mwifiex_usb_cleanup_module); +module_usb_driver(mwifiex_usb_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION); -- cgit v1.2.3 From 8750ab6236b0f54db4be6406bb2a4a71f908f049 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 14 Dec 2016 19:40:47 +0530 Subject: mwifiex: get rid of mwifiex_do_flr wrapper This patch gets rid of mwifiex_do_flr. We will call mwifiex_shutdown_sw() and mwifiex_reinit_sw() directly. These two general purpose functions will be useful for sdio card reset handler. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 31 +++++------------------------ drivers/net/wireless/marvell/mwifiex/main.h | 3 ++- drivers/net/wireless/marvell/mwifiex/pcie.c | 4 ++-- 3 files changed, 9 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index c1821aa3ef3c..3fc622195ad7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1348,7 +1348,7 @@ static void mwifiex_main_work_queue(struct work_struct *work) * This function gets called during PCIe function level reset. 
Required * code is extracted from mwifiex_remove_card() */ -static int +int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; @@ -1417,13 +1417,13 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) exit_return: return 0; } +EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); /* This function gets called during PCIe function level reset. Required * code is extracted from mwifiex_add_card() */ -static int -mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done, - struct mwifiex_if_ops *if_ops, u8 iface_type) +int +mwifiex_reinit_sw(struct mwifiex_adapter *adapter) { char fw_name[32]; struct pcie_service_card *card = adapter->card; @@ -1432,9 +1432,6 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done, if (adapter->if_ops.up_dev) adapter->if_ops.up_dev(adapter); - adapter->iface_type = iface_type; - adapter->fw_done = fw_done; - adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; adapter->surprise_removed = false; init_waitqueue_head(&adapter->init_wait_q); @@ -1507,25 +1504,7 @@ err_kmalloc: return -1; } - -/* This function processes pre and post PCIe function level resets. - * It performs software cleanup without touching PCIe specific code. - * Also, during initialization PCIe stuff is skipped. - */ -void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare) -{ - struct mwifiex_if_ops if_ops; - - if (!prepare) { - mwifiex_reinit_sw(adapter, adapter->fw_done, &if_ops, - adapter->iface_type); - } else { - memcpy(&if_ops, &adapter->if_ops, - sizeof(struct mwifiex_if_ops)); - mwifiex_shutdown_sw(adapter); - } -} -EXPORT_SYMBOL_GPL(mwifiex_do_flr); +EXPORT_SYMBOL_GPL(mwifiex_reinit_sw); static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv) { diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index cf51e5dfc619..5c8297207f33 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1666,5 +1666,6 @@ void mwifiex_debugfs_remove(void); void mwifiex_dev_debugfs_init(struct mwifiex_private *priv); void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv); #endif -void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare); +int mwifiex_reinit_sw(struct mwifiex_adapter *adapter); +int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter); #endif /* !_MWIFIEX_MAIN_H_ */ diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 55c79e3811d7..02f6db0fa395 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -375,7 +375,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) * Cleanup all software without cleaning anything related to * PCIe and HW. 
*/ - mwifiex_do_flr(adapter, prepare); + mwifiex_shutdown_sw(adapter); adapter->surprise_removed = true; } else { /* Kernel stores and restores PCIe function context before and @@ -383,7 +383,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) * and firmware including firmware redownload */ adapter->surprise_removed = false; - mwifiex_do_flr(adapter, prepare); + mwifiex_reinit_sw(adapter); } mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); } -- cgit v1.2.3 From ec750f1082d7cc08ac6d0173e6e377d568c5b2b5 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 14 Dec 2016 19:40:48 +0530 Subject: mwifiex: cleanup in PCIe flr code path adapter and card variables don't get freed during PCIe function level reset. "adapter->ext_scan" variable need not be re-initialized. fw_name and tx_buf_size initialization is moved to pcie specific code so that mwifiex_reinit_sw() can be used by SDIO. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 9 --------- drivers/net/wireless/marvell/mwifiex/pcie.c | 12 +++++++++++- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 3fc622195ad7..9d80180a5519 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1425,9 +1425,6 @@ EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); int mwifiex_reinit_sw(struct mwifiex_adapter *adapter) { - char fw_name[32]; - struct pcie_service_card *card = adapter->card; - mwifiex_init_lock_list(adapter); if (adapter->if_ops.up_dev) adapter->if_ops.up_dev(adapter); @@ -1468,18 +1465,12 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) * mwifiex_register_dev() */ mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__); - strcpy(fw_name, adapter->fw_name); - strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME); - adapter->tx_buf_size = card->pcie.tx_buf_size; - adapter->ext_scan = card->pcie.can_ext_scan; if (mwifiex_init_hw_fw(adapter, false)) { - strcpy(adapter->fw_name, fw_name); mwifiex_dbg(adapter, ERROR, "%s: firmware init failed\n", __func__); goto err_init_fw; } - strcpy(adapter->fw_name, fw_name); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); complete_all(adapter->fw_done); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 02f6db0fa395..66226c615be0 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -3082,6 +3082,17 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) struct pci_dev *pdev = card->dev; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + /* Bluetooth is not on pcie interface. Download Wifi only firmware + * during pcie FLR, so that bluetooth part of firmware which is + * already running doesn't get affected. + */ + strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME); + + /* tx_buf_size might be changed to 3584 by firmware during + * data transfer, we should reset it to default size. 
+ */ + adapter->tx_buf_size = card->pcie.tx_buf_size; + card->cmdrsp_buf = NULL; ret = mwifiex_pcie_create_txbd_ring(adapter); if (ret) { @@ -3143,7 +3154,6 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); adapter->seq_num = 0; - adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; if (reg->sleep_cookie) mwifiex_pcie_delete_sleep_cookie_buf(adapter); -- cgit v1.2.3 From c742e623e9415d3e1e5af00470ae5086fa6dd233 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 14 Dec 2016 19:40:49 +0530 Subject: mwifiex: sdio card reset enhancement Commit b4336a282db8 ("mwifiex: sdio: reset adapter using mmc_hw_reset") introduces a simple sdio card reset solution based on card remove and re-probe. This solution has proved to be vulnerable, as card and adapter structures are not protected, concurrent access will result in kernel panic issues. Let's reuse PCIe FLR's functions for SDIO reset to avoid freeing and reallocating adapter and card structures. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 73 +++++++++++++---------------- drivers/net/wireless/marvell/mwifiex/sdio.h | 3 -- 2 files changed, 33 insertions(+), 43 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 44eb65acafdd..b3aca10a71f2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -104,7 +104,6 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) init_completion(&card->fw_done); card->func = func; - card->device_id = id; func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; @@ -2196,33 +2195,13 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) port, card->mp_data_port_mask); } -static void mwifiex_recreate_adapter(struct sdio_mmc_card *card) +static struct mwifiex_adapter *save_adapter; +static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) { + struct sdio_mmc_card *card = adapter->card; struct sdio_func *func = card->func; - const struct sdio_device_id *device_id = card->device_id; - - /* TODO mmc_hw_reset does not require destroying and re-probing the - * whole adapter. Hence there was no need to for this rube-goldberg - * design to reload the fw from an external workqueue. If we don't - * destroy the adapter we could reload the fw from - * mwifiex_main_work_queue directly. - * The real difficulty with fw reset is to restore all the user - * settings applied through ioctl. By destroying and recreating the - * adapter, we take the easy way out, since we rely on user space to - * restore them. We assume that user space will treat the new - * incarnation of the adapter(interfaces) as if they had been just - * discovered and initializes them from scratch. - */ - __mwifiex_sdio_remove(func); - - /* - * Normally, we would let the driver core take care of releasing these. - * But we're not letting the driver core handle this one. See above - * TODO. 
- */ - sdio_set_drvdata(func, NULL); - devm_kfree(&func->dev, card); + mwifiex_shutdown_sw(adapter); /* power cycle the adapter */ sdio_claim_host(func); @@ -2235,21 +2214,7 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card) clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags); - mwifiex_sdio_probe(func, device_id); -} - -static struct mwifiex_adapter *save_adapter; -static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) -{ - struct sdio_mmc_card *card = adapter->card; - - /* TODO card pointer is unprotected. If the adapter is removed - * physically, sdio core might trigger mwifiex_sdio_remove, before this - * workqueue is run, which will destroy the adapter struct. When this - * workqueue eventually exceutes it will dereference an invalid adapter - * pointer - */ - mwifiex_recreate_adapter(card); + mwifiex_reinit_sw(adapter); } /* This function read/write firmware */ @@ -2677,6 +2642,33 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) return p - drv_buf; } +/* sdio device/function initialization, code is extracted + * from init_if handler and register_dev handler. + */ +static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter) +{ + struct sdio_mmc_card *card = adapter->card; + u8 sdio_ireg; + + sdio_claim_host(card->func); + sdio_enable_func(card->func); + sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE); + sdio_release_host(card->func); + + /* tx_buf_size might be changed to 3584 by firmware during + * data transfer, we will reset to default size. + */ + adapter->tx_buf_size = card->tx_buf_size; + + /* Read the host_int_status_reg for ACK the first interrupt got + * from the bootloader. If we don't do this we get a interrupt + * as soon as we register the irq. + */ + mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg); + + mwifiex_init_sdio_ioport(adapter); +} + static struct mwifiex_if_ops sdio_ops = { .init_if = mwifiex_init_sdio, .cleanup_if = mwifiex_cleanup_sdio, @@ -2702,6 +2694,7 @@ static struct mwifiex_if_ops sdio_ops = { .reg_dump = mwifiex_sdio_reg_dump, .device_dump = mwifiex_sdio_device_dump, .deaggr_pkt = mwifiex_deaggr_sdio_pkt, + .up_dev = mwifiex_sdio_up_dev, }; module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index cdbf3a3ac7f9..afa10d5031cf 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -267,9 +267,6 @@ struct sdio_mmc_card { struct mwifiex_sdio_mpa_tx mpa_tx; struct mwifiex_sdio_mpa_rx mpa_rx; - - /* needed for card reset */ - const struct sdio_device_id *device_id; }; struct mwifiex_sdio_device { -- cgit v1.2.3 From a7513a4fa9193714b1b02aaa8836af743ccd938d Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 14 Dec 2016 19:40:50 +0530 Subject: mwifiex: get rid of __mwifiex_sdio_remove helper __mwifiex_sdio_remove helper is not needed after our enhancements in SDIO card reset. 
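To summarize the reset flow that results from these SDIO changes: the card and adapter structures now stay allocated across a reset, and only the software state is torn down and rebuilt around the host-controller reset. A condensed sketch of the reworked path (function names as in the diffs above; the mmc_hw_reset() call is assumed from the earlier commit b4336a282db8, and locking/work-flag handling is omitted):

static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
{
	struct sdio_mmc_card *card = adapter->card;
	struct sdio_func *func = card->func;

	mwifiex_shutdown_sw(adapter);		/* software teardown, shared with PCIe FLR */

	sdio_claim_host(func);			/* power cycle the card */
	mmc_hw_reset(func->card->host);		/* assumed, per b4336a282db8 */
	sdio_release_host(func);

	mwifiex_reinit_sw(adapter);		/* software re-init, shared with PCIe FLR */
}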
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index b3aca10a71f2..0fda87a7441d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -370,7 +370,7 @@ static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) * This function removes the interface and frees up the card structure. */ static void -__mwifiex_sdio_remove(struct sdio_func *func) +mwifiex_sdio_remove(struct sdio_func *func) { struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; @@ -388,6 +388,8 @@ __mwifiex_sdio_remove(struct sdio_func *func) if (!adapter || !adapter->priv_num) return; + cancel_work_sync(&sdio_work); + mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); @@ -402,13 +404,6 @@ __mwifiex_sdio_remove(struct sdio_func *func) mwifiex_remove_card(adapter); } -static void -mwifiex_sdio_remove(struct sdio_func *func) -{ - cancel_work_sync(&sdio_work); - __mwifiex_sdio_remove(func); -} - /* * SDIO suspend. * -- cgit v1.2.3 From cc75c577806a53893122829d91cb122b51643a2d Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 14 Dec 2016 19:40:51 +0530 Subject: mwifiex: get rid of global save_adapter and sdio_work This patch moves sdio_work to card structure, in this way we can get adapter structure in the work, so save_adapter won't be needed. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 39 ++++++++++++++++------------- drivers/net/wireless/marvell/mwifiex/sdio.h | 3 +++ 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 0fda87a7441d..a4b356d267f9 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -32,10 +32,8 @@ #define SDIO_VERSION "1.0" static void mwifiex_sdio_work(struct work_struct *work); -static DECLARE_WORK(sdio_work, mwifiex_sdio_work); static struct mwifiex_if_ops sdio_ops; -static unsigned long iface_work_flags; static struct memory_type_mapping generic_mem_type_map[] = { {"DUMP", NULL, 0, 0xDD}, @@ -123,6 +121,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) card->fw_dump_enh = data->fw_dump_enh; card->can_auto_tdls = data->can_auto_tdls; card->can_ext_scan = data->can_ext_scan; + INIT_WORK(&card->work, mwifiex_sdio_work); } sdio_claim_host(func); @@ -388,7 +387,7 @@ mwifiex_sdio_remove(struct sdio_func *func) if (!adapter || !adapter->priv_num) return; - cancel_work_sync(&sdio_work); + cancel_work_sync(&card->work); mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); @@ -2190,7 +2189,6 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) port, card->mp_data_port_mask); } -static struct mwifiex_adapter *save_adapter; static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; @@ -2206,8 +2204,8 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) /* Previous save_adapter won't be valid after this. We will cancel * pending work requests. 
*/ - clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); - clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags); + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); mwifiex_reinit_sw(adapter); } @@ -2513,35 +2511,40 @@ static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) static void mwifiex_sdio_work(struct work_struct *work) { + struct sdio_mmc_card *card = + container_of(work, struct sdio_mmc_card, work); + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, - &iface_work_flags)) - mwifiex_sdio_device_dump_work(save_adapter); + &card->work_flags)) + mwifiex_sdio_device_dump_work(card->adapter); if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, - &iface_work_flags)) - mwifiex_sdio_card_reset_work(save_adapter); + &card->work_flags)) + mwifiex_sdio_card_reset_work(card->adapter); } /* This function resets the card */ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) { - save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags)) + struct sdio_mmc_card *card = adapter->card; + + if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); - schedule_work(&sdio_work); + schedule_work(&card->work); } /* This function dumps FW information */ static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter) { - save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) + struct sdio_mmc_card *card = adapter->card; + + if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); - schedule_work(&sdio_work); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + schedule_work(&card->work); } /* Function to dump SDIO function registers and SDIO scratch registers in case diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index afa10d5031cf..dccf7fd1aef3 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -267,6 +267,9 @@ struct sdio_mmc_card { struct mwifiex_sdio_mpa_tx mpa_tx; struct mwifiex_sdio_mpa_rx mpa_rx; + + struct work_struct work; + unsigned long work_flags; }; struct mwifiex_sdio_device { -- cgit v1.2.3 From 6b8cc1d11ef75c5b9c530b3d0d148f3c2dd25f93 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 12 Jan 2017 11:51:32 +0100 Subject: bpf: pass original insn directly to convert_ctx_access Currently, when calling convert_ctx_access() callback for the various program types, we pass in insn->dst_reg, insn->src_reg, insn->off from the original instruction. This information is needed to rewrite the instruction that is based on the user ctx structure into a kernel representation for the ctx. As we'd like to allow access size beyond just BPF_W, we'd need also insn->code for that in order to decode the original access size. Given that, lets just pass insn directly to the convert_ctx_access() callback and work on that to not clutter the callback with even more arguments we need to pass when everything is already contained in insn. So lets go through that once, no functional change. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- include/linux/bpf.h | 7 ++- kernel/bpf/verifier.c | 3 +- kernel/trace/bpf_trace.c | 15 ++--- net/core/filter.c | 139 +++++++++++++++++++++++++---------------------- 4 files changed, 87 insertions(+), 77 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 94ea8d2383e6..f8c3560b01db 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -161,9 +161,10 @@ struct bpf_verifier_ops { enum bpf_reg_type *reg_type); int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); - u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, - int src_reg, int ctx_off, - struct bpf_insn *insn, struct bpf_prog *prog); + u32 (*convert_ctx_access)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog); }; struct bpf_prog_type_list { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2efdc9128e3c..df7e47244e75 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3177,8 +3177,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX) continue; - cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg, - insn->off, insn_buf, env->prog); + cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose("bpf verifier is misconfigured\n"); return -EINVAL; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index f883c43c96f3..1860e7f1e5a8 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -572,28 +572,29 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type return true; } -static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg, - int src_reg, int ctx_off, +static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog) { struct bpf_insn *insn = insn_buf; - switch (ctx_off) { + switch (si->off) { case offsetof(struct bpf_perf_event_data, sample_period): BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, - data), dst_reg, src_reg, + data), si->dst_reg, si->src_reg, offsetof(struct bpf_perf_event_data_kern, data)); - *insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg, + *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, offsetof(struct perf_sample_data, period)); break; default: *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, - regs), dst_reg, src_reg, + regs), si->dst_reg, si->src_reg, offsetof(struct bpf_perf_event_data_kern, regs)); - *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, + si->off); break; } diff --git a/net/core/filter.c b/net/core/filter.c index f4d16a905754..8cfbdefbfb1c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2972,32 +2972,33 @@ void bpf_warn_invalid_xdp_action(u32 act) } EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); -static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, - int src_reg, int ctx_off, +static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog) { struct bpf_insn *insn = insn_buf; + int off; - switch (ctx_off) { + switch (si->off) { case offsetof(struct __sk_buff, len): BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 
len) != 4); - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sk_buff, len)); break; case offsetof(struct __sk_buff, protocol): BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, offsetof(struct sk_buff, protocol)); break; case offsetof(struct __sk_buff, vlan_proto): BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); - *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, offsetof(struct sk_buff, vlan_proto)); break; @@ -3005,17 +3006,17 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sk_buff, priority)); else - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sk_buff, priority)); break; case offsetof(struct __sk_buff, ingress_ifindex): BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4); - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sk_buff, skb_iif)); break; @@ -3023,17 +3024,17 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), - dst_reg, src_reg, + si->dst_reg, si->src_reg, offsetof(struct sk_buff, dev)); - *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1); - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct net_device, ifindex)); break; case offsetof(struct __sk_buff, hash): BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sk_buff, hash)); break; @@ -3041,63 +3042,74 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sk_buff, mark)); else - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sk_buff, mark)); break; case offsetof(struct __sk_buff, pkt_type): - return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn); + return convert_skb_access(SKF_AD_PKTTYPE, si->dst_reg, + si->src_reg, insn); case offsetof(struct __sk_buff, queue_mapping): - return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn); + return convert_skb_access(SKF_AD_QUEUE, si->dst_reg, + si->src_reg, insn); case offsetof(struct __sk_buff, vlan_present): return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, - dst_reg, src_reg, insn); + si->dst_reg, si->src_reg, insn); case offsetof(struct __sk_buff, vlan_tci): return convert_skb_access(SKF_AD_VLAN_TAG, - dst_reg, src_reg, insn); + si->dst_reg, si->src_reg, insn); case offsetof(struct __sk_buff, cb[0]) ... 
offsetof(struct __sk_buff, cb[4]): BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); prog->cb_access = 1; - ctx_off -= offsetof(struct __sk_buff, cb[0]); - ctx_off += offsetof(struct sk_buff, cb); - ctx_off += offsetof(struct qdisc_skb_cb, data); + off = si->off; + off -= offsetof(struct __sk_buff, cb[0]); + off += offsetof(struct sk_buff, cb); + off += offsetof(struct qdisc_skb_cb, data); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off); + *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, + si->src_reg, off); else - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, + si->src_reg, off); break; case offsetof(struct __sk_buff, tc_classid): - ctx_off -= offsetof(struct __sk_buff, tc_classid); - ctx_off += offsetof(struct sk_buff, cb); - ctx_off += offsetof(struct qdisc_skb_cb, tc_classid); + BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2); + + off = si->off; + off -= offsetof(struct __sk_buff, tc_classid); + off += offsetof(struct sk_buff, cb); + off += offsetof(struct qdisc_skb_cb, tc_classid); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off); + *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, + si->src_reg, off); else - *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off); + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, + si->src_reg, off); break; case offsetof(struct __sk_buff, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), - dst_reg, src_reg, + si->dst_reg, si->src_reg, offsetof(struct sk_buff, data)); break; case offsetof(struct __sk_buff, data_end): - ctx_off -= offsetof(struct __sk_buff, data_end); - ctx_off += offsetof(struct sk_buff, cb); - ctx_off += offsetof(struct bpf_skb_data_end, data_end); - *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg, - ctx_off); + off = si->off; + off -= offsetof(struct __sk_buff, data_end); + off += offsetof(struct sk_buff, cb); + off += offsetof(struct bpf_skb_data_end, data_end); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, + si->src_reg, off); break; case offsetof(struct __sk_buff, tc_index): @@ -3105,110 +3117,107 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, + *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, offsetof(struct sk_buff, tc_index)); else - *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, offsetof(struct sk_buff, tc_index)); - break; #else if (type == BPF_WRITE) - *insn++ = BPF_MOV64_REG(dst_reg, dst_reg); + *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); else - *insn++ = BPF_MOV64_IMM(dst_reg, 0); - break; + *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #endif + break; } return insn - insn_buf; } static u32 sock_filter_convert_ctx_access(enum bpf_access_type type, - int dst_reg, int src_reg, - int ctx_off, + const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog) { struct bpf_insn *insn = insn_buf; - switch (ctx_off) { + switch (si->off) { case offsetof(struct bpf_sock, bound_dev_if): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_bound_dev_if)); else - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 
offsetof(struct sock, sk_bound_dev_if)); break; case offsetof(struct bpf_sock, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2); - *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, offsetof(struct sock, sk_family)); break; case offsetof(struct bpf_sock, type): - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, __sk_flags_offset)); - *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_TYPE_MASK); - *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_TYPE_SHIFT); + *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK); + *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT); break; case offsetof(struct bpf_sock, protocol): - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, __sk_flags_offset)); - *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_PROTO_MASK); - *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_PROTO_SHIFT); + *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); + *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); break; } return insn - insn_buf; } -static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, int dst_reg, - int src_reg, int ctx_off, +static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog) { struct bpf_insn *insn = insn_buf; - switch (ctx_off) { + switch (si->off) { case offsetof(struct __sk_buff, ifindex): BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), - dst_reg, src_reg, + si->dst_reg, si->src_reg, offsetof(struct sk_buff, dev)); - *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct net_device, ifindex)); break; default: - return sk_filter_convert_ctx_access(type, dst_reg, src_reg, - ctx_off, insn_buf, prog); + return sk_filter_convert_ctx_access(type, si, insn_buf, prog); } return insn - insn_buf; } -static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg, - int src_reg, int ctx_off, +static u32 xdp_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog) { struct bpf_insn *insn = insn_buf; - switch (ctx_off) { + switch (si->off) { case offsetof(struct xdp_md, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), - dst_reg, src_reg, + si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data)); break; case offsetof(struct xdp_md, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), - dst_reg, src_reg, + si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data_end)); break; } -- cgit v1.2.3 From 62c7989b24dbd348c2507ee6458ebf5637d6ddb5 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 12 Jan 2017 11:51:33 +0100 Subject: bpf: allow b/h/w/dw access for bpf's cb in ctx When structs are used to store temporary state in cb[] buffer that is used with programs and among tail calls, then the generated code will not always access the buffer in bpf_w chunks. We can ease programming of it and let this act more natural by allowing for aligned b/h/w/dw sized access for cb[] ctx member. Various test cases are attached as well for the selftest suite. Potentially, this can also be reused for other program types to pass data around. 
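As a rough illustration of what this enables (not taken from the patch; the struct layout and names are invented for the example), a tc classifier can now overlay a small struct on skb->cb[] and let the compiler pick natural access sizes, as long as every field stays naturally aligned within the 20-byte cb area:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

/* Hypothetical per-packet state stashed in skb->cb[] and shared with a
 * program invoked later via tail call. The __u64 sits at offset 8
 * (i.e. cb[2]), keeping it 8-byte aligned as the verifier requires.
 */
struct cb_state {
	__u8	stage;		/* byte-sized access	*/
	__u8	flags;
	__u16	vlan;		/* half-word access	*/
	__u32	payload_off;	/* word access		*/
	__u64	cookie;		/* double-word access	*/
};

__attribute__((section("classifier"), used))
int cls_stash(struct __sk_buff *skb)
{
	struct cb_state *st = (struct cb_state *)skb->cb;

	st->stage	= 1;
	st->flags	= 0;
	st->vlan	= (__u16)skb->vlan_tci;
	st->payload_off	= 14;
	st->cookie	= 0x1234;

	return TC_ACT_OK;
}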
Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 8 +- net/core/filter.c | 41 ++- tools/testing/selftests/bpf/test_verifier.c | 442 +++++++++++++++++++++++++++- 3 files changed, 478 insertions(+), 13 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index df7e47244e75..d60e12c67266 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3165,10 +3165,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { - if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) || + if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || + insn->code == (BPF_LDX | BPF_MEM | BPF_H) || + insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; - else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) || + else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || + insn->code == (BPF_STX | BPF_MEM | BPF_H) || + insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else diff --git a/net/core/filter.c b/net/core/filter.c index 8cfbdefbfb1c..90383860e224 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2776,11 +2776,33 @@ static bool __is_valid_access(int off, int size) { if (off < 0 || off >= sizeof(struct __sk_buff)) return false; + /* The verifier guarantees that size > 0. */ if (off % size != 0) return false; - if (size != sizeof(__u32)) - return false; + + switch (off) { + case offsetof(struct __sk_buff, cb[0]) ... + offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1: + if (size == sizeof(__u16) && + off > offsetof(struct __sk_buff, cb[4]) + sizeof(__u16)) + return false; + if (size == sizeof(__u32) && + off > offsetof(struct __sk_buff, cb[4])) + return false; + if (size == sizeof(__u64) && + off > offsetof(struct __sk_buff, cb[2])) + return false; + if (size != sizeof(__u8) && + size != sizeof(__u16) && + size != sizeof(__u32) && + size != sizeof(__u64)) + return false; + break; + default: + if (size != sizeof(__u32)) + return false; + } return true; } @@ -2799,7 +2821,7 @@ static bool sk_filter_is_valid_access(int off, int size, if (type == BPF_WRITE) { switch (off) { case offsetof(struct __sk_buff, cb[0]) ... - offsetof(struct __sk_buff, cb[4]): + offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1: break; default: return false; @@ -2823,7 +2845,7 @@ static bool lwt_is_valid_access(int off, int size, case offsetof(struct __sk_buff, mark): case offsetof(struct __sk_buff, priority): case offsetof(struct __sk_buff, cb[0]) ... - offsetof(struct __sk_buff, cb[4]): + offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1: break; default: return false; @@ -2915,7 +2937,7 @@ static bool tc_cls_act_is_valid_access(int off, int size, case offsetof(struct __sk_buff, tc_index): case offsetof(struct __sk_buff, priority): case offsetof(struct __sk_buff, cb[0]) ... - offsetof(struct __sk_buff, cb[4]): + offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1: case offsetof(struct __sk_buff, tc_classid): break; default: @@ -3066,8 +3088,11 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, si->dst_reg, si->src_reg, insn); case offsetof(struct __sk_buff, cb[0]) ... 
- offsetof(struct __sk_buff, cb[4]): + offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1: BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); + BUILD_BUG_ON((offsetof(struct sk_buff, cb) + + offsetof(struct qdisc_skb_cb, data)) % + sizeof(__u64)); prog->cb_access = 1; off = si->off; @@ -3075,10 +3100,10 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, data); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, + *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); else - *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, + *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); break; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 9bb45346dc72..1aa73241c999 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -859,15 +859,451 @@ static struct bpf_test tests[] = { .result = REJECT, }, { - "check non-u32 access to cb", + "check cb access: byte", .insns = { - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1, + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, 
cb[1]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 3), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "check cb access: byte, oob 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 4), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: byte, oob 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) - 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: byte, oob 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 4), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: byte, oob 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) - 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: byte, wrong type", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "check cb access: half", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1])), 
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "check cb access: half, unaligned", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 1), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "check cb access: half, oob 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 4), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: half, oob 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) - 2), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: half, oob 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 4), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: half, oob 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) - 2), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: half, wrong type", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "check cb access: word", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "check cb access: word, unaligned 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "check cb access: word, unaligned 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + 
offsetof(struct __sk_buff, cb[4]) + 1), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "check cb access: word, unaligned 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "check cb access: word, unaligned 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 3), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "check cb access: double", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "check cb access: double, unaligned 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "check cb access: double, unaligned 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "check cb access: double, oob 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: double, oob 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 8), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: double, oob 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) - 8), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: double, oob 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: double, oob 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 8), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: double, oob 6", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) - 8), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check cb access: double, wrong type", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[0])), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", - .errstr_unpriv = "R1 leaks addr", .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, }, { "check out of range skb->cb access", -- cgit v1.2.3 From 
c9475369bd2bce788796aa313036faecb9725194 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 12 Jan 2017 15:48:32 +0100 Subject: s390/qeth: rework RX/TX checksum offload Rework the RX/TX checksum offloading command sequence to use the provided function call back mechanims to return card data to the device driver. Signed-off-by: Thomas Richter Reviewed-by: Julian Wiedmann Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 - drivers/s390/net/qeth_core_main.c | 96 ++++++++++++++++++++++++++------------- drivers/s390/net/qeth_core_mpc.h | 7 +++ 3 files changed, 72 insertions(+), 33 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6d4b68c483f3..41e46654e591 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -674,8 +674,6 @@ struct qeth_card_info { int broadcast_capable; int unique_id; struct qeth_card_blkt blkt; - __u32 csum_mask; - __u32 tx_csum_mask; enum qeth_ipa_promisc_modes promisc_mode; __u32 diagass_support; __u32 hwtrap; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e33558313834..5ab80ea0e099 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5289,18 +5289,6 @@ int qeth_setassparms_cb(struct qeth_card *card, if (cmd->hdr.prot_version == QETH_PROT_IPV6) card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; } - if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask); - } - if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.tx_csum_mask = - cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask); - } - return 0; } EXPORT_SYMBOL_GPL(qeth_setassparms_cb); @@ -6060,23 +6048,78 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, } EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); +/* Callback to handle checksum offload command reply from OSA card. + * Verify that required features have been enabled on the card. + * Return error in hdr->return_code as this value is checked by caller. + * + * Always returns zero to indicate no further messages from the OSA card. + */ +static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_checksum_cmd *chksum_cb = + (struct qeth_checksum_cmd *)reply->param; + + QETH_CARD_TEXT(card, 4, "chkdoccb"); + if (cmd->hdr.return_code) + return 0; + + memset(chksum_cb, 0, sizeof(*chksum_cb)); + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported); + } + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + chksum_cb->enabled = + cmd->data.setassparms.data.chksum.enabled; + QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported); + QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled); + } + return 0; +} + +/* Send command to OSA card and check results. 
*/ +static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, long data, + struct qeth_checksum_cmd *chksum_cb) +{ + struct qeth_cmd_buffer *iob; + int rc = -ENOMEM; + + QETH_CARD_TEXT(card, 4, "chkdocmd"); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, + sizeof(__u32), QETH_PROT_IPV4); + if (iob) + rc = qeth_send_setassparms(card, iob, sizeof(__u32), data, + qeth_ipa_checksum_run_cmd_cb, + chksum_cb); + return rc; +} + static int qeth_send_checksum_on(struct qeth_card *card, int cstype) { - long rxtx_arg; + struct qeth_checksum_cmd chksum_cb; int rc; - rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0); + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, + &chksum_cb); if (rc) { + qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, "Starting HW checksumming for %s failed, using SW checksumming\n", QETH_CARD_IFNAME(card)); return rc; } - rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask - : card->info.csum_mask; - rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE, - rxtx_arg); + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, + chksum_cb.supported, &chksum_cb); if (rc) { + qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s failed, using SW checksumming\n", QETH_CARD_IFNAME(card)); @@ -6090,19 +6133,10 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype) static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype) { - int rc; - - if (on) { - rc = qeth_send_checksum_on(card, cstype); - if (rc) - return -EIO; - } else { - rc = qeth_send_simple_setassparms(card, cstype, - IPA_CMD_ASS_STOP, 0); - if (rc) - return -EIO; - } - return 0; + int rc = (on) ? qeth_send_checksum_on(card, cstype) + : qeth_send_simple_setassparms(card, cstype, + IPA_CMD_ASS_STOP, 0); + return rc ? -EIO : 0; } static int qeth_set_ipa_tso(struct qeth_card *card, int on) diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 6cccc9a49ede..f54ea7224b89 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -352,11 +352,18 @@ struct qeth_arp_query_info { char *udata; }; +/* IPA Assist checksum offload reply layout. */ +struct qeth_checksum_cmd { + __u32 supported; + __u32 enabled; +} __packed; + /* SETASSPARMS IPA Command: */ struct qeth_ipacmd_setassparms { struct qeth_ipacmd_setassparms_hdr hdr; union { __u32 flags_32bit; + struct qeth_checksum_cmd chksum; struct qeth_arp_cache_entry add_arp_entry; struct qeth_arp_query_data query_arp; __u8 ip[16]; -- cgit v1.2.3 From f9d8e6dc0fdee06e3eaf779a52530f4b8be6966f Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 12 Jan 2017 15:48:33 +0100 Subject: s390/qeth: test RX/TX checksum offload reply Turning on receive and/or transmit checksum offload support on the OSA card requires 2 commands: 1. start command which replies with available features 2. enable command to turn on selected features. The current version does not check the reply of the start command and simply uses the returned value to enable offload features. When the start command returns zero, this leads to a situation where no checksum offload is turned on by the hardware. Even worse no error indication is returned. The Linux kernel assumes the OSA card performs RX/TX checksum offload, but the hardware does not perform any checksum verification at all. 
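As a minimal stand-alone sketch of the kind of check that is needed here (illustration only, not the kernel code itself: the helper name and the sample reply values are invented, while the bit values mirror enum qeth_ipa_checksum_bits added by this patch), every reply to the two-step START/ENABLE sequence must carry all of the required checksum bits, otherwise offloading has to be switched back off:

#include <stdio.h>
#include <stdint.h>

#define QETH_IPA_CHECKSUM_IP_HDR 0x0002u
#define QETH_IPA_CHECKSUM_UDP    0x0008u
#define QETH_IPA_CHECKSUM_TCP    0x0010u

/* Return 1 if a START/ENABLE reply carries all required checksum bits. */
static int csum_reply_ok(uint32_t reply_bits)
{
	const uint32_t required = QETH_IPA_CHECKSUM_IP_HDR |
				  QETH_IPA_CHECKSUM_UDP |
				  QETH_IPA_CHECKSUM_TCP;

	return (reply_bits & required) == required;
}

int main(void)
{
	/* hypothetical card replies */
	printf("%d\n", csum_reply_ok(0x003a)); /* 1: IP_HDR+UDP+TCP (and LP2LP) present */
	printf("%d\n", csum_reply_ok(0x000a)); /* 0: TCP bit missing, fall back to SW checksumming */
	return 0;
}

In the driver the same mask test is applied to chksum_cb.supported after IPA_CMD_ASS_START and to chksum_cb.enabled after IPA_CMD_ASS_ENABLE, as the diff below shows.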
This patch checks the return of the start and enable command responses from the hardware and turns off checksum offloading if the commands fails or does not respond with the correct bit setting. Signed-off-by: Thomas Richter Reviewed-by: Julian Wiedmann Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 13 +++++++++++++ drivers/s390/net/qeth_core_mpc.h | 10 ++++++++++ 2 files changed, 23 insertions(+) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 5ab80ea0e099..49b813f8f91b 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -6104,11 +6104,19 @@ static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, static int qeth_send_checksum_on(struct qeth_card *card, int cstype) { + const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR | + QETH_IPA_CHECKSUM_UDP | + QETH_IPA_CHECKSUM_TCP; struct qeth_checksum_cmd chksum_cb; int rc; rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, &chksum_cb); + if (!rc) { + if ((required_features & chksum_cb.supported) != + required_features) + rc = -EIO; + } if (rc) { qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, @@ -6118,6 +6126,11 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype) } rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, chksum_cb.supported, &chksum_cb); + if (!rc) { + if ((required_features & chksum_cb.enabled) != + required_features) + rc = -EIO; + } if (rc) { qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index f54ea7224b89..bc69d0a338ad 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -352,6 +352,16 @@ struct qeth_arp_query_info { char *udata; }; +/* IPA set assist segmentation bit definitions for receive and + * transmit checksum offloading. + */ +enum qeth_ipa_checksum_bits { + QETH_IPA_CHECKSUM_IP_HDR = 0x0002, + QETH_IPA_CHECKSUM_UDP = 0x0008, + QETH_IPA_CHECKSUM_TCP = 0x0010, + QETH_IPA_CHECKSUM_LP2LP = 0x0020 +}; + /* IPA Assist checksum offload reply layout. */ struct qeth_checksum_cmd { __u32 supported; -- cgit v1.2.3 From dae84c8e2a88fab45ff943675410b6c9c0d96a15 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 12 Jan 2017 15:48:34 +0100 Subject: s390/qeth: display warning for OSA3 RX/TX checksum offloading When RX/TX checksum offloading is turned on and the adapter is an OSA 3 card in layer 3 mode, the checksum offloading is only performed when both peers use different adapters. If both peers share an OSA 3 card, communication is a memory copy and checksum offloading is not performed. This patch adds a warning to inform the administrator. OSA 3 in layer 2 mode does not offer the RX/TX checksum offload feature. Signed-off-by: Thomas Richter Reviewed-by: Julian Wiedmann Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 49b813f8f91b..ca8309ff3ad4 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -6116,6 +6116,11 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype) if ((required_features & chksum_cb.supported) != required_features) rc = -EIO; + else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) && + cstype == IPA_INBOUND_CHECKSUM) + dev_warn(&card->gdev->dev, + "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", + QETH_CARD_IFNAME(card)); } if (rc) { qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); -- cgit v1.2.3 From dadc08c7e01907be1116293aa72641a5f560ea4a Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:35 +0100 Subject: s390/qeth: Allow reading hsuid in state DOWN Accessing the current hsuid via card->options.hsuid is perfectly fine, even when the card is DOWN. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Acked-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_sys.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 0e00a5ce0f00..3cd4d9f7d5bd 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -250,9 +250,6 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; - if (card->state == CARD_STATE_DOWN) - return -EPERM; - memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); EBCASC(tmp_hsuid, 8); return sprintf(buf, "%s\n", tmp_hsuid); -- cgit v1.2.3 From c2a7ee2a3beebc1d870c6ba20acf94383d7978f9 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:36 +0100 Subject: s390/qeth: Remove QETH_IP_HEADER_SIZE Remove unused define QETH_IP_HEADER_SIZE. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Acked-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 41e46654e591..774ae51569cb 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -281,8 +281,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_HIGH_WATERMARK_PACK 5 #define QETH_WATERMARK_PACK_FUZZ 1 -#define QETH_IP_HEADER_SIZE 40 - /* large receive scatter gather copy break */ #define QETH_RX_SG_CB (PAGE_SIZE >> 1) #define QETH_RX_PULL_LEN 256 -- cgit v1.2.3 From c07cbf2e209198526e059b9e6bb538b18875a19d Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:37 +0100 Subject: s390/qeth: drop qeth_l2_del_all_macs() parameter The only caller passes del = 0, so remove both the parameter and the code that handles != 0. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Acked-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9c921c2833f1..3025f56319e2 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -216,7 +216,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) return rc; } -static void qeth_l2_del_all_macs(struct qeth_card *card, int del) +static void qeth_l2_del_all_macs(struct qeth_card *card) { struct qeth_mac *mac; struct hlist_node *tmp; @@ -224,13 +224,6 @@ static void qeth_l2_del_all_macs(struct qeth_card *card, int del) spin_lock_bh(&card->mclock); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { - if (del) { - if (mac->is_uc) - qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_DELVMAC); - else - qeth_l2_send_delgroupmac(card, mac->mac_addr); - } hash_del(&mac->hnode); kfree(mac); } @@ -425,7 +418,7 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l2_del_all_macs(card, 0); + qeth_l2_del_all_macs(card); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } -- cgit v1.2.3 From 4b764d1de395090662fd0291968d5ded523f07e4 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:38 +0100 Subject: s390/qeth: don't convert return code twice qeth_l2_send_groupmac() already translates the return code, so calling qeth_setdel_makerc() a second time only produces garbage. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l2_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 3025f56319e2..38fae10b3479 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -210,8 +210,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) qeth_l2_send_setdelmac(card, mac->mac_addr, IPA_CMD_SETVMAC)); } else { - rc = qeth_setdel_makerc(card, - qeth_l2_send_setgroupmac(card, mac->mac_addr)); + rc = qeth_l2_send_setgroupmac(card, mac->mac_addr); } return rc; } -- cgit v1.2.3 From 754e0b8d92e5ba24a711d51b5fdbbd211e2fdd24 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:39 +0100 Subject: s390/qeth: consolidate errno translation Consolidate errno handling for MAC management: Instead of doing this in every caller, do it in one place. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Suggested-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 38fae10b3479..074fc62649e2 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -170,8 +170,7 @@ static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) int rc; QETH_CARD_TEXT(card, 2, "L2Sgmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_SETGMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC); if (rc == -EEXIST) QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", mac, QETH_CARD_IFNAME(card)); @@ -186,8 +185,7 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) int rc; QETH_CARD_TEXT(card, 2, "L2Dgmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_DELGMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC); if (rc) QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %d\n", @@ -206,9 +204,8 @@ static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) int rc; if (mac->is_uc) { - rc = qeth_setdel_makerc(card, - qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_SETVMAC)); + rc = qeth_l2_send_setdelmac(card, mac->mac_addr, + IPA_CMD_SETVMAC); } else { rc = qeth_l2_send_setgroupmac(card, mac->mac_addr); } @@ -582,7 +579,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); - return qeth_send_ipa_cmd(card, iob, NULL, NULL); + return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob, + NULL, NULL)); } static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) @@ -590,8 +588,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) int rc; QETH_CARD_TEXT(card, 2, "L2Setmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_SETVMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); if (rc == 0) { card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); @@ -621,8 +618,7 @@ static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) QETH_CARD_TEXT(card, 2, "L2Delmac"); if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) return 0; - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_DELVMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); if (rc == 0) card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; return rc; -- cgit v1.2.3 From 979d79292af327a12e913ef17f29b85428d0ba0f Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:40 +0100 Subject: s390/qeth: extract qeth_l2_remove_mac() This matches qeth_l2_write_mac(). Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 074fc62649e2..d456740904ef 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -200,16 +200,22 @@ static inline u32 qeth_l2_mac_hash(const u8 *addr) static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) { - - int rc; - if (mac->is_uc) { - rc = qeth_l2_send_setdelmac(card, mac->mac_addr, + return qeth_l2_send_setdelmac(card, mac->mac_addr, IPA_CMD_SETVMAC); } else { - rc = qeth_l2_send_setgroupmac(card, mac->mac_addr); + return qeth_l2_send_setgroupmac(card, mac->mac_addr); + } +} + +static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac) +{ + if (mac->is_uc) { + return qeth_l2_send_setdelmac(card, mac->mac_addr, + IPA_CMD_DELVMAC); + } else { + return qeth_l2_send_delgroupmac(card, mac->mac_addr); } - return rc; } static void qeth_l2_del_all_macs(struct qeth_card *card) @@ -782,14 +788,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { if (mac->disp_flag == QETH_DISP_ADDR_DELETE) { - if (!mac->is_uc) - rc = qeth_l2_send_delgroupmac(card, - mac->mac_addr); - else { - rc = qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_DELVMAC); - } - + qeth_l2_remove_mac(card, mac); hash_del(&mac->hnode); kfree(mac); -- cgit v1.2.3 From ac988d78dc998ec1ce909d2f2ddf0f81e88e54d5 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:41 +0100 Subject: s390/qeth: shuffle MAC management functions around Move all MAC utility functions in one place, and drop the forward declarations. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 129 ++++++++++++++++++++-------------------- 1 file changed, 63 insertions(+), 66 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d456740904ef..c298759c30a8 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -27,9 +27,6 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); -static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); -static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, - enum qeth_ipa_cmds); static void qeth_l2_set_rx_mode(struct net_device *); static int qeth_l2_recover(void *); static void qeth_bridgeport_query_support(struct qeth_card *card); @@ -165,6 +162,64 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode) return rc; } +static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "L2sdmac"); + iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; + memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); + return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob, + NULL, NULL)); +} + +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Setmac"); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); + if (rc == 0) { + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); + dev_info(&card->gdev->dev, + "MAC address %pM successfully registered on device %s\n", + card->dev->dev_addr, card->dev->name); + } else { + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + switch (rc) { + case -EEXIST: + dev_warn(&card->gdev->dev, + "MAC address %pM already exists\n", mac); + break; + case -EPERM: + dev_warn(&card->gdev->dev, + "MAC address %pM is not authorized\n", mac); + break; + } + } + return rc; +} + +static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Delmac"); + if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + return 0; + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); + if (rc == 0) + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + return rc; +} + static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) { int rc; @@ -193,11 +248,6 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) return rc; } -static inline u32 qeth_l2_mac_hash(const u8 *addr) -{ - return get_unaligned((u32 *)(&addr[2])); -} - static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) { if (mac->is_uc) { @@ -232,6 +282,11 @@ static void qeth_l2_del_all_macs(struct qeth_card *card) spin_unlock_bh(&card->mclock); } +static inline u32 qeth_l2_mac_hash(const u8 *addr) +{ + return get_unaligned((u32 *)(&addr[2])); +} + static inline int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { @@ -572,64 +627,6 @@ out: return work_done; } -static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, - enum qeth_ipa_cmds ipacmd) -{ - struct qeth_ipa_cmd *cmd; - struct qeth_cmd_buffer *iob; - - QETH_CARD_TEXT(card, 2, "L2sdmac"); - iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); - if (!iob) - return -ENOMEM; - cmd = (struct qeth_ipa_cmd 
*)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; - memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); - return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob, - NULL, NULL)); -} - -static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Setmac"); - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); - if (rc == 0) { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); - dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); - } else { - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - switch (rc) { - case -EEXIST: - dev_warn(&card->gdev->dev, - "MAC address %pM already exists\n", mac); - break; - case -EPERM: - dev_warn(&card->gdev->dev, - "MAC address %pM is not authorized\n", mac); - break; - } - } - return rc; -} - -static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Delmac"); - if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); - if (rc == 0) - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - return rc; -} - static int qeth_l2_request_initial_mac(struct qeth_card *card) { int rc = 0; -- cgit v1.2.3 From 1034051045d125579ab1e8fcd5a724eeb0e70149 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:42 +0100 Subject: s390/qeth: issue STARTLAN as first IPA command STARTLAN needs to be the first IPA command after MPC initialization completes. So move the qeth_send_startlan() call from the layer disciplines into the core path, right after the MPC handshake. While at it, replace the magic LAN OFFLINE return code with the existing enum. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 1 - drivers/s390/net/qeth_core_main.c | 21 +++++++++++++++++---- drivers/s390/net/qeth_l2_main.c | 15 --------------- drivers/s390/net/qeth_l3_main.c | 15 --------------- 4 files changed, 17 insertions(+), 35 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 774ae51569cb..e7addea8741b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -913,7 +913,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); int qeth_core_hardsetup_card(struct qeth_card *); void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); -int qeth_send_startlan(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, int (*reply_cb) (struct qeth_card *, struct qeth_reply *, unsigned long), diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ca8309ff3ad4..315d8a2db7c0 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); -int qeth_send_startlan(struct qeth_card *card) +static int qeth_send_startlan(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_send_startlan); static int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -5087,6 +5086,20 @@ retriable: goto out; } + rc = qeth_send_startlan(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + if (rc == IPA_RC_LAN_OFFLINE) { + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); + card->lan_online = 0; + } else { + rc = -ENODEV; + goto out; + } + } else + card->lan_online = 1; + card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; card->options.adp.supported_funcs = 0; @@ -5098,14 +5111,14 @@ retriable: if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { rc = qeth_query_setadapterparms(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out; } } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out; } } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c298759c30a8..bea483307618 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1177,21 +1177,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: if ((card->info.type == QETH_CARD_TYPE_OSD) || (card->info.type == QETH_CARD_TYPE_OSX)) { rc = qeth_l2_start_ipassists(card); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ac37d050e765..06d0addcc058 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3227,21 +3227,6 @@ static int __qeth_l3_set_online(struct 
ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: rc = qeth_l3_setadapter_parms(card); if (rc) QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); -- cgit v1.2.3 From e48b9eaaa29a0a7d5da2df136b07eefa0180d584 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Thu, 12 Jan 2017 15:48:43 +0100 Subject: s390/qeth: fix retrieval of vipa and proxy-arp addresses qeth devices in layer3 mode need a separate handling of vipa and proxy-arp addresses. vipa and proxy-arp addresses processed by qeth can be read from userspace. Introduced with commit 5f78e29ceebf ("qeth: optimize IP handling in rx_mode callback") the retrieval of vipa and proxy-arp addresses is broken, if more than one vipa or proxy-arp address are set. The qeth code used local variable "int i" for 2 different purposes. This patch now spends 2 separate local variables of type "int". While touching these functions hash_for_each_safe() is converted to hash_for_each(), since there is no removal of hash entries. Signed-off-by: Ursula Braun Reviewed-by: Julian Wiedmann Reference-ID: RQM 3524 Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_sys.c | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 3cd4d9f7d5bd..05e9471e3d3f 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -689,15 +689,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; - struct hlist_node *tmp; char addr_str[40]; + int str_len = 0; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i = 0; + int i; entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_bh(&card->ip_lock); - hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { + hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_VIPA) @@ -705,16 +705,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - i) <= entry_len) + if ((PAGE_SIZE - str_len) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); - i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", + addr_str); } spin_unlock_bh(&card->ip_lock); - i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - return i; + return str_len; } static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, @@ -851,15 +852,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; - struct hlist_node *tmp; char addr_str[40]; + int str_len = 0; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i = 0; + int i; entry_len = (proto == QETH_PROT_IPV4)? 
12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_bh(&card->ip_lock); - hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { + hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_RXIP) @@ -867,16 +868,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - i) <= entry_len) + if ((PAGE_SIZE - str_len) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); - i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", + addr_str); } spin_unlock_bh(&card->ip_lock); - i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - return i; + return str_len; } static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, -- cgit v1.2.3 From 79471b10d67a52f5b3744d2e14c06437a65746f2 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 12 Jan 2017 14:39:28 +0000 Subject: lwt_bpf: bpf_lwt_prog_cmp() can be static Fixes the following sparse warning: net/core/lwt_bpf.c:355:5: warning: symbol 'bpf_lwt_prog_cmp' was not declared. Should it be static? Signed-off-by: Wei Yongjun Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- net/core/lwt_bpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 71bb3e2eca08..40ef8ae8d93d 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -352,7 +352,7 @@ static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate) 0; } -int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b) +static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b) { /* FIXME: * The LWT state is currently rebuilt for delete requests which -- cgit v1.2.3 From 4d7b9dc1f36a99423a6171d393040165fb135530 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Thu, 12 Jan 2017 05:10:11 -0800 Subject: tools: psock_lib: harden socket filter used by psock tests The filter added by sock_setfilter is intended to only permit packets matching the pattern set up by create_payload(), but we only check the ip_len, and a single test-character in the IP packet to ensure this condition. Harden the filter by adding additional constraints so that we only permit UDP/IPv4 packets that meet the ip_len and test-character requirements. Include the bpf_asm src as a comment, in case this needs to be enhanced in the future Signed-off-by: Sowmini Varadhan Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/psock_lib.h | 39 +++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h index 24bc7ec1be7d..a77da88bf946 100644 --- a/tools/testing/selftests/net/psock_lib.h +++ b/tools/testing/selftests/net/psock_lib.h @@ -40,14 +40,39 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum) { + /* the filter below checks for all of the following conditions that + * are based on the contents of create_payload() + * ether type 0x800 and + * ip proto udp and + * skb->len == DATA_LEN and + * udp[38] == 'a' or udp[38] == 'b' + * It can be generated from the following bpf_asm input: + * ldh [12] + * jne #0x800, drop ; ETH_P_IP + * ldb [23] + * jneq #17, drop ; IPPROTO_UDP + * ld len ; ld skb->len + * jlt #100, drop ; DATA_LEN + * ldb [80] + * jeq #97, pass ; DATA_CHAR + * jne #98, drop ; DATA_CHAR_1 + * pass: + * ret #-1 + * drop: + * ret #0 + */ struct sock_filter bpf_filter[] = { - { 0x80, 0, 0, 0x00000000 }, /* LD pktlen */ - { 0x35, 0, 4, DATA_LEN }, /* JGE DATA_LEN [f goto nomatch]*/ - { 0x30, 0, 0, 0x00000050 }, /* LD ip[80] */ - { 0x15, 1, 0, DATA_CHAR }, /* JEQ DATA_CHAR [t goto match]*/ - { 0x15, 0, 1, DATA_CHAR_1}, /* JEQ DATA_CHAR_1 [t goto match]*/ - { 0x06, 0, 0, 0x00000060 }, /* RET match */ - { 0x06, 0, 0, 0x00000000 }, /* RET no match */ + { 0x28, 0, 0, 0x0000000c }, + { 0x15, 0, 8, 0x00000800 }, + { 0x30, 0, 0, 0x00000017 }, + { 0x15, 0, 6, 0x00000011 }, + { 0x80, 0, 0, 0000000000 }, + { 0x35, 0, 4, 0x00000064 }, + { 0x30, 0, 0, 0x00000050 }, + { 0x15, 1, 0, 0x00000061 }, + { 0x15, 0, 1, 0x00000062 }, + { 0x06, 0, 0, 0xffffffff }, + { 0x06, 0, 0, 0000000000 }, }; struct sock_fprog bpf_prog; -- cgit v1.2.3 From 37c9782c7306efcc831f7c989c0b7ab040956089 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 12 Jan 2017 13:43:47 +0000 Subject: cdc-ether: usbnet_cdc_zte_status() can be static Fixes the following sparse warning: drivers/net/usb/cdc_ether.c:469:6: warning: symbol 'usbnet_cdc_zte_status' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/usb/cdc_ether.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index fe7b2886cb6b..620ba8e530b5 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -466,7 +466,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * connected. This causes the link state to be incorrect. Work around this by * always setting the state to off, then on. */ -void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) +static void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) { struct usb_cdc_notification *event; -- cgit v1.2.3 From 08d9665cfed37b51766284ca2f9b6e4f3ccd6959 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 11 Jan 2017 12:59:49 -0800 Subject: net: mdio-gpio: Use devm_gpio_request_one instead of devm_gpio_request Using devm_gpio_request_one lets us request gpio pins with initial state in one go. Signed-off-by: Guenter Roeck Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-gpio.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 27ab63064f95..f6e773256c82 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -137,6 +137,9 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; int i; + unsigned long mdc_flags = GPIOF_OUT_INIT_LOW; + unsigned long mdio_flags = GPIOF_DIR_IN; + unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH; bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL); if (!bitbang) @@ -174,21 +177,17 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, else strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); - if (devm_gpio_request(dev, bitbang->mdc, "mdc")) + if (devm_gpio_request_one(dev, bitbang->mdc, mdc_flags, "mdc")) goto out_free_bus; - if (devm_gpio_request(dev, bitbang->mdio, "mdio")) + if (devm_gpio_request_one(dev, bitbang->mdio, mdio_flags, "mdio")) goto out_free_bus; if (bitbang->mdo) { - if (devm_gpio_request(dev, bitbang->mdo, "mdo")) + if (devm_gpio_request_one(dev, bitbang->mdo, mdo_flags, "mdo")) goto out_free_bus; - gpio_direction_output(bitbang->mdo, 1); - gpio_direction_input(bitbang->mdio); } - gpio_direction_output(bitbang->mdc, 0); - dev_set_drvdata(dev, new_bus); return new_bus; -- cgit v1.2.3 From 7e5fbd1e0700f1bdb94508f84ec2aeb01eed7b12 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 11 Jan 2017 12:59:50 -0800 Subject: net: mdio-gpio: Convert to use gpiod functions where possible Using gpiod functions lets us use functionality which is not available with gpio functions. There is no gpiod function to match devm_gpio_request_one, so leave it in place and use gpio_to_desc() to convert absolute pin numbers to gpio descriptors. Signed-off-by: Guenter Roeck Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 43 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index f6e773256c82..e62fcea2d945 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -32,7 +32,7 @@ struct mdio_gpio_info { struct mdiobb_ctrl ctrl; - int mdc, mdio, mdo; + struct gpio_desc *mdc, *mdio, *mdo; int mdc_active_low, mdio_active_low, mdo_active_low; }; @@ -80,16 +80,15 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. 
*/ - gpio_set_value_cansleep(bitbang->mdo, - 1 ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); return; } if (dir) - gpio_direction_output(bitbang->mdio, - 1 ^ bitbang->mdio_active_low); + gpiod_direction_output(bitbang->mdio, + 1 ^ bitbang->mdio_active_low); else - gpio_direction_input(bitbang->mdio); + gpiod_direction_input(bitbang->mdio); } static int mdio_get(struct mdiobb_ctrl *ctrl) @@ -97,8 +96,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpio_get_value_cansleep(bitbang->mdio) ^ - bitbang->mdio_active_low; + return gpiod_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -107,11 +105,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpio_set_value_cansleep(bitbang->mdo, - what ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); else - gpio_set_value_cansleep(bitbang->mdio, - what ^ bitbang->mdio_active_low); + gpiod_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -119,7 +115,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low); + gpiod_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); } static struct mdiobb_ops mdio_gpio_ops = { @@ -137,6 +133,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; int i; + int mdc, mdio, mdo; unsigned long mdc_flags = GPIOF_OUT_INIT_LOW; unsigned long mdio_flags = GPIOF_DIR_IN; unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH; @@ -147,11 +144,15 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, bitbang->ctrl.ops = &mdio_gpio_ops; bitbang->ctrl.reset = pdata->reset; - bitbang->mdc = pdata->mdc; + mdc = pdata->mdc; + bitbang->mdc = gpio_to_desc(mdc); bitbang->mdc_active_low = pdata->mdc_active_low; - bitbang->mdio = pdata->mdio; + mdio = pdata->mdio; + bitbang->mdio = gpio_to_desc(mdio); bitbang->mdio_active_low = pdata->mdio_active_low; - bitbang->mdo = pdata->mdo; + mdo = pdata->mdo; + if (mdo) + bitbang->mdo = gpio_to_desc(mdo); bitbang->mdo_active_low = pdata->mdo_active_low; new_bus = alloc_mdio_bitbang(&bitbang->ctrl); @@ -177,16 +178,14 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, else strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); - if (devm_gpio_request_one(dev, bitbang->mdc, mdc_flags, "mdc")) + if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc")) goto out_free_bus; - if (devm_gpio_request_one(dev, bitbang->mdio, mdio_flags, "mdio")) + if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio")) goto out_free_bus; - if (bitbang->mdo) { - if (devm_gpio_request_one(dev, bitbang->mdo, mdo_flags, "mdo")) - goto out_free_bus; - } + if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo")) + goto out_free_bus; dev_set_drvdata(dev, new_bus); -- cgit v1.2.3 From 52aab18ef54ba9e71444b1d289a4a78ea374423b Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 11 Jan 2017 12:59:51 -0800 Subject: net: mdio-gpio: Use gpio subsystem to handle low-active pins gpiod functions support handling low-active pins, so we can move thos code out of this driver into the gpio subsystem and simplify the code a 
bit. Signed-off-by: Guenter Roeck Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index e62fcea2d945..7faa79b254ef 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -33,7 +33,6 @@ struct mdio_gpio_info { struct mdiobb_ctrl ctrl; struct gpio_desc *mdc, *mdio, *mdo; - int mdc_active_low, mdio_active_low, mdo_active_low; }; static void *mdio_gpio_of_get_data(struct platform_device *pdev) @@ -80,13 +79,12 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. */ - gpiod_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, 1); return; } if (dir) - gpiod_direction_output(bitbang->mdio, - 1 ^ bitbang->mdio_active_low); + gpiod_direction_output(bitbang->mdio, 1); else gpiod_direction_input(bitbang->mdio); } @@ -96,7 +94,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpiod_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; + return gpiod_get_value(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -105,9 +103,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpiod_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, what); else - gpiod_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); + gpiod_set_value(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -115,7 +113,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpiod_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); + gpiod_set_value(bitbang->mdc, what); } static struct mdiobb_ops mdio_gpio_ops = { @@ -146,14 +144,18 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, bitbang->ctrl.reset = pdata->reset; mdc = pdata->mdc; bitbang->mdc = gpio_to_desc(mdc); - bitbang->mdc_active_low = pdata->mdc_active_low; + if (pdata->mdc_active_low) + mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW; mdio = pdata->mdio; bitbang->mdio = gpio_to_desc(mdio); - bitbang->mdio_active_low = pdata->mdio_active_low; + if (pdata->mdio_active_low) + mdio_flags |= GPIOF_ACTIVE_LOW; mdo = pdata->mdo; - if (mdo) + if (mdo) { bitbang->mdo = gpio_to_desc(mdo); - bitbang->mdo_active_low = pdata->mdo_active_low; + if (pdata->mdo_active_low) + mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW; + } new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) -- cgit v1.2.3 From b65b09aa79086f0f2aa8935eae410f87495b8c04 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 11 Jan 2017 14:52:20 -0800 Subject: tilepro: Fix non-void return from void function commit bc1f44709cf2 ("net: make ndo_get_stats64 a void function") mistakenly used a return value for this void conversion. Fix it. Signed-off-by: Joe Perches cc: stephen hemminger Signed-off-by: David S. 
Miller --- drivers/net/ethernet/tile/tilepro.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 30cfea62a356..44f153713791 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -2090,12 +2090,8 @@ static void tile_net_get_stats64(struct net_device *dev, stats->tx_bytes = tx_bytes; stats->rx_errors = rx_errors; stats->rx_dropped = rx_dropped; - - return stats; } - - /* * Change the Ethernet Address of the NIC. * -- cgit v1.2.3 From cb2336b5965d95d03e46d145d5e11aea4eaab0ad Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Wed, 11 Jan 2017 17:09:02 -0800 Subject: liquidio: remove unnecessary code Remove code that's no longer needed. It used to serve a purpose, which was to fix a link-related bug. For a while now, the NIC firmware has had a more elegant fix for that bug. Signed-off-by: Felix Manlunas Signed-off-by: Derek Chickles Signed-off-by: Satanand Burla Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index b8b579d8043f..cc825d574e8e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2693,13 +2693,7 @@ static int liquidio_stop(struct net_device *netdev) lio->linfo.link.s.link_up = 0; lio->link_changes++; - /* Pause for a moment and wait for Octeon to flush out (to the wire) any - * egress packets that are in-flight. - */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(100)); - - /* Now it should be safe to tell Octeon that nic interface is down. */ + /* Tell Octeon that nic interface is down. */ send_rx_ctrl_cmd(lio, 0); if (OCTEON_CN23XX_PF(oct)) { -- cgit v1.2.3 From a8ac1a55d0859cf8f53db3275ad4a6480868e80c Mon Sep 17 00:00:00 2001 From: Prasad Kanneganti Date: Wed, 11 Jan 2017 17:40:27 -0800 Subject: liquidio VF: reduce load time of module Reduce the load time of the VF driver by decreasing the wait time between iterations of the loop that polls for a mailbox response from the PF. Also change the wait time units from jiffies to milliseconds. Signed-off-by: Prasad Kanneganti Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: Derek Chickles Signed-off-by: Satanand Burla Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 5 +++-- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 73696b427f06..201b9875f9bb 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct, { struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS; + long timeout = LIO_MBOX_WRITE_WAIT_TIME; unsigned long flags; spin_lock_irqsave(&mbox->lock, flags); @@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct, count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) { - schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; @@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct, count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFACK) { - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h index fe60a3e6247b..c9376fe075bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -31,8 +31,8 @@ #define OCTEON_PFVFSIG 0x1122334455667788 #define OCTEON_PFVFERR 0xDEADDEADDEADDEAD -#define LIO_MBOX_WRITE_WAIT_CNT 1000 -#define LIO_MBOX_WRITE_WAIT_TIME 10 +#define LIO_MBOX_WRITE_WAIT_CNT 1000 +#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1) enum octeon_mbox_cmd_status { OCTEON_MBOX_STATUS_SUCCESS = 0, -- cgit v1.2.3 From c1ce1560a1ae1a58505c26bc0e46ce1aee982d54 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 11 Jan 2017 18:10:37 -0800 Subject: secure_seq: fix sparse errors Fixes following warnings : net/core/secure_seq.c:125:28: warning: incorrect type in argument 1 (different base types) net/core/secure_seq.c:125:28: expected unsigned int const [unsigned] [usertype] a net/core/secure_seq.c:125:28: got restricted __be32 [usertype] saddr net/core/secure_seq.c:125:35: warning: incorrect type in argument 2 (different base types) net/core/secure_seq.c:125:35: expected unsigned int const [unsigned] [usertype] b net/core/secure_seq.c:125:35: got restricted __be32 [usertype] daddr net/core/secure_seq.c:125:43: warning: cast from restricted __be16 net/core/secure_seq.c:125:61: warning: restricted __be16 degrades to integer Fixes: 7cd23e5300c1 ("secure_seq: use SipHash in place of MD5") Signed-off-by: Eric Dumazet Reviewed-by: Jason A. Donenfeld Signed-off-by: David S. 
Miller --- net/core/secure_seq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 3a9fcec94ace..758f140b6bed 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -122,7 +122,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, { u64 seq; net_secret_init(); - seq = siphash_3u32(saddr, daddr, (u32)sport << 16 | dport, &net_secret); + seq = siphash_3u32((__force u32)saddr, (__force u32)daddr, + (__force u32)sport << 16 | (__force u32)dport, + &net_secret); seq += ktime_get_real_ns(); seq &= (1ull << 48) - 1; return seq; -- cgit v1.2.3 From 8fb472c09b9df478a062eacc7841448e40fc3c17 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 12 Jan 2017 15:53:33 +0100 Subject: ipmr: improve hash scalability Recently we started using ipmr with thousands of entries and easily hit soft lockups on smaller devices. The reason is that the hash function uses the high order bits from the src and dst, but those don't change in many common cases, also the hash table is only 64 elements so with thousands it doesn't scale at all. This patch migrates the hash table to rhashtable, and in particular the rhl interface which allows for duplicate elements to be chained because of the MFC_PROXY support (*,G; *,*,oif cases) which allows for multiple duplicate entries to be added with different interfaces (IMO wrong, but it's been in for a long time). And here are some results from tests I've run in a VM: mr_table size (default, allocated for all namespaces): Before After 49304 bytes 2400 bytes Add 65000 routes (the diff is much larger on smaller devices): Before After 1m42s 58s Forwarding 256 byte packets with 65000 routes (test done in a VM): Before After 3 Mbps / ~1465 pps 122 Mbps / ~59000 pps As a bonus we no longer see the soft lockups on smaller devices which showed up even with 2000 entries before. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/linux/mroute.h | 57 ++++++++--- net/ipv4/ipmr.c | 255 +++++++++++++++++++++++++++---------------------- 2 files changed, 182 insertions(+), 130 deletions(-) diff --git a/include/linux/mroute.h b/include/linux/mroute.h index f019b62f27b5..d7f63339ef0b 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -60,7 +61,6 @@ struct vif_device { #define VIFF_STATIC 0x8000 #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) -#define MFC_LINES 64 struct mr_table { struct list_head list; @@ -69,8 +69,9 @@ struct mr_table { struct sock __rcu *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; - struct list_head mfc_cache_array[MFC_LINES]; struct vif_device vif_table[MAXVIFS]; + struct rhltable mfc_hash; + struct list_head mfc_cache_list; int maxvif; atomic_t cache_resolve_queue_len; bool mroute_do_assert; @@ -85,17 +86,48 @@ enum { MFC_STATIC = BIT(0), }; +struct mfc_cache_cmp_arg { + __be32 mfc_mcastgrp; + __be32 mfc_origin; +}; + +/** + * struct mfc_cache - multicast routing entries + * @mnode: rhashtable list + * @mfc_mcastgrp: destination multicast group address + * @mfc_origin: source address + * @cmparg: used for rhashtable comparisons + * @mfc_parent: source interface (iif) + * @mfc_flags: entry flags + * @expires: unresolved entry expire time + * @unresolved: unresolved cached skbs + * @last_assert: time of last assert + * @minvif: minimum VIF id + * @maxvif: maximum VIF id + * @bytes: bytes that have passed for this entry + * @pkt: packets that have passed for this entry + * @wrong_if: number of wrong source interface hits + * @lastuse: time of last use of the group (traffic or update) + * @ttls: OIF TTL threshold array + * @list: global entry list + * @rcu: used for entry destruction + */ struct mfc_cache { - struct list_head list; - __be32 mfc_mcastgrp; /* Group the entry belongs to */ - __be32 mfc_origin; /* Source of packet */ - vifi_t mfc_parent; /* Source interface */ - int mfc_flags; /* Flags on line */ + struct rhlist_head mnode; + union { + struct { + __be32 mfc_mcastgrp; + __be32 mfc_origin; + }; + struct mfc_cache_cmp_arg cmparg; + }; + vifi_t mfc_parent; + int mfc_flags; union { struct { unsigned long expires; - struct sk_buff_head unresolved; /* Unresolved buffers */ + struct sk_buff_head unresolved; } unres; struct { unsigned long last_assert; @@ -105,18 +137,13 @@ struct mfc_cache { unsigned long pkt; unsigned long wrong_if; unsigned long lastuse; - unsigned char ttls[MAXVIFS]; /* TTL thresholds */ + unsigned char ttls[MAXVIFS]; } res; } mfc_un; + struct list_head list; struct rcu_head rcu; }; -#ifdef __BIG_ENDIAN -#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1)) -#else -#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1)) -#endif - struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 824c4fdf21eb..beacd028848c 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -299,10 +299,29 @@ static void __net_exit ipmr_rules_exit(struct net *net) } #endif +static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg, + const void *ptr) +{ + const struct mfc_cache_cmp_arg *cmparg = arg->key; + struct mfc_cache *c = (struct mfc_cache *)ptr; + + return cmparg->mfc_mcastgrp != c->mfc_mcastgrp || + cmparg->mfc_origin != c->mfc_origin; +} + +static const 
struct rhashtable_params ipmr_rht_params = { + .head_offset = offsetof(struct mfc_cache, mnode), + .key_offset = offsetof(struct mfc_cache, cmparg), + .key_len = sizeof(struct mfc_cache_cmp_arg), + .nelem_hint = 3, + .locks_mul = 1, + .obj_cmpfn = ipmr_hash_cmp, + .automatic_shrinking = true, +}; + static struct mr_table *ipmr_new_table(struct net *net, u32 id) { struct mr_table *mrt; - unsigned int i; /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */ if (id != RT_TABLE_DEFAULT && id >= 1000000000) @@ -318,10 +337,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) write_pnet(&mrt->net, net); mrt->id = id; - /* Forwarding cache */ - for (i = 0; i < MFC_LINES; i++) - INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); - + rhltable_init(&mrt->mfc_hash, &ipmr_rht_params); + INIT_LIST_HEAD(&mrt->mfc_cache_list); INIT_LIST_HEAD(&mrt->mfc_unres_queue); setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, @@ -338,6 +355,7 @@ static void ipmr_free_table(struct mr_table *mrt) { del_timer_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, true); + rhltable_destroy(&mrt->mfc_hash); kfree(mrt); } @@ -839,13 +857,17 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin, __be32 mcastgrp) { - int line = MFC_HASH(mcastgrp, origin); + struct mfc_cache_cmp_arg arg = { + .mfc_mcastgrp = mcastgrp, + .mfc_origin = origin + }; + struct rhlist_head *tmp, *list; struct mfc_cache *c; - list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) { - if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) - return c; - } + list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params); + rhl_for_each_entry_rcu(c, tmp, list, mnode) + return c; + return NULL; } @@ -853,13 +875,16 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt, int vifi) { - int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY)); + struct mfc_cache_cmp_arg arg = { + .mfc_mcastgrp = htonl(INADDR_ANY), + .mfc_origin = htonl(INADDR_ANY) + }; + struct rhlist_head *tmp, *list; struct mfc_cache *c; - list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) - if (c->mfc_origin == htonl(INADDR_ANY) && - c->mfc_mcastgrp == htonl(INADDR_ANY) && - c->mfc_un.res.ttls[vifi] < 255) + list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params); + rhl_for_each_entry_rcu(c, tmp, list, mnode) + if (c->mfc_un.res.ttls[vifi] < 255) return c; return NULL; @@ -869,29 +894,51 @@ static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt, static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt, __be32 mcastgrp, int vifi) { - int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY)); + struct mfc_cache_cmp_arg arg = { + .mfc_mcastgrp = mcastgrp, + .mfc_origin = htonl(INADDR_ANY) + }; + struct rhlist_head *tmp, *list; struct mfc_cache *c, *proxy; if (mcastgrp == htonl(INADDR_ANY)) goto skip; - list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) - if (c->mfc_origin == htonl(INADDR_ANY) && - c->mfc_mcastgrp == mcastgrp) { - if (c->mfc_un.res.ttls[vifi] < 255) - return c; - - /* It's ok if the vifi is part of the static tree */ - proxy = ipmr_cache_find_any_parent(mrt, - c->mfc_parent); - if (proxy && proxy->mfc_un.res.ttls[vifi] < 255) - return c; - } + list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params); + rhl_for_each_entry_rcu(c, tmp, list, mnode) { + if (c->mfc_un.res.ttls[vifi] < 255) + return c; + + /* It's ok if the vifi is part of the static tree */ + proxy = 
ipmr_cache_find_any_parent(mrt, c->mfc_parent); + if (proxy && proxy->mfc_un.res.ttls[vifi] < 255) + return c; + } skip: return ipmr_cache_find_any_parent(mrt, vifi); } +/* Look for a (S,G,iif) entry if parent != -1 */ +static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt, + __be32 origin, __be32 mcastgrp, + int parent) +{ + struct mfc_cache_cmp_arg arg = { + .mfc_mcastgrp = mcastgrp, + .mfc_origin = origin, + }; + struct rhlist_head *tmp, *list; + struct mfc_cache *c; + + list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params); + rhl_for_each_entry_rcu(c, tmp, list, mnode) + if (parent == -1 || parent == c->mfc_parent) + return c; + + return NULL; +} + /* Allocate a multicast cache entry */ static struct mfc_cache *ipmr_cache_alloc(void) { @@ -1028,10 +1075,10 @@ static int ipmr_cache_report(struct mr_table *mrt, static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) { + const struct iphdr *iph = ip_hdr(skb); + struct mfc_cache *c; bool found = false; int err; - struct mfc_cache *c; - const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); list_for_each_entry(c, &mrt->mfc_unres_queue, list) { @@ -1095,46 +1142,39 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent) { - int line; - struct mfc_cache *c, *next; + struct mfc_cache *c; - line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); + /* The entries are added/deleted only under RTNL */ + rcu_read_lock(); + c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr, + mfc->mfcc_mcastgrp.s_addr, parent); + rcu_read_unlock(); + if (!c) + return -ENOENT; + rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params); + list_del_rcu(&c->list); + mroute_netlink_event(mrt, c, RTM_DELROUTE); + ipmr_cache_free(c); - list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { - if (c->mfc_origin == mfc->mfcc_origin.s_addr && - c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr && - (parent == -1 || parent == c->mfc_parent)) { - list_del_rcu(&c->list); - mroute_netlink_event(mrt, c, RTM_DELROUTE); - ipmr_cache_free(c); - return 0; - } - } - return -ENOENT; + return 0; } static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, struct mfcctl *mfc, int mrtsock, int parent) { - bool found = false; - int line; struct mfc_cache *uc, *c; + bool found; + int ret; if (mfc->mfcc_parent >= MAXVIFS) return -ENFILE; - line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - - list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { - if (c->mfc_origin == mfc->mfcc_origin.s_addr && - c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr && - (parent == -1 || parent == c->mfc_parent)) { - found = true; - break; - } - } - - if (found) { + /* The entries are added/deleted only under RTNL */ + rcu_read_lock(); + c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr, + mfc->mfcc_mcastgrp.s_addr, parent); + rcu_read_unlock(); + if (c) { write_lock_bh(&mrt_lock); c->mfc_parent = mfc->mfcc_parent; ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); @@ -1160,8 +1200,14 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, if (!mrtsock) c->mfc_flags |= MFC_STATIC; - list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); - + ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode, + ipmr_rht_params); + if (ret) { + pr_err("ipmr: rhtable insert error %d\n", ret); + ipmr_cache_free(c); + return ret; + } + list_add_tail_rcu(&c->list, &mrt->mfc_cache_list); /* 
Check to see if we resolved a queued list. If so we * need to send on the frames and tidy up. */ @@ -1191,9 +1237,9 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, /* Close the multicast socket, and clear the vif tables etc */ static void mroute_clean_tables(struct mr_table *mrt, bool all) { - int i; + struct mfc_cache *c, *tmp; LIST_HEAD(list); - struct mfc_cache *c, *next; + int i; /* Shut down all active vif entries */ for (i = 0; i < mrt->maxvif; i++) { @@ -1204,19 +1250,18 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all) unregister_netdevice_many(&list); /* Wipe the cache */ - for (i = 0; i < MFC_LINES; i++) { - list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { - if (!all && (c->mfc_flags & MFC_STATIC)) - continue; - list_del_rcu(&c->list); - mroute_netlink_event(mrt, c, RTM_DELROUTE); - ipmr_cache_free(c); - } + list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) { + if (!all && (c->mfc_flags & MFC_STATIC)) + continue; + rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params); + list_del_rcu(&c->list); + mroute_netlink_event(mrt, c, RTM_DELROUTE); + ipmr_cache_free(c); } if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { spin_lock_bh(&mfc_unres_lock); - list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { + list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { list_del(&c->list); mroute_netlink_event(mrt, c, RTM_DELROUTE); ipmr_destroy_unres(mrt, c); @@ -1791,9 +1836,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local) { + int true_vifi = ipmr_find_vif(mrt, skb->dev); int psend = -1; int vif, ct; - int true_vifi = ipmr_find_vif(mrt, skb->dev); vif = cache->mfc_parent; cache->mfc_un.res.pkt++; @@ -2293,34 +2338,30 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) struct mr_table *mrt; struct mfc_cache *mfc; unsigned int t = 0, s_t; - unsigned int h = 0, s_h; unsigned int e = 0, s_e; s_t = cb->args[0]; - s_h = cb->args[1]; - s_e = cb->args[2]; + s_e = cb->args[1]; rcu_read_lock(); ipmr_for_each_table(mrt, net) { if (t < s_t) goto next_table; - if (t > s_t) - s_h = 0; - for (h = s_h; h < MFC_LINES; h++) { - list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) { - if (e < s_e) - goto next_entry; - if (ipmr_fill_mroute(mrt, skb, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE, - NLM_F_MULTI) < 0) - goto done; + list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) { + if (e < s_e) + goto next_entry; + if (ipmr_fill_mroute(mrt, skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) + goto done; next_entry: - e++; - } - e = s_e = 0; + e++; } + e = 0; + s_e = 0; + spin_lock_bh(&mfc_unres_lock); list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) { if (e < s_e) @@ -2337,16 +2378,15 @@ next_entry2: e++; } spin_unlock_bh(&mfc_unres_lock); - e = s_e = 0; - s_h = 0; + e = 0; + s_e = 0; next_table: t++; } done: rcu_read_unlock(); - cb->args[2] = e; - cb->args[1] = h; + cb->args[1] = e; cb->args[0] = t; return skb->len; @@ -2590,10 +2630,8 @@ struct ipmr_mfc_iter { struct seq_net_private p; struct mr_table *mrt; struct list_head *cache; - int ct; }; - static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { @@ -2601,12 +2639,10 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, struct mfc_cache *mfc; rcu_read_lock(); - for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { - 
it->cache = &mrt->mfc_cache_array[it->ct]; - list_for_each_entry_rcu(mfc, it->cache, list) - if (pos-- == 0) - return mfc; - } + it->cache = &mrt->mfc_cache_list; + list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) + if (pos-- == 0) + return mfc; rcu_read_unlock(); spin_lock_bh(&mfc_unres_lock); @@ -2633,17 +2669,16 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) it->mrt = mrt; it->cache = NULL; - it->ct = 0; return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct mfc_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt = it->mrt; + struct mfc_cache *mfc = v; ++*pos; @@ -2656,19 +2691,9 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (it->cache == &mrt->mfc_unres_queue) goto end_of_list; - BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]); - - while (++it->ct < MFC_LINES) { - it->cache = &mrt->mfc_cache_array[it->ct]; - if (list_empty(it->cache)) - continue; - return list_first_entry(it->cache, struct mfc_cache, list); - } - /* exhausted cache_array, show unresolved */ rcu_read_unlock(); it->cache = &mrt->mfc_unres_queue; - it->ct = 0; spin_lock_bh(&mfc_unres_lock); if (!list_empty(it->cache)) @@ -2688,7 +2713,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) if (it->cache == &mrt->mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); - else if (it->cache == &mrt->mfc_cache_array[it->ct]) + else if (it->cache == &mrt->mfc_cache_list) rcu_read_unlock(); } -- cgit v1.2.3 From 717ac5ce13020920fa3975f290281395b6407900 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 12 Jan 2017 08:50:10 -0800 Subject: ipv6: sr: static percpu allocation for hmac_ring Current allocations are not NUMA aware, and lack proper cleanup in case of error. It is perfectly fine to use static per cpu allocations for 256 bytes per cpu. Signed-off-by: Eric Dumazet Cc: David Lebrun Acked-by: David Lebrun Signed-off-by: David S. 
Miller --- net/ipv6/seg6_hmac.c | 43 +++---------------------------------------- 1 file changed, 3 insertions(+), 40 deletions(-) diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index ef1c8a46e7ac..6389bf3e9c9f 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -45,7 +45,7 @@ #include #include -static char * __percpu *hmac_ring; +static DEFINE_PER_CPU(char [SEG6_HMAC_RING_SIZE], hmac_ring); static int seg6_hmac_cmpfn(struct rhashtable_compare_arg *arg, const void *obj) { @@ -192,7 +192,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr, */ local_bh_disable(); - ring = *this_cpu_ptr(hmac_ring); + ring = this_cpu_ptr(hmac_ring); off = ring; /* source address */ @@ -353,27 +353,6 @@ out: } EXPORT_SYMBOL(seg6_push_hmac); -static int seg6_hmac_init_ring(void) -{ - int i; - - hmac_ring = alloc_percpu(char *); - - if (!hmac_ring) - return -ENOMEM; - - for_each_possible_cpu(i) { - char *ring = kzalloc(SEG6_HMAC_RING_SIZE, GFP_KERNEL); - - if (!ring) - return -ENOMEM; - - *per_cpu_ptr(hmac_ring, i) = ring; - } - - return 0; -} - static int seg6_hmac_init_algo(void) { struct seg6_hmac_algo *algo; @@ -422,16 +401,7 @@ static int seg6_hmac_init_algo(void) int __init seg6_hmac_init(void) { - int ret; - - ret = seg6_hmac_init_ring(); - if (ret < 0) - goto out; - - ret = seg6_hmac_init_algo(); - -out: - return ret; + return seg6_hmac_init_algo(); } EXPORT_SYMBOL(seg6_hmac_init); @@ -450,13 +420,6 @@ void seg6_hmac_exit(void) struct seg6_hmac_algo *algo = NULL; int i, alg_count, cpu; - for_each_possible_cpu(i) { - char *ring = *per_cpu_ptr(hmac_ring, i); - - kfree(ring); - } - free_percpu(hmac_ring); - alg_count = sizeof(hmac_algos) / sizeof(struct seg6_hmac_algo); for (i = 0; i < alg_count; i++) { algo = &hmac_algos[i]; -- cgit v1.2.3 From 10b2eb6949ece992a1dd58edb28e01f05e5bf004 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 13 Jan 2017 09:31:32 +0100 Subject: wext: uninline stream addition functions With 78, 111 and 85 bytes respectively (on x86-64), the functions iwe_stream_add_event(), iwe_stream_add_point() and iwe_stream_add_value() really shouldn't be inlines. It appears that at least my compiler already decided the same, and created a single instance of each one of them for each file using it, but that's still a number of instances in the system overall, which this reduces. Signed-off-by: Johannes Berg --- include/net/iw_handler.h | 67 +++++------------------------------------------- net/wireless/wext-core.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 60 deletions(-) diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index c2aa73e5e6bb..2509728650bd 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -505,25 +505,8 @@ static inline int iwe_stream_event_len_adjust(struct iw_request_info *info, /* * Wrapper to add an Wireless Event to a stream of events. 
*/ -static inline char * -iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends, - struct iw_event *iwe, int event_len) -{ - int lcp_len = iwe_stream_lcp_len(info); - - event_len = iwe_stream_event_len_adjust(info, event_len); - - /* Check if it's possible */ - if(likely((stream + event_len) < ends)) { - iwe->len = event_len; - /* Beware of alignement issues on 64 bits */ - memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + lcp_len, &iwe->u, - event_len - lcp_len); - stream += event_len; - } - return stream; -} +char *iwe_stream_add_event(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, int event_len); static inline char * iwe_stream_add_event_check(struct iw_request_info *info, char *stream, @@ -541,27 +524,8 @@ iwe_stream_add_event_check(struct iw_request_info *info, char *stream, * Wrapper to add an short Wireless Event containing a pointer to a * stream of events. */ -static inline char * -iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, - struct iw_event *iwe, char *extra) -{ - int event_len = iwe_stream_point_len(info) + iwe->u.data.length; - int point_len = iwe_stream_point_len(info); - int lcp_len = iwe_stream_lcp_len(info); - - /* Check if it's possible */ - if(likely((stream + event_len) < ends)) { - iwe->len = event_len; - memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + lcp_len, - ((char *) &iwe->u) + IW_EV_POINT_OFF, - IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - if (iwe->u.data.length && extra) - memcpy(stream + point_len, extra, iwe->u.data.length); - stream += event_len; - } - return stream; -} +char *iwe_stream_add_point(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, char *extra); static inline char * iwe_stream_add_point_check(struct iw_request_info *info, char *stream, @@ -580,25 +544,8 @@ iwe_stream_add_point_check(struct iw_request_info *info, char *stream, * Be careful, this one is tricky to use properly : * At the first run, you need to have (value = event + IW_EV_LCP_LEN). 
*/ -static inline char * -iwe_stream_add_value(struct iw_request_info *info, char *event, char *value, - char *ends, struct iw_event *iwe, int event_len) -{ - int lcp_len = iwe_stream_lcp_len(info); - - /* Don't duplicate LCP */ - event_len -= IW_EV_LCP_LEN; - - /* Check if it's possible */ - if(likely((value + event_len) < ends)) { - /* Add new value */ - memcpy(value, &iwe->u, event_len); - value += event_len; - /* Patch LCP */ - iwe->len = value - event; - memcpy(event, (char *) iwe, lcp_len); - } - return value; -} +char *iwe_stream_add_value(struct iw_request_info *info, char *event, + char *value, char *ends, struct iw_event *iwe, + int event_len); #endif /* _IW_HANDLER_H */ diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 6250b1cfcde5..1a4db6790e20 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -1119,3 +1119,70 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, return ret; } #endif + +char *iwe_stream_add_event(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, int event_len) +{ + int lcp_len = iwe_stream_lcp_len(info); + + event_len = iwe_stream_event_len_adjust(info, event_len); + + /* Check if it's possible */ + if (likely((stream + event_len) < ends)) { + iwe->len = event_len; + /* Beware of alignement issues on 64 bits */ + memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); + memcpy(stream + lcp_len, &iwe->u, + event_len - lcp_len); + stream += event_len; + } + + return stream; +} +EXPORT_SYMBOL(iwe_stream_add_event); + +char *iwe_stream_add_point(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, char *extra) +{ + int event_len = iwe_stream_point_len(info) + iwe->u.data.length; + int point_len = iwe_stream_point_len(info); + int lcp_len = iwe_stream_lcp_len(info); + + /* Check if it's possible */ + if (likely((stream + event_len) < ends)) { + iwe->len = event_len; + memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); + memcpy(stream + lcp_len, + ((char *) &iwe->u) + IW_EV_POINT_OFF, + IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); + if (iwe->u.data.length && extra) + memcpy(stream + point_len, extra, iwe->u.data.length); + stream += event_len; + } + + return stream; +} +EXPORT_SYMBOL(iwe_stream_add_point); + +char *iwe_stream_add_value(struct iw_request_info *info, char *event, + char *value, char *ends, struct iw_event *iwe, + int event_len) +{ + int lcp_len = iwe_stream_lcp_len(info); + + /* Don't duplicate LCP */ + event_len -= IW_EV_LCP_LEN; + + /* Check if it's possible */ + if (likely((value + event_len) < ends)) { + /* Add new value */ + memcpy(value, &iwe->u, event_len); + value += event_len; + /* Patch LCP */ + iwe->len = value - event; + memcpy(event, (char *) iwe, lcp_len); + } + + return value; +} +EXPORT_SYMBOL(iwe_stream_add_value); -- cgit v1.2.3 From ab5bb2d51ba8da4add4612b529968446cdb504b1 Mon Sep 17 00:00:00 2001 From: vamsi krishna Date: Fri, 13 Jan 2017 01:12:19 +0200 Subject: cfg80211: Add support for randomizing TA of Public Action frames Add support to use a random local address (Address 2 = TA in transmit and the same address in receive functionality) for Public Action frames in order to improve privacy of WLAN clients. Applications fill the random transmit address in the frame buffer in the NL80211_CMD_FRAME command. This can be used only with the drivers that indicate support for random local address by setting the new NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA and/or NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED in ext_features. 
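A minimal sketch of the driver side, assuming a typical wiphy registration path (the registration context is illustrative and not part of this patch; only the two feature flag names below come from it):

	/* Illustrative only: a driver that can transmit with a randomized TA
	 * advertises it before wiphy_register(), e.g. in its probe path.
	 */
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED);
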
The driver needs to configure receive behavior to accept frames to the specified random address during the time the frame exchange is pending and such frames need to be acknowledged similarly to frames sent to the local permanent address when this random address functionality is not used. Signed-off-by: vamsi krishna Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 6 ++++++ net/wireless/mlme.c | 21 +++++++++++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 174f4b30e804..908886c83894 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4699,6 +4699,10 @@ enum nl80211_feature_flags { * configuration (AP/mesh) with VHT rates. * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup * with user space SME (NL80211_CMD_AUTHENTICATE) in station mode. + * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA + * in @NL80211_CMD_FRAME while not associated. + * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports + * randomized TA in @NL80211_CMD_FRAME while associated. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. @@ -4714,6 +4718,8 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_BEACON_RATE_HT, NL80211_EXT_FEATURE_BEACON_RATE_VHT, NL80211_EXT_FEATURE_FILS_STA, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 1c63a77aea34..b876f40c9dad 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -662,8 +662,25 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, return err; } - if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) - return -EINVAL; + if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) { + /* Allow random TA to be used with Public Action frames if the + * driver has indicated support for this. Otherwise, only allow + * the local address to be used. + */ + if (!ieee80211_is_action(mgmt->frame_control) || + mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) + return -EINVAL; + if (!wdev->current_bss && + !wiphy_ext_feature_isset( + &rdev->wiphy, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA)) + return -EINVAL; + if (wdev->current_bss && + !wiphy_ext_feature_isset( + &rdev->wiphy, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED)) + return -EINVAL; + } /* Transmit the Action frame as requested by user space */ return rdev_mgmt_tx(rdev, wdev, params, cookie); -- cgit v1.2.3 From bf95ecdba93b98d27ac219e79f773f2074b4ca47 Mon Sep 17 00:00:00 2001 From: vamsi krishna Date: Fri, 13 Jan 2017 01:12:20 +0200 Subject: cfg80211: Add support to sched scan to report better BSSs Enhance sched scan to support option of finding a better BSS while in connected state. Firmware scans the medium and reports when it finds a known BSS which has better RSSI than the current connected BSS. New attributes to specify the relative RSSI (compared to the current BSS) are added to the sched scan to implement this. 
Signed-off-by: vamsi krishna Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 36 +++++++++++++++++++++++++----------- include/uapi/linux/nl80211.h | 30 ++++++++++++++++++++++++++++++ net/wireless/nl80211.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 99 insertions(+), 11 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index cb13789ebaef..4456491132cd 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1619,6 +1619,17 @@ struct cfg80211_sched_scan_plan { u32 iterations; }; +/** + * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment. + * + * @band: band of BSS which should match for RSSI level adjustment. + * @delta: value of RSSI level adjustment. + */ +struct cfg80211_bss_select_adjust { + enum nl80211_band band; + s8 delta; +}; + /** * struct cfg80211_sched_scan_request - scheduled scan request description * @@ -1654,6 +1665,16 @@ struct cfg80211_sched_scan_plan { * cycle. The driver may ignore this parameter and start * immediately (or at any other time), if this feature is not * supported. + * @relative_rssi_set: Indicates whether @relative_rssi is set or not. + * @relative_rssi: Relative RSSI threshold in dB to restrict scan result + * reporting in connected state to cases where a matching BSS is determined + * to have better or slightly worse RSSI than the current connected BSS. + * The relative RSSI threshold values are ignored in disconnected state. + * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong + * to the specified band while deciding whether a better BSS is reported + * using @relative_rssi. If delta is a negative number, the BSSs that + * belong to the specified band will be penalized by delta dB in relative + * comparisions. */ struct cfg80211_sched_scan_request { struct cfg80211_ssid *ssids; @@ -1673,6 +1694,10 @@ struct cfg80211_sched_scan_request { u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); + bool relative_rssi_set; + s8 relative_rssi; + struct cfg80211_bss_select_adjust rssi_adjust; + /* internal */ struct wiphy *wiphy; struct net_device *dev; @@ -1980,17 +2005,6 @@ struct cfg80211_ibss_params { struct ieee80211_ht_cap ht_capa_mask; }; -/** - * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment. - * - * @band: band of BSS which should match for RSSI level adjustment. - * @delta: value of RSSI level adjustment. - */ -struct cfg80211_bss_select_adjust { - enum nl80211_band band; - s8 delta; -}; - /** * struct cfg80211_bss_selection - connection parameters for BSS selection. * diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 908886c83894..6b17feb5e839 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1982,6 +1982,20 @@ enum nl80211_commands { * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also * used in various commands/events for specifying the BSSID. * + * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which + * other BSSs has to be better or slightly worse than the current + * connected BSS so that they get reported to user space. + * This will give an opportunity to userspace to consider connecting to + * other matching BSSs which have better or slightly worse RSSI than + * the current connected BSS by using an offloaded operation to avoid + * unnecessary wakeups. 
+ * + * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in + * the specified band is to be adjusted before doing + * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparision to figure out + * better BSSs. The attribute value is a packed structure + * value as specified by &struct nl80211_bss_select_rssi_adjust. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2388,6 +2402,9 @@ enum nl80211_attrs { NL80211_ATTR_BSSID, + NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI, + NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3080,6 +3097,13 @@ enum nl80211_reg_rule_attr { * how this API was implemented in the past. Also, due to the same problem, * the only way to create a matchset with only an RSSI filter (with this * attribute) is if there's only a single matchset with the RSSI attribute. + * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether + * %NL80211_SCHED_SCAN_MATCH_ATTR_RSSI to be used as absolute RSSI or + * relative to current bss's RSSI. + * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present the RSSI level for + * BSS-es in the specified band is to be adjusted before doing + * RSSI-based BSS selection. The attribute value is a packed structure + * value as specified by &struct nl80211_bss_select_rssi_adjust. * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter * attribute number currently defined * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use @@ -3089,6 +3113,8 @@ enum nl80211_sched_scan_match_attr { NL80211_SCHED_SCAN_MATCH_ATTR_SSID, NL80211_SCHED_SCAN_MATCH_ATTR_RSSI, + NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI, + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST, /* keep last */ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST, @@ -4703,6 +4729,9 @@ enum nl80211_feature_flags { * in @NL80211_CMD_FRAME while not associated. * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports * randomized TA in @NL80211_CMD_FRAME while associated. + * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan + * for reporting BSSs with better RSSI than the current connected BSS + * (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI). * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 
@@ -4720,6 +4749,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_FILS_STA, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED, + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b378d0a04003..71c66ff9a702 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -405,6 +405,10 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN }, [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, }, [NL80211_ATTR_BSSID] = { .len = ETH_ALEN }, + [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 }, + [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = { + .len = sizeof(struct nl80211_bss_select_rssi_adjust) + }, }; /* policy for the key attributes */ @@ -6950,6 +6954,12 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, if (!n_plans || n_plans > wiphy->max_sched_scan_plans) return ERR_PTR(-EINVAL); + if (!wiphy_ext_feature_isset( + wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) && + (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] || + attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST])) + return ERR_PTR(-EINVAL); + request = kzalloc(sizeof(*request) + sizeof(*request->ssids) * n_ssids + sizeof(*request->match_sets) * n_match_sets @@ -7156,6 +7166,26 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, request->delay = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]); + if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) { + request->relative_rssi = nla_get_s8( + attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]); + request->relative_rssi_set = true; + } + + if (request->relative_rssi_set && + attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) { + struct nl80211_bss_select_rssi_adjust *rssi_adjust; + + rssi_adjust = nla_data( + attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]); + request->rssi_adjust.band = rssi_adjust->band; + request->rssi_adjust.delta = rssi_adjust->delta; + if (!is_band_valid(wiphy, request->rssi_adjust.band)) { + err = -EINVAL; + goto out_free; + } + } + err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs); if (err) goto out_free; @@ -9692,6 +9722,20 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg, if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay)) return -ENOBUFS; + if (req->relative_rssi_set) { + struct nl80211_bss_select_rssi_adjust rssi_adjust; + + if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI, + req->relative_rssi)) + return -ENOBUFS; + + rssi_adjust.band = req->rssi_adjust.band; + rssi_adjust.delta = req->rssi_adjust.delta; + if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, + sizeof(rssi_adjust), &rssi_adjust)) + return -ENOBUFS; + } + freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!freqs) return -ENOBUFS; -- cgit v1.2.3 From 3093ebbeabcdddc9a982950052f2151df43c7aa2 Mon Sep 17 00:00:00 2001 From: Purushottam Kushwaha Date: Fri, 13 Jan 2017 01:12:21 +0200 Subject: cfg80211: Specify the reason for connect timeout This enhances the connect timeout API to also carry the reason for the timeout. These reason codes for the connect time out are represented by enum nl80211_timeout_reason and are passed to user space through a new attribute NL80211_ATTR_TIMEOUT_REASON (u32). 
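A minimal usage sketch of the extended API, assuming a hypothetical full-MAC driver that tracks the pending connect's BSSID itself (only the cfg80211_connect_timeout() signature and the timeout reason values come from this patch):

	/* Hypothetical driver helper: report that the association step timed out.
	 * dev and bssid are whatever state the driver kept for the pending connect.
	 */
	static void drv_report_assoc_timeout(struct net_device *dev, const u8 *bssid)
	{
		cfg80211_connect_timeout(dev, bssid, NULL, 0,
					 GFP_KERNEL, NL80211_TIMEOUT_ASSOC);
	}
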
Signed-off-by: Purushottam Kushwaha Signed-off-by: Jouni Malinen [keep gfp_t argument last] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 18 ++++++++++++++---- include/uapi/linux/nl80211.h | 21 +++++++++++++++++++++ net/wireless/core.h | 4 +++- net/wireless/mlme.c | 3 ++- net/wireless/nl80211.c | 9 +++++++-- net/wireless/nl80211.h | 4 +++- net/wireless/sme.c | 39 +++++++++++++++++++++++++++------------ net/wireless/util.c | 2 +- 8 files changed, 78 insertions(+), 22 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4456491132cd..9b3427c8d1db 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5090,6 +5090,12 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. * @gfp: allocation flags + * @timeout_reason: reason for connection timeout. This is used when the + * connection fails due to a timeout instead of an explicit rejection from + * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is + * not known. This value is used only if @status < 0 to indicate that the + * failure is due to a timeout and not due to explicit rejection by the AP. + * This value is ignored in other cases (@status >= 0). * * It should be called by the underlying driver whenever connect() has * succeeded. This is similar to cfg80211_connect_result(), but with the @@ -5099,7 +5105,8 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, int status, gfp_t gfp); + size_t resp_ie_len, int status, gfp_t gfp, + enum nl80211_timeout_reason timeout_reason); /** * cfg80211_connect_result - notify cfg80211 of connection result @@ -5125,7 +5132,8 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid, u16 status, gfp_t gfp) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie, - resp_ie_len, status, gfp); + resp_ie_len, status, gfp, + NL80211_TIMEOUT_UNSPECIFIED); } /** @@ -5136,6 +5144,7 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid, * @req_ie: association request IEs (maybe be %NULL) * @req_ie_len: association request IEs length * @gfp: allocation flags + * @timeout_reason: reason for connection timeout. * * It should be called by the underlying driver whenever connect() has failed * in a sequence where no explicit authentication/association rejection was @@ -5145,10 +5154,11 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid, */ static inline void cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, gfp_t gfp) + const u8 *req_ie, size_t req_ie_len, gfp_t gfp, + enum nl80211_timeout_reason timeout_reason) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1, - gfp); + gfp, timeout_reason); } /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6b17feb5e839..c51b40cc0645 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1996,6 +1996,10 @@ enum nl80211_commands { * better BSSs. The attribute value is a packed structure * value as specified by &struct nl80211_bss_select_rssi_adjust. * + * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out. + * u32 attribute with an &enum nl80211_timeout_reason value. 
This is used, + * e.g., with %NL80211_CMD_CONNECT event. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2405,6 +2409,8 @@ enum nl80211_attrs { NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, + NL80211_ATTR_TIMEOUT_REASON, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4788,6 +4794,21 @@ enum nl80211_connect_failed_reason { NL80211_CONN_FAIL_BLOCKED_CLIENT, }; +/** + * enum nl80211_timeout_reason - timeout reasons + * + * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified. + * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out. + * @NL80211_TIMEOUT_AUTH: Authentication timed out. + * @NL80211_TIMEOUT_ASSOC: Association timed out. + */ +enum nl80211_timeout_reason { + NL80211_TIMEOUT_UNSPECIFIED, + NL80211_TIMEOUT_SCAN, + NL80211_TIMEOUT_AUTH, + NL80211_TIMEOUT_ASSOC, +}; + /** * enum nl80211_scan_flags - scan request control flags * diff --git a/net/wireless/core.h b/net/wireless/core.h index ba42055a036d..58ca206982fe 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -228,6 +228,7 @@ struct cfg80211_event { size_t resp_ie_len; struct cfg80211_bss *bss; int status; /* -1 = failed; 0..65535 = status code */ + enum nl80211_timeout_reason timeout_reason; } cr; struct { const u8 *req_ie; @@ -388,7 +389,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, int status, bool wextev, - struct cfg80211_bss *bss); + struct cfg80211_bss *bss, + enum nl80211_timeout_reason timeout_reason); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap); int cfg80211_disconnect(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index b876f40c9dad..22b3d9990065 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -48,7 +48,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, /* update current_bss etc., consumes the bss reference */ __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, status_code, - status_code == WLAN_STATUS_SUCCESS, bss); + status_code == WLAN_STATUS_SUCCESS, bss, + NL80211_TIMEOUT_UNSPECIFIED); } EXPORT_SYMBOL(cfg80211_rx_assoc_resp); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 71c66ff9a702..b4e7bdd673e0 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -409,6 +409,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = { .len = sizeof(struct nl80211_bss_select_rssi_adjust) }, + [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 }, }; /* policy for the key attributes */ @@ -13231,7 +13232,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, - int status, gfp_t gfp) + int status, + enum nl80211_timeout_reason timeout_reason, + gfp_t gfp) { struct sk_buff *msg; void *hdr; @@ -13252,7 +13255,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status < 0 ? 
WLAN_STATUS_UNSPECIFIED_FAILURE : status) || - (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) || + (status < 0 && + (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || + nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) || (req_ie && nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || (resp_ie && diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 75f82520211d..e488dca87423 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -56,7 +56,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, - int status, gfp_t gfp); + int status, + enum nl80211_timeout_reason timeout_reason, + gfp_t gfp); void nl80211_send_roamed(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 46693913fcea..b347e63d7aaa 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -34,10 +34,11 @@ struct cfg80211_conn { CFG80211_CONN_SCAN_AGAIN, CFG80211_CONN_AUTHENTICATE_NEXT, CFG80211_CONN_AUTHENTICATING, - CFG80211_CONN_AUTH_FAILED, + CFG80211_CONN_AUTH_FAILED_TIMEOUT, CFG80211_CONN_ASSOCIATE_NEXT, CFG80211_CONN_ASSOCIATING, CFG80211_CONN_ASSOC_FAILED, + CFG80211_CONN_ASSOC_FAILED_TIMEOUT, CFG80211_CONN_DEAUTH, CFG80211_CONN_ABANDON, CFG80211_CONN_CONNECTED, @@ -140,7 +141,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) return err; } -static int cfg80211_conn_do_work(struct wireless_dev *wdev) +static int cfg80211_conn_do_work(struct wireless_dev *wdev, + enum nl80211_timeout_reason *treason) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_connect_params *params; @@ -171,7 +173,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) NULL, 0, params->key, params->key_len, params->key_idx, NULL, 0); - case CFG80211_CONN_AUTH_FAILED: + case CFG80211_CONN_AUTH_FAILED_TIMEOUT: + *treason = NL80211_TIMEOUT_AUTH; return -ENOTCONN; case CFG80211_CONN_ASSOCIATE_NEXT: if (WARN_ON(!rdev->ops->assoc)) @@ -198,6 +201,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) WLAN_REASON_DEAUTH_LEAVING, false); return err; + case CFG80211_CONN_ASSOC_FAILED_TIMEOUT: + *treason = NL80211_TIMEOUT_ASSOC; + /* fall through */ case CFG80211_CONN_ASSOC_FAILED: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, @@ -223,6 +229,7 @@ void cfg80211_conn_work(struct work_struct *work) container_of(work, struct cfg80211_registered_device, conn_work); struct wireless_dev *wdev; u8 bssid_buf[ETH_ALEN], *bssid = NULL; + enum nl80211_timeout_reason treason; rtnl_lock(); @@ -244,10 +251,12 @@ void cfg80211_conn_work(struct work_struct *work) memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); bssid = bssid_buf; } - if (cfg80211_conn_do_work(wdev)) { + treason = NL80211_TIMEOUT_UNSPECIFIED; + if (cfg80211_conn_do_work(wdev, &treason)) { __cfg80211_connect_result( wdev->netdev, bssid, - NULL, 0, NULL, 0, -1, false, NULL); + NULL, 0, NULL, 0, -1, false, NULL, + treason); } wdev_unlock(wdev); } @@ -352,7 +361,8 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) } else if (status_code != WLAN_STATUS_SUCCESS) { __cfg80211_connect_result(wdev->netdev, mgmt->bssid, NULL, 0, NULL, 0, - status_code, false, NULL); + status_code, false, NULL, + NL80211_TIMEOUT_UNSPECIFIED); } else if (wdev->conn->state == 
CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); @@ -400,7 +410,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) if (!wdev->conn) return; - wdev->conn->state = CFG80211_CONN_AUTH_FAILED; + wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT; schedule_work(&rdev->conn_work); } @@ -422,7 +432,7 @@ void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) if (!wdev->conn) return; - wdev->conn->state = CFG80211_CONN_ASSOC_FAILED; + wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT; schedule_work(&rdev->conn_work); } @@ -564,7 +574,9 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, /* we're good if we have a matching bss struct */ if (bss) { - err = cfg80211_conn_do_work(wdev); + enum nl80211_timeout_reason treason; + + err = cfg80211_conn_do_work(wdev, &treason); cfg80211_put_bss(wdev->wiphy, bss); } else { /* otherwise we'll need to scan for the AP first */ @@ -661,7 +673,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, int status, bool wextev, - struct cfg80211_bss *bss) + struct cfg80211_bss *bss, + enum nl80211_timeout_reason timeout_reason) { struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *country_ie; @@ -680,7 +693,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, - status, GFP_KERNEL); + status, timeout_reason, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT if (wextev) { @@ -771,7 +784,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, int status, gfp_t gfp) + size_t resp_ie_len, int status, gfp_t gfp, + enum nl80211_timeout_reason timeout_reason) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); @@ -811,6 +825,7 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, cfg80211_hold_bss(bss_from_pub(bss)); ev->cr.bss = bss; ev->cr.status = status; + ev->cr.timeout_reason = timeout_reason; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); diff --git a/net/wireless/util.c b/net/wireless/util.c index cd8a7ae55e7d..1b9296882dcd 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -951,7 +951,7 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev) ev->cr.resp_ie, ev->cr.resp_ie_len, ev->cr.status, ev->cr.status == WLAN_STATUS_SUCCESS, - ev->cr.bss); + ev->cr.bss, ev->cr.timeout_reason); break; case EVENT_ROAMED: __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie, -- cgit v1.2.3 From c88215d7050f065afaed33e9599c2ef4e5e6ee22 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 13 Jan 2017 01:12:22 +0200 Subject: cfg80211: Fix documentation for connect result The function documentation for cfg80211_connect_bss() and cfg80211_connect_result() was still claiming that they are used only for a success case while these functions can now be used to report both success and various failure cases. The actual use cases were already described in the connect() documentation. 
Update the function specific comments to note the failure cases and also describe how the special status == -1 case is used in cfg80211_connect_bss() to indicate a connection timeout based on the internal implementation in cfg80211_connect_timeout(). Signed-off-by: Jouni Malinen [use tabs for indentation] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9b3427c8d1db..b7aba6e1a586 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5086,9 +5086,14 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length - * @status: status code, 0 for successful connection, use - * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you - * the real status code for failures. + * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use + * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you + * the real status code for failures. If this call is used to report a + * failure due to a timeout (e.g., not receiving an Authentication frame + * from the AP) instead of an explicit rejection by the AP, -1 is used to + * indicate that this is a failure, but without a status code. + * @timeout_reason is used to report the reason for the timeout in that + * case. * @gfp: allocation flags * @timeout_reason: reason for connection timeout. This is used when the * connection fails due to a timeout instead of an explicit rejection from @@ -5097,10 +5102,10 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) * failure is due to a timeout and not due to explicit rejection by the AP. * This value is ignored in other cases (@status >= 0). * - * It should be called by the underlying driver whenever connect() has - * succeeded. This is similar to cfg80211_connect_result(), but with the - * option of identifying the exact bss entry for the connection. Only one of - * these functions should be called. + * It should be called by the underlying driver once execution of the connection + * request from connect() has been completed. This is similar to + * cfg80211_connect_result(), but with the option of identifying the exact bss + * entry for the connection. Only one of these functions should be called. */ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, struct cfg80211_bss *bss, const u8 *req_ie, @@ -5117,13 +5122,15 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length - * @status: status code, 0 for successful connection, use + * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. * @gfp: allocation flags * - * It should be called by the underlying driver whenever connect() has - * succeeded. + * It should be called by the underlying driver once execution of the connection + * request from connect() has been completed. This is similar to + * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only + * one of these functions should be called. 
*/ static inline void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, -- cgit v1.2.3 From 06efdbe70f9c3bf98e6baacb80b6bac416a6818a Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Thu, 12 Jan 2017 13:02:11 +0200 Subject: ath10k: refactor ath10k_peer_assoc_h_phymode() When adding VHT160 support to ath10k_peer_assoc_h_phymode() the VHT mode selection code becomes too complex. Simplify it by refactoring the vht part to a separate function. Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index d1b7edba5e49..2f6f777ae049 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2533,6 +2533,21 @@ static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) ATH10K_MAC_FIRST_OFDM_RATE_IDX; } +static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, + struct ieee80211_sta *sta) +{ + if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + return MODE_11AC_VHT80; + + if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + return MODE_11AC_VHT40; + + if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + return MODE_11AC_VHT20; + + return MODE_UNKNOWN; +} + static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2579,12 +2594,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, */ if (sta->vht_cap.vht_supported && !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) - phymode = MODE_11AC_VHT80; - else if (sta->bandwidth == IEEE80211_STA_RX_BW_40) - phymode = MODE_11AC_VHT40; - else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) - phymode = MODE_11AC_VHT20; + phymode = ath10k_mac_get_phymode_vht(ar, sta); } else if (sta->ht_cap.ht_supported && !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) -- cgit v1.2.3 From bc1efd739b610a9cda31b82971b7a1e64a6144d1 Mon Sep 17 00:00:00 2001 From: Sebastian Gottschall Date: Thu, 12 Jan 2017 13:02:12 +0200 Subject: ath10k: add VHT160 support This patch adds full VHT160 support for QCA9984 chipsets Tested on Netgear R7800. 80+80 is possible, but disabled so far since it seems to contain glitches like missing vht station flags (this may be firmware or mac80211 related). 
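The core of the change is how a peer reporting 160 MHz RX bandwidth is mapped to a WMI phymode; a condensed illustration, with all names taken from the ath10k_mac_get_phymode_vht() hunk below (see that hunk for the full logic including the fallback case):

	/* Condensed illustration: when sta->bandwidth is IEEE80211_STA_RX_BW_160,
	 * the VHT capability field decides between plain VHT160 and VHT80+80.
	 */
	switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
		phymode = MODE_11AC_VHT160;
		break;
	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
		phymode = MODE_11AC_VHT80_80;
		break;
	}
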
Signed-off-by: Sebastian Gottschall [kvalo@qca.qualcomm.com: refactoring and fix few warnings] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 7 ++++++- drivers/net/wireless/ath/ath10k/mac.c | 35 +++++++++++++++++++++++++++++-- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 1 + drivers/net/wireless/ath/ath10k/wmi-tlv.h | 1 + drivers/net/wireless/ath/ath10k/wmi.c | 9 +++++++- drivers/net/wireless/ath/ath10k/wmi.h | 27 ++++++++++++++++++++++-- 6 files changed, 74 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 86d082cf4eef..32292c532819 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -702,6 +702,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, /* 80MHZ */ case 2: status->vht_flag |= RX_VHT_FLAG_80MHZ; + break; + case 3: + status->vht_flag |= RX_VHT_FLAG_160MHZ; + break; } status->flag |= RX_FLAG_VHT; @@ -926,7 +930,7 @@ static void ath10k_process_rx(struct ath10k *ar, *status = *rx_status; ath10k_dbg(ar, ATH10K_DBG_DATA, - "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, ieee80211_get_SA(hdr), @@ -940,6 +944,7 @@ static void ath10k_process_rx(struct ath10k *ar, status->flag & RX_FLAG_VHT ? "vht" : "", status->flag & RX_FLAG_40MHZ ? "40" : "", status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "", + status->vht_flag & RX_VHT_FLAG_160MHZ ? "160" : "", status->flag & RX_FLAG_SHORT_GI ? "sgi " : "", status->rate_idx, status->vht_nss, diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 2f6f777ae049..28bf19914fff 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -569,10 +569,14 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_80: phymode = MODE_11AC_VHT80; break; + case NL80211_CHAN_WIDTH_160: + phymode = MODE_11AC_VHT160; + break; + case NL80211_CHAN_WIDTH_80P80: + phymode = MODE_11AC_VHT80_80; + break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: - case NL80211_CHAN_WIDTH_80P80: - case NL80211_CHAN_WIDTH_160: phymode = MODE_UNKNOWN; break; } @@ -971,6 +975,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) arg.vdev_id = vdev_id; arg.channel.freq = channel->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; /* TODO setup this dynamically, what in case we don't have any vifs? 
*/ @@ -1417,6 +1422,7 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, arg.channel.freq = chandef->chan->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; arg.channel.mode = chan_to_phymode(chandef); arg.channel.min_power = 0; @@ -2480,6 +2486,9 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, if (sta->bandwidth == IEEE80211_STA_RX_BW_80) arg->peer_flags |= ar->wmi.peer_flags->bw80; + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + arg->peer_flags |= ar->wmi.peer_flags->bw160; + arg->peer_vht_rates.rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->peer_vht_rates.rx_mcs_set = @@ -2536,6 +2545,18 @@ static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, struct ieee80211_sta *sta) { + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { + switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + return MODE_11AC_VHT160; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + return MODE_11AC_VHT80_80; + default: + /* not sure if this is a valid case? */ + return MODE_11AC_VHT160; + } + } + if (sta->bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AC_VHT80; @@ -4321,6 +4342,13 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) vht_cap.cap |= val; } + /* Currently the firmware seems to be buggy, don't enable 80+80 + * mode until that's resolved. + */ + if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && + !(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)) + vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + mcs_map = 0; for (i = 0; i < 8; i++) { if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) @@ -6979,6 +7007,9 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, bw = WMI_PEER_CHWIDTH_80MHZ; break; case IEEE80211_STA_RX_BW_160: + bw = WMI_PEER_CHWIDTH_160MHZ; + break; + default: ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", sta->bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 1f6bb9e8bb01..f9188027a6f6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3637,6 +3637,7 @@ static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { .vht = WMI_TLV_PEER_VHT, .bw80 = WMI_TLV_PEER_80MHZ, .pmf = WMI_TLV_PEER_PMF, + .bw160 = WMI_TLV_PEER_160MHZ, }; /************/ diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index b8aa6000573c..22cf011e839a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -543,6 +543,7 @@ enum wmi_tlv_peer_flags { WMI_TLV_PEER_VHT = 0x02000000, WMI_TLV_PEER_80MHZ = 0x04000000, WMI_TLV_PEER_PMF = 0x08000000, + WMI_TLV_PEER_160MHZ = 0x20000000, }; enum wmi_tlv_tag { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 50d6ee6afe26..e27cfc19754b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -28,6 +28,7 @@ #include "wmi-ops.h" #include "p2p.h" #include "hw.h" +#include "hif.h" #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ) @@ -1574,6 +1575,7 @@ static const struct wmi_peer_flags_map wmi_peer_flags_map = { .bw80 = 
WMI_PEER_80MHZ, .vht_2g = WMI_PEER_VHT_2G, .pmf = WMI_PEER_PMF, + .bw160 = WMI_PEER_160MHZ, }; static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = { @@ -1591,6 +1593,7 @@ static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = { .spatial_mux = WMI_10X_PEER_SPATIAL_MUX, .vht = WMI_10X_PEER_VHT, .bw80 = WMI_10X_PEER_80MHZ, + .bw160 = WMI_10X_PEER_160MHZ, }; static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = { @@ -1610,6 +1613,7 @@ static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = { .bw80 = WMI_10_2_PEER_80MHZ, .vht_2g = WMI_10_2_PEER_VHT_2G, .pmf = WMI_10_2_PEER_PMF, + .bw160 = WMI_10_2_PEER_160MHZ, }; void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, @@ -1634,7 +1638,10 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, ch->mhz = __cpu_to_le32(arg->freq); ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1); - ch->band_center_freq2 = 0; + if (arg->mode == MODE_11AC_VHT80_80) + ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2); + else + ch->band_center_freq2 = 0; ch->min_power = arg->min_power; ch->max_power = arg->max_power; ch->reg_power = arg->max_reg_power; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 5d3dff95b2e5..861c2d8c6e8c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1728,8 +1728,10 @@ enum wmi_phy_mode { MODE_11AC_VHT20_2G = 11, MODE_11AC_VHT40_2G = 12, MODE_11AC_VHT80_2G = 13, - MODE_UNKNOWN = 14, - MODE_MAX = 14 + MODE_11AC_VHT80_80 = 14, + MODE_11AC_VHT160 = 15, + MODE_UNKNOWN = 16, + MODE_MAX = 16 }; static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) @@ -1757,6 +1759,10 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) return "11ac-vht40"; case MODE_11AC_VHT80: return "11ac-vht80"; + case MODE_11AC_VHT160: + return "11ac-vht160"; + case MODE_11AC_VHT80_80: + return "11ac-vht80+80"; case MODE_11AC_VHT20_2G: return "11ac-vht20-2g"; case MODE_11AC_VHT40_2G: @@ -1811,6 +1817,7 @@ struct wmi_channel { struct wmi_channel_arg { u32 freq; u32 band_center_freq1; + u32 band_center_freq2; bool passive; bool allow_ibss; bool allow_ht; @@ -1875,9 +1882,18 @@ enum wmi_channel_change_cause { #define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003 #define WMI_VHT_CAP_RX_LDPC 0x00000010 #define WMI_VHT_CAP_SGI_80MHZ 0x00000020 +#define WMI_VHT_CAP_SGI_160MHZ 0x00000040 #define WMI_VHT_CAP_TX_STBC 0x00000080 #define WMI_VHT_CAP_RX_STBC_MASK 0x00000300 #define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8 +#define WMI_VHT_CAP_SU_BFER 0x00000800 +#define WMI_VHT_CAP_SU_BFEE 0x00001000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16 +#define WMI_VHT_CAP_MU_BFER 0x00080000 +#define WMI_VHT_CAP_MU_BFEE 0x00100000 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23 #define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000 @@ -1926,6 +1942,8 @@ enum { REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */ + REGDMN_MODE_11AC_VHT160 = 0x200000, /* 5Ghz, VHT160 channels */ + REGDMN_MODE_11AC_VHT80_80 = 0x400000, /* 5Ghz, VHT80+80 channels */ REGDMN_MODE_ALL = 0xffffffff }; @@ -5783,6 +5801,7 @@ enum wmi_peer_chwidth { WMI_PEER_CHWIDTH_20MHZ = 0, WMI_PEER_CHWIDTH_40MHZ = 1, 
WMI_PEER_CHWIDTH_80MHZ = 2, + WMI_PEER_CHWIDTH_160MHZ = 3, }; enum wmi_peer_param { @@ -5873,6 +5892,7 @@ struct wmi_peer_flags_map { u32 bw80; u32 vht_2g; u32 pmf; + u32 bw160; }; enum wmi_peer_flags { @@ -5892,6 +5912,7 @@ enum wmi_peer_flags { WMI_PEER_80MHZ = 0x04000000, WMI_PEER_VHT_2G = 0x08000000, WMI_PEER_PMF = 0x10000000, + WMI_PEER_160MHZ = 0x20000000 }; enum wmi_10x_peer_flags { @@ -5909,6 +5930,7 @@ enum wmi_10x_peer_flags { WMI_10X_PEER_SPATIAL_MUX = 0x00200000, WMI_10X_PEER_VHT = 0x02000000, WMI_10X_PEER_80MHZ = 0x04000000, + WMI_10X_PEER_160MHZ = 0x20000000 }; enum wmi_10_2_peer_flags { @@ -5928,6 +5950,7 @@ enum wmi_10_2_peer_flags { WMI_10_2_PEER_80MHZ = 0x04000000, WMI_10_2_PEER_VHT_2G = 0x08000000, WMI_10_2_PEER_PMF = 0x10000000, + WMI_10_2_PEER_160MHZ = 0x20000000 }; /* -- cgit v1.2.3 From c1e3330f22bc3c53e6a2c4282dd5a1dc6e6bcca1 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Mon, 5 Dec 2016 22:52:45 +0100 Subject: ath10k: add accounting for the extended peer statistics The 10.4 firmware adds extended peer information to the firmware's statistics payload. This additional info is stored as a separate data field and the elements are stored in their own "peers_extd" list. These elements can pile up in the same way as the peer information elements. This is because the ath10k_wmi_10_4_op_pull_fw_stats() function tries to pull the same amount (num_peer_stats) for every statistic data unit. Fixes: 4a49ae94a448faa ("ath10k: fix 10.4 extended peer stats update") Signed-off-by: Christian Lamparter Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index e1a70dffc52a..b882b0892dcf 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -400,6 +400,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) * prevent firmware from DoS-ing the host. */ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); + ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); ath10k_warn(ar, "dropping fw peer stats\n"); goto free; } @@ -410,10 +411,12 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) goto free; } + if (!list_empty(&stats.peers)) + list_splice_tail_init(&stats.peers_extd, + &ar->debug.fw_stats.peers_extd); + list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); - list_splice_tail_init(&stats.peers_extd, - &ar->debug.fw_stats.peers_extd); } complete(&ar->debug.fw_stats_complete); -- cgit v1.2.3 From 1077ec472df43414dafaec126498ff3fc669a471 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 12 Jan 2017 13:02:23 +0200 Subject: ath9k: move RELAY and DEBUG_FS to ATH9K[_HTC]_DEBUGFS Currently, the common ath9k_common module needs to have a dependency on RELAY and DEBUG_FS in order to built. This is usually not a problem. But for RAM and FLASH starved AR71XX devices, every little bit counts. This patch adds a new symbol CONFIG_ATH9K_COMMON_DEBUG which makes it possible to drop the RELAY and DEBUG_FS dependency there and move it to ATH_(HTC)_DEBUGFS. Note: The shared FFT/spectral code (which is the only user of the relayfs in ath9k*) needs DEBUG_FS to export the relayfs interface to dump the data to userspace. So it makes no sense to have the functions compiled in, if DEBUG_FS is not there. 
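In practice the pattern is a hidden Kconfig bool that the user-visible debugfs options select, plus static-inline stubs in the shared header so non-debug builds still compile and link against the same calls. A minimal sketch of the stub side follows; the config symbol, type and function names are invented for illustration and are not the actual ath9k identifiers:

	struct example_stats;	/* stand-in for the real statistics type */

	#ifdef CONFIG_EXAMPLE_COMMON_DEBUG
	/* real implementation, built from the common debug object file */
	void example_debug_recv(struct example_stats *stats);
	#else
	static inline void example_debug_recv(struct example_stats *stats)
	{
		/* debugfs/relay support compiled out: nothing to record */
	}
	#endif

Callers invoke example_debug_recv() unconditionally; when the hidden symbol is not selected, the compiler folds the empty stub away and neither RELAY nor DEBUG_FS is pulled into the build.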
Signed-off-by: Christian Lamparter Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/Kconfig | 9 ++++++-- drivers/net/wireless/ath/ath9k/Makefile | 5 +++-- drivers/net/wireless/ath/ath9k/common-debug.h | 27 ++++++++++++++++++++++++ drivers/net/wireless/ath/ath9k/common-spectral.c | 2 +- drivers/net/wireless/ath/ath9k/common-spectral.h | 23 ++++++++++++++++++++ drivers/net/wireless/ath/ath9k/eeprom_4k.c | 2 +- drivers/net/wireless/ath/ath9k/eeprom_9287.c | 2 +- drivers/net/wireless/ath/ath9k/eeprom_def.c | 2 +- 8 files changed, 64 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 8f231c67dd51..783a38f1a626 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -3,8 +3,8 @@ config ATH9K_HW config ATH9K_COMMON tristate select ATH_COMMON - select DEBUG_FS - select RELAY +config ATH9K_COMMON_DEBUG + bool config ATH9K_DFS_DEBUGFS def_bool y depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED @@ -60,12 +60,14 @@ config ATH9K_DEBUGFS bool "Atheros ath9k debugging" depends on ATH9K && DEBUG_FS select MAC80211_DEBUGFS + select ATH9K_COMMON_DEBUG select RELAY ---help--- Say Y, if you need access to ath9k's statistics for interrupts, rate control, etc. Also required for changing debug message flags at run time. + As well as access to the FFT/spectral data and TX99. config ATH9K_STATION_STATISTICS bool "Detailed station statistics" @@ -174,8 +176,11 @@ config ATH9K_HTC config ATH9K_HTC_DEBUGFS bool "Atheros ath9k_htc debugging" depends on ATH9K_HTC && DEBUG_FS + select ATH9K_COMMON_DEBUG + select RELAY ---help--- Say Y, if you need access to ath9k_htc's statistics. + As well as access to the FFT/spectral data. config ATH9K_HWRNG bool "Random number generator support" diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 76f9dc37500b..36a40ffdce15 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -60,8 +60,9 @@ obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o ath9k_common-y:= common.o \ common-init.o \ common-beacon.o \ - common-debug.o \ - common-spectral.o + +ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \ + common-spectral.o ath9k_htc-y += htc_hst.o \ hif_usb.o \ diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h index 7c9788490f7f..3376990d3a24 100644 --- a/drivers/net/wireless/ath/ath9k/common-debug.h +++ b/drivers/net/wireless/ath/ath9k/common-debug.h @@ -60,6 +60,7 @@ struct ath_rx_stats { u32 rx_spectral; }; +#ifdef CONFIG_ATH9K_COMMON_DEBUG void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy, struct ath_hw *ah); void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy, @@ -70,3 +71,29 @@ void ath9k_cmn_debug_recv(struct dentry *debugfs_phy, struct ath_rx_stats *rxstats); void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy, struct ath_rx_stats *rxstats); +#else +static inline void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy, + struct ath_hw *ah) +{ +} + +static inline void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy, + struct ath_hw *ah) +{ +} + +static inline void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats, + struct ath_rx_status *rs) +{ +} + +static inline void ath9k_cmn_debug_recv(struct dentry *debugfs_phy, + struct ath_rx_stats *rxstats) +{ +} + +static inline void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy, + struct ath_rx_stats *rxstats) +{ 
+} +#endif /* CONFIG_ATH9K_COMMON_DEBUG */ diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index eedf86b67cf5..789a3dbe8341 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -1075,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = { void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) { - if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) { + if (spec_priv->rfs_chan_spec_scan) { relay_close(spec_priv->rfs_chan_spec_scan); spec_priv->rfs_chan_spec_scan = NULL; } diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h index 998743be9c67..5d1a51d83aa6 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.h +++ b/drivers/net/wireless/ath/ath9k/common-spectral.h @@ -151,6 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins) return bins[0] & 0x3f; } +#ifdef CONFIG_ATH9K_COMMON_DEBUG void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy); void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv); @@ -161,5 +162,27 @@ int ath9k_cmn_spectral_scan_config(struct ath_common *common, enum spectral_mode spectral_mode); int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr, struct ath_rx_status *rs, u64 tsf); +#else +static inline void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, + struct dentry *debugfs_phy) +{ +} + +static inline void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) +{ +} + +static inline void ath9k_cmn_spectral_scan_trigger(struct ath_common *common, + struct ath_spec_scan_priv *spec_priv) +{ +} + +static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, + struct ieee80211_hdr *hdr, + struct ath_rx_status *rs, u64 tsf) +{ + return 0; +} +#endif /* CONFIG_ATH9K_COMMON_DEBUG */ #endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 4a01ebe53053..b8c0a08066a0 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -72,7 +72,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) return __ath9k_hw_4k_fill_eeprom(ah); } -#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) +#ifdef CONFIG_ATH9K_COMMON_DEBUG static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_4k_header *modal_hdr) { diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 9611f020f7c0..3caa149b1013 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -75,7 +75,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) return __ath9k_hw_ar9287_fill_eeprom(ah); } -#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) +#ifdef CONFIG_ATH9K_COMMON_DEBUG static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_ar9287_header *modal_hdr) { diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 7d5223451ce9..56b44fc7a8e6 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -131,7 +131,7 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) return 
__ath9k_hw_def_fill_eeprom(ah); } -#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) +#ifdef CONFIG_ATH9K_COMMON_DEBUG static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_header *modal_hdr) { -- cgit v1.2.3 From c486dc571a370ec58aefc9cc93937945403ef351 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Thu, 12 Jan 2017 13:02:21 +0200 Subject: ath10k: fix wifi connectivity and warning in rx with channel 169 In countries where basic operation of channel 169 is allowed, this fixes the below WARN_ON_ONCE in Rx and fixes the station connectivity failure in channel 169 as the packet is dropped in the driver as the current check limits to channel 165. As of now all the packets beyond channel 165 is dropped, fix this by extending the range to channel 169. Call trace: drivers/net/wireless/ath/ath10k/wmi.c:1505 ath10k_wmi_event_mgmt_rx+0x278/0x440 [ath10k_core]() Call Trace: [] ? printk+0x2d/0x2f [] warn_slowpath_common+0x72/0xa0 [] ? ath10k_wmi_event_mgmt_rx+0x278/0x440 [] ? ath10k_wmi_event_mgmt_rx+0x278/0x440 [] warn_slowpath_null+0x22/0x30 [] ath10k_wmi_event_mgmt_rx+0x278/0x440 [] ? ath10k_pci_sleep+0x8b/0xb0 [ath10k_pci] [] ath10k_wmi_10_2_op_rx+0xf3/0x3b0 [] ath10k_wmi_process_rx+0x1e/0x60 [] ath10k_htc_rx_completion_handler+0x347/0x4d0 [ath10k_core] [] ? ath10k_ce_completed_recv_next+0x53/0x70 [ath10k_pci] [] ath10k_pci_ce_recv_data+0x171/0x1d0 [ath10k_pci] [] ? ath10k_pci_write32+0x39/0x80 [ath10k_pci] [] ath10k_ce_per_engine_service+0x5c/0xa0 [ath10k_pci] [] ath10k_ce_per_engine_service_any+0x5f/0x70 [ath10k_pci] [] ? local_bh_enable_ip+0x90/0x90 [] ath10k_pci_tasklet+0x1b/0x50 [ath10k_pci] Fixes: 34c30b0a5e97 ("ath10k: enable advertising support for channel 169, 5Ghz") Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index e27cfc19754b..414ad3e1eed4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2326,7 +2326,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) */ if (channel >= 1 && channel <= 14) { status->band = NL80211_BAND_2GHZ; - } else if (channel >= 36 && channel <= 165) { + } else if (channel >= 36 && channel <= 169) { status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to -- cgit v1.2.3 From cd59102779aca33e50967bdb6147334fb298d7ad Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Thu, 12 Jan 2017 13:02:22 +0200 Subject: ath10k: fix tx legacy rate reporting Tx legacy rate is reported 10 fold, as below iw dev wlan#N station dump | grep "tx bitrate" tx bitrate: 240.0 MBit/s This is because by mistake we multiply by the hardware reported rate twice by 10, fix this. 
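To make the unit mix-up concrete, a rough sketch with illustrative values (the variable names are made up; the only assumed fact is that cfg80211's struct rate_info::legacy, which "iw station dump" prints, is expressed in 100 kbit/s units):

	unsigned int rate_mbps = 24;		/* e.g. a 24 Mbit/s OFDM legacy rate */
	unsigned int rate = rate_mbps * 10;	/* 240: already in the 100 kbit/s units rate_info::legacy expects */
	/* bug: storing rate * 10 puts 2400 in txrate.legacy, shown by iw as "240.0 MBit/s" */
	/* fix: storing rate puts 240 there, shown as "24.0 MBit/s" */

Dropping the second scaling keeps the already-converted value, which is what the one-line change below does.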
Fixes: cec17c382140 ("ath10k: add per peer htt tx stats support for 10.4") Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 32292c532819..4105005dcdfe 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2250,7 +2250,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, rate *= 10; if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK) rate = rate - 5; - arsta->txrate.legacy = rate * 10; + arsta->txrate.legacy = rate; } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { arsta->txrate.flags = RATE_INFO_FLAGS_MCS; arsta->txrate.mcs = txrate.mcs; -- cgit v1.2.3 From 98fc3c6fa5cfda703e7be1af50fba82c168251de Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 12 Jan 2017 18:07:16 -0500 Subject: net: dsa: mv88e6xxx: add EEPROM support to 6390 The Marvell 6352 chip has a 8-bit address/16-bit data EEPROM access. The Marvell 6390 chip has a 16-bit address/8-bit data EEPROM access. This patch implements the 8-bit data EEPROM access in the mv88e6xxx driver and adds its support to chips of the 6390 family. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 14 ++++++ drivers/net/dsa/mv88e6xxx/global2.c | 93 ++++++++++++++++++++++++++++++++++- drivers/net/dsa/mv88e6xxx/global2.h | 21 ++++++++ drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 1 + 4 files changed, 128 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index eea8e0176e33..987b2dbbd35a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3453,6 +3453,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { static const struct mv88e6xxx_ops mv88e6190_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3478,6 +3480,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { static const struct mv88e6xxx_ops mv88e6190x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3503,6 +3507,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { static const struct mv88e6xxx_ops mv88e6191_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3556,6 +3562,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { static const struct mv88e6xxx_ops mv88e6290_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3714,6 +3722,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { static const struct mv88e6xxx_ops mv88e6390_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = 
mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3741,6 +3751,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { static const struct mv88e6xxx_ops mv88e6390x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3768,6 +3780,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { static const struct mv88e6xxx_ops mv88e6391_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 3e77071949ab..ead2e265c9ef 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -218,7 +218,8 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) } /* Offset 0x14: EEPROM Command - * Offset 0x15: EEPROM Data + * Offset 0x15: EEPROM Data (for 16-bit data access) + * Offset 0x15: EEPROM Addr (for 8-bit data access) */ static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) @@ -239,6 +240,50 @@ static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd) return mv88e6xxx_g2_eeprom_wait(chip); } +static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip, + u16 addr, u8 *data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr); + if (err) + return err; + + err = mv88e6xxx_g2_eeprom_cmd(chip, cmd); + if (err) + return err; + + err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd); + if (err) + return err; + + *data = cmd & 0xff; + + return 0; +} + +static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip, + u16 addr, u8 data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr); + if (err) + return err; + + return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data); +} + static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip, u8 addr, u16 *data) { @@ -273,6 +318,52 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_eeprom_cmd(chip, cmd); } +int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + int err; + + eeprom->len = 0; + + while (len) { + err = mv88e6xxx_g2_eeprom_read8(chip, offset, data); + if (err) + return err; + + eeprom->len++; + offset++; + data++; + len--; + } + + return 0; +} + +int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + int err; + + eeprom->len = 0; + + while (len) { + err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data); + if (err) + return err; + + eeprom->len++; + offset++; + data++; + len--; + } + + return 0; +} + int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data) { diff --git a/drivers/net/dsa/mv88e6xxx/global2.h 
b/drivers/net/dsa/mv88e6xxx/global2.h index 9aefb7d8b0ad..2918f22388f7 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -28,10 +28,17 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); + +int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data); +int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data); + int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); + int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); @@ -67,6 +74,20 @@ static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + return -EOPNOTSUPP; +} + static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data) diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index a224d66dafd9..466cfdadb7bd 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -382,6 +382,7 @@ #define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10) #define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff #define GLOBAL2_EEPROM_DATA 0x15 +#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390 */ #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 0x17 #define GLOBAL2_SMI_PHY_CMD 0x18 -- cgit v1.2.3 From 7410191afcaca3a49bb29bfb5e15f81d7b336984 Mon Sep 17 00:00:00 2001 From: Satanand Burla Date: Thu, 12 Jan 2017 16:18:22 -0800 Subject: liquidio: use fallback for selecting txq Remove assignment to ndo_select_queue so that fallback is used for selecting txq. Also remove the now-useless function that used to be assigned to ndo_select_queue. Signed-off-by: Satanand Burla Signed-off-by: Felix Manlunas Signed-off-by: Derek Chickles Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 20 -------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 21 --------------------- 2 files changed, 41 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index cc825d574e8e..2b89ec291b8b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2223,25 +2223,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - u32 qindex = 0; - struct lio *lio; - - lio = GET_LIO(dev); - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. @@ -3755,7 +3736,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, .ndo_set_vf_link_state = liquidio_set_vf_link_state, - .ndo_select_queue = select_q }; /** \brief Entry point for the liquidio module diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index ad2e72d7d522..19d88fb387ce 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1455,26 +1455,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - struct lio *lio; - u32 qindex; - - lio = GET_LIO(dev); - - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. @@ -2717,7 +2697,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_features = liquidio_set_features, .ndo_udp_tunnel_add = liquidio_add_vxlan_port, .ndo_udp_tunnel_del = liquidio_del_vxlan_port, - .ndo_select_queue = select_q, }; static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) -- cgit v1.2.3 From db8da6bb574e1692cb86624317c572b0b9306560 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:30 -0800 Subject: tcp: new helper function for RACK loss detection Create a new helper tcp_rack_mark_skb_lost to prepare the upcoming RACK reordering timer support. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_recovery.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index e36df4fcfeba..f38dba5aed7a 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -3,6 +3,19 @@ int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS; +static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + + tcp_skb_mark_lost_uncond_verify(tp, skb); + if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { + /* Account for retransmits that are lost again */ + TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; + tp->retrans_out -= tcp_skb_pcount(skb); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); + } +} + /* Marks a packet lost, if some packet sent later has been (s)acked. * The underlying idea is similar to the traditional dupthresh and FACK * but they look at different metrics: @@ -61,13 +74,7 @@ int tcp_rack_mark_lost(struct sock *sk) continue; /* skb is lost if packet sent later is sacked */ - tcp_skb_mark_lost_uncond_verify(tp, skb); - if (scb->sacked & TCPCB_SACKED_RETRANS) { - scb->sacked &= ~TCPCB_SACKED_RETRANS; - tp->retrans_out -= tcp_skb_pcount(skb); - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPLOSTRETRANSMIT); - } + tcp_rack_mark_skb_lost(sk, skb); } else if (!(scb->sacked & TCPCB_RETRANS)) { /* Original data are sent sequentially so stop early * b/c the rest are all sent after rack_sent -- cgit v1.2.3 From e636f8b0104d6622aaaed6aa5ef17dfbf165bc51 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:31 -0800 Subject: tcp: new helper for RACK to detect loss Create a new helper tcp_rack_detect_loss to prepare the upcoming RACK reordering timer patch. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 3 +-- net/ipv4/tcp_input.c | 12 ++++++++---- net/ipv4/tcp_recovery.c | 22 +++++++++++++--------- 3 files changed, 22 insertions(+), 15 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 1da0aa724929..51183bba3835 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1863,8 +1863,7 @@ extern int sysctl_tcp_recovery; /* Use TCP RACK to detect (some) tail and retransmit losses */ #define TCP_RACK_LOST_RETRANS 0x1 -extern int tcp_rack_mark_lost(struct sock *sk); - +extern void tcp_rack_mark_lost(struct sock *sk); extern void tcp_rack_advance(struct tcp_sock *tp, const struct skb_mstamp *xmit_time, u8 sacked); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ec6d84363024..bb24b93e64bc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2865,10 +2865,14 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, } /* Use RACK to detect loss */ - if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS && - tcp_rack_mark_lost(sk)) { - flag |= FLAG_LOST_RETRANS; - *ack_flag |= FLAG_LOST_RETRANS; + if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) { + u32 prior_retrans = tp->retrans_out; + + tcp_rack_mark_lost(sk); + if (prior_retrans > tp->retrans_out) { + flag |= FLAG_LOST_RETRANS; + *ack_flag |= FLAG_LOST_RETRANS; + } } /* E. Process state. 
*/ diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index f38dba5aed7a..7ea0377229c0 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -32,17 +32,11 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) * The current version is only used after recovery starts but can be * easily extended to detect the first loss. */ -int tcp_rack_mark_lost(struct sock *sk) +static void tcp_rack_detect_loss(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - u32 reo_wnd, prior_retrans = tp->retrans_out; - - if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced) - return 0; - - /* Reset the advanced flag to avoid unnecessary queue scanning */ - tp->rack.advanced = 0; + u32 reo_wnd; /* To be more reordering resilient, allow min_rtt/4 settling delay * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed @@ -82,7 +76,17 @@ int tcp_rack_mark_lost(struct sock *sk) break; } } - return prior_retrans - tp->retrans_out; +} + +void tcp_rack_mark_lost(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced) + return; + /* Reset the advanced flag to avoid unnecessary queue scanning */ + tp->rack.advanced = 0; + tcp_rack_detect_loss(sk); } /* Record the most recently (re)sent time among the (s)acked packets */ -- cgit v1.2.3 From deed7be78f512d003c6290da0a781479b31b3d74 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:32 -0800 Subject: tcp: record most recent RTT in RACK loss detection Record the most recent RTT in RACK. It is often identical to the "ca_rtt_us" values in tcp_clean_rtx_queue. But when the packet has been retransmitted, RACK choses to believe the ACK is for the (latest) retransmitted packet if the RTT is over minimum RTT. This requires passing the arrival time of the most recent ACK to RACK routines. The timestamp is now recorded in the "ack_time" in tcp_sacktag_state during the ACK processing. This patch does not change the RACK algorithm itself. It only adds the RTT variable to prepare the next main patch. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 1 + include/net/tcp.h | 7 ++++--- net/ipv4/tcp_input.c | 36 ++++++++++++++++++++++-------------- net/ipv4/tcp_recovery.c | 41 +++++++++++++++++++++++------------------ 4 files changed, 50 insertions(+), 35 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index fc5848dad7a4..1255c592719c 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -207,6 +207,7 @@ struct tcp_sock { /* Information of the most recently (s)acked skb */ struct tcp_rack { struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u32 rtt_us; /* Associated RTT */ u8 advanced; /* mstamp advanced since last lost marking */ u8 reord; /* reordering detected */ } rack; diff --git a/include/net/tcp.h b/include/net/tcp.h index 51183bba3835..1439107658c2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1863,9 +1863,10 @@ extern int sysctl_tcp_recovery; /* Use TCP RACK to detect (some) tail and retransmit losses */ #define TCP_RACK_LOST_RETRANS 0x1 -extern void tcp_rack_mark_lost(struct sock *sk); -extern void tcp_rack_advance(struct tcp_sock *tp, - const struct skb_mstamp *xmit_time, u8 sacked); +extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now); +extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, + const struct skb_mstamp *xmit_time, + const struct skb_mstamp *ack_time); /* * Save and compile IPv4 options, return a pointer to it diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index bb24b93e64bc..8ccd171999bf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1135,6 +1135,7 @@ struct tcp_sacktag_state { */ struct skb_mstamp first_sackt; struct skb_mstamp last_sackt; + struct skb_mstamp ack_time; /* Timestamp when the S/ACK was received */ struct rate_sample *rate; int flag; }; @@ -1217,7 +1218,7 @@ static u8 tcp_sacktag_one(struct sock *sk, return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { - tcp_rack_advance(tp, xmit_time, sacked); + tcp_rack_advance(tp, sacked, xmit_time, &state->ack_time); if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, @@ -2813,7 +2814,8 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked) * tcp_xmit_retransmit_queue(). 
*/ static void tcp_fastretrans_alert(struct sock *sk, const int acked, - bool is_dupack, int *ack_flag, int *rexmit) + bool is_dupack, int *ack_flag, int *rexmit, + const struct skb_mstamp *ack_time) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -2868,7 +2870,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) { u32 prior_retrans = tp->retrans_out; - tcp_rack_mark_lost(sk); + tcp_rack_mark_lost(sk, ack_time); if (prior_retrans > tp->retrans_out) { flag |= FLAG_LOST_RETRANS; *ack_flag |= FLAG_LOST_RETRANS; @@ -3105,11 +3107,11 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, */ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, u32 prior_snd_una, int *acked, - struct tcp_sacktag_state *sack, - struct skb_mstamp *now) + struct tcp_sacktag_state *sack) { const struct inet_connection_sock *icsk = inet_csk(sk); struct skb_mstamp first_ackt, last_ackt; + struct skb_mstamp *now = &sack->ack_time; struct tcp_sock *tp = tcp_sk(sk); u32 prior_sacked = tp->sacked_out; u32 reord = tp->packets_out; @@ -3169,7 +3171,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, } else if (tcp_is_sack(tp)) { tp->delivered += acked_pcount; if (!tcp_skb_spurious_retrans(tp, skb)) - tcp_rack_advance(tp, &skb->skb_mstamp, sacked); + tcp_rack_advance(tp, sacked, + &skb->skb_mstamp, + &sack->ack_time); } if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; @@ -3599,7 +3603,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) u32 lost = tp->lost; int acked = 0; /* Number of packets newly acked */ int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */ - struct skb_mstamp now; sack_state.first_sackt.v64 = 0; sack_state.rate = &rs; @@ -3625,7 +3628,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (after(ack, tp->snd_nxt)) goto invalid_ack; - skb_mstamp_get(&now); + skb_mstamp_get(&sack_state.ack_time); if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) @@ -3693,11 +3696,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) /* See if we can take anything off of the retransmit queue. */ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, - &sack_state, &now); + &sack_state); if (tcp_ack_is_dubious(sk, flag)) { is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); - tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); + tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit, + &sack_state.ack_time); } if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); @@ -3712,15 +3716,17 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_schedule_loss_probe(sk); delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ lost = tp->lost - lost; /* freshly marked lost */ - tcp_rate_gen(sk, delivered, lost, &now, &rs); - tcp_cong_control(sk, ack, delivered, flag, &rs); + tcp_rate_gen(sk, delivered, lost, &sack_state.ack_time, + sack_state.rate); + tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); tcp_xmit_recovery(sk, rexmit); return 1; no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) - tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); + tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit, + &sack_state.ack_time); /* If this ack opens up a zero window, clear backoff. 
It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. @@ -3741,9 +3747,11 @@ old_ack: * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { + skb_mstamp_get(&sack_state.ack_time); flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); - tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); + tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit, + &sack_state.ack_time); tcp_xmit_recovery(sk, rexmit); } diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 7ea0377229c0..557363cde58a 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -32,7 +32,7 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) * The current version is only used after recovery starts but can be * easily extended to detect the first loss. */ -static void tcp_rack_detect_loss(struct sock *sk) +static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; @@ -62,13 +62,14 @@ static void tcp_rack_detect_loss(struct sock *sk) continue; if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) { - - if (skb_mstamp_us_delta(&tp->rack.mstamp, - &skb->skb_mstamp) <= reo_wnd) - continue; - - /* skb is lost if packet sent later is sacked */ - tcp_rack_mark_skb_lost(sk, skb); + /* Step 3 in draft-cheng-tcpm-rack-00.txt: + * A packet is lost if its elapsed time is beyond + * the recent RTT plus the reordering window. + */ + if (skb_mstamp_us_delta(now, &skb->skb_mstamp) > + tp->rack.rtt_us + reo_wnd) { + tcp_rack_mark_skb_lost(sk, skb); + } } else if (!(scb->sacked & TCPCB_RETRANS)) { /* Original data are sent sequentially so stop early * b/c the rest are all sent after rack_sent @@ -78,7 +79,7 @@ static void tcp_rack_detect_loss(struct sock *sk) } } -void tcp_rack_mark_lost(struct sock *sk) +void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now) { struct tcp_sock *tp = tcp_sk(sk); @@ -86,20 +87,25 @@ void tcp_rack_mark_lost(struct sock *sk) return; /* Reset the advanced flag to avoid unnecessary queue scanning */ tp->rack.advanced = 0; - tcp_rack_detect_loss(sk); + tcp_rack_detect_loss(sk, now); } -/* Record the most recently (re)sent time among the (s)acked packets */ -void tcp_rack_advance(struct tcp_sock *tp, - const struct skb_mstamp *xmit_time, u8 sacked) +/* Record the most recently (re)sent time among the (s)acked packets + * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from + * draft-cheng-tcpm-rack-00.txt + */ +void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, + const struct skb_mstamp *xmit_time, + const struct skb_mstamp *ack_time) { + u32 rtt_us; + if (tp->rack.mstamp.v64 && !skb_mstamp_after(xmit_time, &tp->rack.mstamp)) return; + rtt_us = skb_mstamp_us_delta(ack_time, xmit_time); if (sacked & TCPCB_RETRANS) { - struct skb_mstamp now; - /* If the sacked packet was retransmitted, it's ambiguous * whether the retransmission or the original (or the prior * retransmission) was sacked. @@ -110,11 +116,10 @@ void tcp_rack_advance(struct tcp_sock *tp, * so it's at least one RTT (i.e., retransmission is at least * an RTT later). 
*/ - skb_mstamp_get(&now); - if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp)) + if (rtt_us < tcp_min_rtt(tp)) return; } - + tp->rack.rtt_us = rtt_us; tp->rack.mstamp = *xmit_time; tp->rack.advanced = 1; } -- cgit v1.2.3 From 57dde7f70de34d4251f291c9eac7ad920aaf56b2 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:33 -0800 Subject: tcp: add reordering timer in RACK loss detection This patch makes RACK install a reordering timer when it suspects some packets might be lost, but wants to delay the decision a little bit to accomodate reordering. It does not create a new timer but instead repurposes the existing RTO timer, because both are meant to retransmit packets. Specifically it arms a timer ICSK_TIME_REO_TIMEOUT when the RACK timing check fails. The wait time is set to RACK.RTT + RACK.reo_wnd - (NOW - Packet.xmit_time) + fudge This translates to expecting a packet (Packet) should take (RACK.RTT + RACK.reo_wnd + fudge) to deliver after it was sent. When there are multiple packets that need a timer, we use one timer with the maximum timeout. Therefore the timer conservatively uses the maximum window to expire N packets by one timeout, instead of N timeouts to expire N packets sent at different times. The fudge factor is 2 jiffies to ensure when the timer fires, all the suspected packets would exceed the deadline and be marked lost by tcp_rack_detect_loss(). It has to be at least 1 jiffy because the clock may tick between calling icsk_reset_xmit_timer(timeout) and actually hang the timer. The next jiffy is to lower-bound the timeout to 2 jiffies when reo_wnd is < 1ms. When the reordering timer fires (tcp_rack_reo_timeout): If we aren't in Recovery we'll enter fast recovery and force fast retransmit. This is very similar to the early retransmit (RFC5827) except RACK is not constrained to only enter recovery for small outstanding flights. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/inet_connection_sock.h | 4 ++- include/net/tcp.h | 4 +++ net/ipv4/inet_diag.c | 1 + net/ipv4/tcp_input.c | 6 ++-- net/ipv4/tcp_ipv4.c | 1 + net/ipv4/tcp_output.c | 3 +- net/ipv4/tcp_recovery.c | 57 +++++++++++++++++++++++++++++++++----- net/ipv4/tcp_timer.c | 3 ++ net/ipv6/tcp_ipv6.c | 1 + 9 files changed, 68 insertions(+), 12 deletions(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 85ee3879499e..84b2edde09b1 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -144,6 +144,7 @@ struct inet_connection_sock { #define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ #define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */ #define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ +#define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */ static inline struct inet_connection_sock *inet_csk(const struct sock *sk) { @@ -234,7 +235,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, } if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || - what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) { + what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE || + what == ICSK_TIME_REO_TIMEOUT) { icsk->icsk_pending = what; icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); diff --git a/include/net/tcp.h b/include/net/tcp.h index 1439107658c2..64fcdeb3358b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -143,6 +143,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes * for local resources. */ +#define TCP_REO_TIMEOUT_MIN (2000) /* Min RACK reordering timeout in usec */ #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */ #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */ @@ -397,6 +398,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); void tcp_enter_loss(struct sock *sk); +void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag); void tcp_clear_retrans(struct tcp_sock *tp); void tcp_update_metrics(struct sock *sk); void tcp_init_metrics(struct sock *sk); @@ -541,6 +543,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); void tcp_retransmit_timer(struct sock *sk); void tcp_xmit_retransmit_queue(struct sock *); void tcp_simple_retransmit(struct sock *); +void tcp_enter_recovery(struct sock *sk, bool ece_ack); int tcp_trim_head(struct sock *, struct sk_buff *, u32); int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t); @@ -1867,6 +1870,7 @@ extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now); extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, const struct skb_mstamp *xmit_time, const struct skb_mstamp *ack_time); +extern void tcp_rack_reo_timeout(struct sock *sk); /* * Save and compile IPv4 options, return a pointer to it diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 4dea33e5f295..d216e40623d3 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -216,6 +216,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 
r->idiag_timer = 1; r->idiag_retrans = icsk->icsk_retransmits; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8ccd171999bf..be1191829963 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2522,8 +2522,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk) tcp_ecn_queue_cwr(tp); } -static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, - int flag) +void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag) { struct tcp_sock *tp = tcp_sk(sk); int sndcnt = 0; @@ -2691,7 +2690,7 @@ void tcp_simple_retransmit(struct sock *sk) } EXPORT_SYMBOL(tcp_simple_retransmit); -static void tcp_enter_recovery(struct sock *sk, bool ece_ack) +void tcp_enter_recovery(struct sock *sk, bool ece_ack) { struct tcp_sock *tp = tcp_sk(sk); int mib_idx; @@ -3031,6 +3030,7 @@ void tcp_rearm_rto(struct sock *sk) u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 56d756ecfb59..ebf3e0c4967a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2230,6 +2230,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 1d5331a1b1dc..0ba9026cb70d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2960,7 +2960,8 @@ begin_fwd: if (tcp_in_cwnd_reduction(sk)) tp->prr_out += tcp_skb_pcount(skb); - if (skb == tcp_write_queue_head(sk)) + if (skb == tcp_write_queue_head(sk) && + icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 557363cde58a..eb39b1b6d1dc 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -32,19 +32,18 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) * The current version is only used after recovery starts but can be * easily extended to detect the first loss. */ -static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now) +static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now, + u32 *reo_timeout) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; u32 reo_wnd; + *reo_timeout = 0; /* To be more reordering resilient, allow min_rtt/4 settling delay * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed * RTT because reordering is often a path property and less related * to queuing or delayed ACKs. - * - * TODO: measure and adapt to the observed reordering delay, and - * use a timer to retransmit like the delayed early retransmit. */ reo_wnd = 1000; if (tp->rack.reord && tcp_min_rtt(tp) != ~0U) @@ -66,10 +65,23 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now) * A packet is lost if its elapsed time is beyond * the recent RTT plus the reordering window. 
*/ - if (skb_mstamp_us_delta(now, &skb->skb_mstamp) > - tp->rack.rtt_us + reo_wnd) { + u32 elapsed = skb_mstamp_us_delta(now, + &skb->skb_mstamp); + s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed; + + if (remaining < 0) { tcp_rack_mark_skb_lost(sk, skb); + continue; } + + /* Skip ones marked lost but not yet retransmitted */ + if ((scb->sacked & TCPCB_LOST) && + !(scb->sacked & TCPCB_SACKED_RETRANS)) + continue; + + /* Record maximum wait time (+1 to avoid 0) */ + *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining); + } else if (!(scb->sacked & TCPCB_RETRANS)) { /* Original data are sent sequentially so stop early * b/c the rest are all sent after rack_sent @@ -82,12 +94,19 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now) void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now) { struct tcp_sock *tp = tcp_sk(sk); + u32 timeout; if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced) return; + /* Reset the advanced flag to avoid unnecessary queue scanning */ tp->rack.advanced = 0; - tcp_rack_detect_loss(sk, now); + tcp_rack_detect_loss(sk, now, &timeout); + if (timeout) { + timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT, + timeout, inet_csk(sk)->icsk_rto); + } } /* Record the most recently (re)sent time among the (s)acked packets @@ -123,3 +142,27 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, tp->rack.mstamp = *xmit_time; tp->rack.advanced = 1; } + +/* We have waited long enough to accommodate reordering. Mark the expired + * packets lost and retransmit them. + */ +void tcp_rack_reo_timeout(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct skb_mstamp now; + u32 timeout, prior_inflight; + + skb_mstamp_get(&now); + prior_inflight = tcp_packets_in_flight(tp); + tcp_rack_detect_loss(sk, &now, &timeout); + if (prior_inflight != tcp_packets_in_flight(tp)) { + if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) { + tcp_enter_recovery(sk, false); + if (!inet_csk(sk)->icsk_ca_ops->cong_control) + tcp_cwnd_reduction(sk, 1, 0); + } + tcp_xmit_retransmit_queue(sk); + } + if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS) + tcp_rearm_rto(sk); +} diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 29a9bd5f1225..953c02a8566e 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -563,6 +563,9 @@ void tcp_write_timer_handler(struct sock *sk) event = icsk->icsk_pending; switch (event) { + case ICSK_TIME_REO_TIMEOUT: + tcp_rack_reo_timeout(sk); + break; case ICSK_TIME_EARLY_RETRANS: tcp_resume_early_retransmit(sk); break; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 228965dca3c5..f52c3742b404 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1746,6 +1746,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; -- cgit v1.2.3 From 1d0833df594390876647c54c2c88069d29059665 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:34 -0800 Subject: tcp: use sequence to break TS ties for RACK loss detection The packets inside a jumbo skb (e.g., TSO) share the same skb timestamp, even though they are sent sequentially on the wire. Since RACK is based on time, it can not detect some packets inside the same skb are lost. 
However, we can leverage the packet sequence numbers as extended timestamps to detect losses. Therefore, when RACK timestamp is identical to skb's timestamp (i.e., one of the packets of the skb is acked or sacked), we use the sequence numbers of the acked and unacked packets to break ties. We can use the same sequence logic to advance RACK xmit time as well to detect more losses and avoid timeout. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 1 + include/net/tcp.h | 2 +- net/ipv4/tcp_input.c | 5 +++-- net/ipv4/tcp_recovery.c | 17 ++++++++++++++--- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 1255c592719c..970d5f00589f 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -208,6 +208,7 @@ struct tcp_sock { struct tcp_rack { struct skb_mstamp mstamp; /* (Re)sent time of the skb */ u32 rtt_us; /* Associated RTT */ + u32 end_seq; /* Ending TCP sequence of the skb */ u8 advanced; /* mstamp advanced since last lost marking */ u8 reord; /* reordering detected */ } rack; diff --git a/include/net/tcp.h b/include/net/tcp.h index 64fcdeb3358b..5fb1e75a32a9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1867,7 +1867,7 @@ extern int sysctl_tcp_recovery; #define TCP_RACK_LOST_RETRANS 0x1 extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now); -extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, +extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, const struct skb_mstamp *xmit_time, const struct skb_mstamp *ack_time); extern void tcp_rack_reo_timeout(struct sock *sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index be1191829963..e42ca11c0326 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1218,7 +1218,8 @@ static u8 tcp_sacktag_one(struct sock *sk, return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { - tcp_rack_advance(tp, sacked, xmit_time, &state->ack_time); + tcp_rack_advance(tp, sacked, end_seq, + xmit_time, &state->ack_time); if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, @@ -3171,7 +3172,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, } else if (tcp_is_sack(tp)) { tp->delivered += acked_pcount; if (!tcp_skb_spurious_retrans(tp, skb)) - tcp_rack_advance(tp, sacked, + tcp_rack_advance(tp, sacked, scb->end_seq, &skb->skb_mstamp, &sack->ack_time); } diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index eb39b1b6d1dc..1e330a2f913d 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -16,6 +16,14 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) } } +static bool tcp_rack_sent_after(const struct skb_mstamp *t1, + const struct skb_mstamp *t2, + u32 seq1, u32 seq2) +{ + return skb_mstamp_after(t1, t2) || + (t1->v64 == t2->v64 && after(seq1, seq2)); +} + /* Marks a packet lost, if some packet sent later has been (s)acked. 
* The underlying idea is similar to the traditional dupthresh and FACK * but they look at different metrics: @@ -60,7 +68,8 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now, scb->sacked & TCPCB_SACKED_ACKED) continue; - if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) { + if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp, + tp->rack.end_seq, scb->end_seq)) { /* Step 3 in draft-cheng-tcpm-rack-00.txt: * A packet is lost if its elapsed time is beyond * the recent RTT plus the reordering window. @@ -113,14 +122,15 @@ void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now) * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from * draft-cheng-tcpm-rack-00.txt */ -void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, +void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, const struct skb_mstamp *xmit_time, const struct skb_mstamp *ack_time) { u32 rtt_us; if (tp->rack.mstamp.v64 && - !skb_mstamp_after(xmit_time, &tp->rack.mstamp)) + !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp, + end_seq, tp->rack.end_seq)) return; rtt_us = skb_mstamp_us_delta(ack_time, xmit_time); @@ -140,6 +150,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, } tp->rack.rtt_us = rtt_us; tp->rack.mstamp = *xmit_time; + tp->rack.end_seq = end_seq; tp->rack.advanced = 1; } -- cgit v1.2.3 From 98e36d449cc681f1bb2ce2230243f7f977a7da1b Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:35 -0800 Subject: tcp: check undo conditions before detecting losses Currently RACK would mark loss before the undo operations in TCP loss recovery. This could incorrectly identify real losses as spurious. For example a sender first experiences a delay spike and then eventually some packets were lost due to buffer overrun. In this case, the sender should perform fast recovery b/c not all the packets were lost. But the sender may first trigger a (spurious) RTO and reset cwnd to 1. The following ACKs may used to mark real losses by tcp_rack_mark_lost. Then in tcp_process_loss this ACK could trigger F-RTO undo condition and unmark real losses and revert the cwnd reduction. If there are no more ACKs coming back, eventually the sender would timeout again instead of performing fast recovery. The patch fixes this incorrect process by always performing the undo checks before detecting losses. Fixes: 4f41b1c58a32 ("tcp: use RACK to detect losses") Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e42ca11c0326..9c98dc874825 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2801,6 +2801,21 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked) return false; } +static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag, + const struct skb_mstamp *ack_time) +{ + struct tcp_sock *tp = tcp_sk(sk); + + /* Use RACK to detect loss */ + if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) { + u32 prior_retrans = tp->retrans_out; + + tcp_rack_mark_lost(sk, ack_time); + if (prior_retrans > tp->retrans_out) + *ack_flag |= FLAG_LOST_RETRANS; + } +} + /* Process an event, which can update packets-in-flight not trivially. 
* Main goal of this function is to calculate new estimate for left_out, * taking into account both packets sitting in receiver's buffer and @@ -2866,17 +2881,6 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, } } - /* Use RACK to detect loss */ - if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) { - u32 prior_retrans = tp->retrans_out; - - tcp_rack_mark_lost(sk, ack_time); - if (prior_retrans > tp->retrans_out) { - flag |= FLAG_LOST_RETRANS; - *ack_flag |= FLAG_LOST_RETRANS; - } - } - /* E. Process state. */ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: @@ -2894,11 +2898,13 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, tcp_try_keep_open(sk); return; } + tcp_rack_identify_loss(sk, ack_flag, ack_time); break; case TCP_CA_Loss: tcp_process_loss(sk, flag, is_dupack, rexmit); - if (icsk->icsk_ca_state != TCP_CA_Open && - !(flag & FLAG_LOST_RETRANS)) + tcp_rack_identify_loss(sk, ack_flag, ack_time); + if (!(icsk->icsk_ca_state == TCP_CA_Open || + (*ack_flag & FLAG_LOST_RETRANS))) return; /* Change state if cwnd is undone or retransmits are lost */ default: @@ -2912,6 +2918,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); + tcp_rack_identify_loss(sk, ack_flag, ack_time); if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag); return; -- cgit v1.2.3 From a0370b3f3f2cfb8b424b04c0545414abaa53f5ee Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:36 -0800 Subject: tcp: enable RACK loss detection to trigger recovery This patch changes two things: 1. Start fast recovery with RACK in addition to other heuristics (e.g., DUPACK threshold, FACK). Prior to this change RACK is enabled to detect losses only after the recovery has started by other algorithms. 2. Disable TCP early retransmit. RACK subsumes the early retransmit with the new reordering timer feature. A latter patch in this series removes the early retransmit code. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 11 ++++------- net/ipv4/tcp_input.c | 29 +++++++++++++++++++++-------- net/ipv4/tcp_recovery.c | 16 ++++++++++------ 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 5fb1e75a32a9..423438dd6fe9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -262,6 +262,9 @@ extern int sysctl_tcp_slow_start_after_idle; extern int sysctl_tcp_thin_linear_timeouts; extern int sysctl_tcp_thin_dupack; extern int sysctl_tcp_early_retrans; +extern int sysctl_tcp_recovery; +#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */ + extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; extern int sysctl_tcp_min_tso_segs; @@ -1043,6 +1046,7 @@ static inline void tcp_enable_early_retrans(struct tcp_sock *tp) tp->do_early_retrans = sysctl_tcp_early_retrans && sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && + !(sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) && net->ipv4.sysctl_tcp_reordering == 3; } @@ -1859,13 +1863,6 @@ void tcp_v4_init(void); void tcp_init(void); /* tcp_recovery.c */ - -/* Flags to enable various loss recovery features. 
See below */ -extern int sysctl_tcp_recovery; - -/* Use TCP RACK to detect (some) tail and retransmit losses */ -#define TCP_RACK_LOST_RETRANS 0x1 - extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now); extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, const struct skb_mstamp *xmit_time, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9c98dc874825..4ad75b8c4fee 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2129,10 +2129,25 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag) * F.e. after RTO, when all the queue is considered as lost, * lost_out = packets_out and in_flight = retrans_out. * - * Essentially, we have now two algorithms counting + * Essentially, we have now a few algorithms detecting * lost packets. * - * FACK: It is the simplest heuristics. As soon as we decided + * If the receiver supports SACK: + * + * RFC6675/3517: It is the conventional algorithm. A packet is + * considered lost if the number of higher sequence packets + * SACKed is greater than or equal the DUPACK thoreshold + * (reordering). This is implemented in tcp_mark_head_lost and + * tcp_update_scoreboard. + * + * RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm + * (2017-) that checks timing instead of counting DUPACKs. + * Essentially a packet is considered lost if it's not S/ACKed + * after RTT + reordering_window, where both metrics are + * dynamically measured and adjusted. This is implemented in + * tcp_rack_mark_lost. + * + * FACK: it is the simplest heuristics. As soon as we decided * that something is lost, we decide that _all_ not SACKed * packets until the most forward SACK are lost. I.e. * lost_out = fackets_out - sacked_out and left_out = fackets_out. @@ -2141,16 +2156,14 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag) * takes place. We use FACK by default until reordering * is suspected on the path to this destination. * - * NewReno: when Recovery is entered, we assume that one segment + * If the receiver does not support SACK: + * + * NewReno (RFC6582): in Recovery we assume that one segment * is lost (classic Reno). While we are in Recovery and * a partial ACK arrives, we assume that one more packet * is lost (NewReno). This heuristics are the same in NewReno * and SACK. * - * Imagine, that's all! Forget about all this shamanism about CWND inflation - * deflation etc. CWND is real congestion window, never inflated, changes - * only according to classic VJ rules. - * * Really tricky (and requiring careful tuning) part of algorithm * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). 
* The first determines the moment _when_ we should reduce CWND and, @@ -2807,7 +2820,7 @@ static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag, struct tcp_sock *tp = tcp_sk(sk); /* Use RACK to detect loss */ - if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) { + if (sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) { u32 prior_retrans = tp->retrans_out; tcp_rack_mark_lost(sk, ack_time); diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 1e330a2f913d..4ecb38ae8504 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -1,7 +1,7 @@ #include #include -int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS; +int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION; static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) { @@ -24,7 +24,9 @@ static bool tcp_rack_sent_after(const struct skb_mstamp *t1, (t1->v64 == t2->v64 && after(seq1, seq2)); } -/* Marks a packet lost, if some packet sent later has been (s)acked. +/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01): + * + * Marks a packet lost, if some packet sent later has been (s)acked. * The underlying idea is similar to the traditional dupthresh and FACK * but they look at different metrics: * @@ -37,8 +39,10 @@ static bool tcp_rack_sent_after(const struct skb_mstamp *t1, * is being more resilient to reordering by simply allowing some * "settling delay", instead of tweaking the dupthresh. * - * The current version is only used after recovery starts but can be - * easily extended to detect the first loss. + * When tcp_rack_detect_loss() detects some packets are lost and we + * are not already in the CA_Recovery state, either tcp_rack_reo_timeout() + * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will + * make us enter the CA_Recovery state. */ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now, u32 *reo_timeout) @@ -54,7 +58,7 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now, * to queuing or delayed ACKs. */ reo_wnd = 1000; - if (tp->rack.reord && tcp_min_rtt(tp) != ~0U) + if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U) reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd); tcp_for_write_queue(skb, sk) { @@ -105,7 +109,7 @@ void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now) struct tcp_sock *tp = tcp_sk(sk); u32 timeout; - if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced) + if (!tp->rack.advanced) return; /* Reset the advanced flag to avoid unnecessary queue scanning */ -- cgit v1.2.3 From 89fe18e44f7ee5ab1c90d0dff5835acee7751427 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:37 -0800 Subject: tcp: extend F-RTO to catch more spurious timeouts Current F-RTO reverts cwnd reset whenever a never-retransmitted packet was (s)acked. The timeout can be declared spurious because the packets acknoledged with this ACK was transmitted before the timeout, so clearly not all the packets are lost to reset the cwnd. This nice detection does not really depend F-RTO internals. This patch applies the detection universally. On Google servers this change detected 20% more spurious timeouts. Suggested-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4ad75b8c4fee..9469ce384d3b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1939,7 +1939,6 @@ void tcp_enter_loss(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct sk_buff *skb; - bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; bool is_reneg; /* is receiver reneging on SACKs? */ bool mark_lost; @@ -2000,13 +1999,15 @@ void tcp_enter_loss(struct sock *sk) tp->high_seq = tp->snd_nxt; tcp_ecn_queue_cwr(tp); - /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous - * loss recovery is underway except recurring timeout(s) on - * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing + /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO + * if a previous recovery is underway, otherwise it may incorrectly + * call a timeout spurious if some previously retransmitted packets + * are s/acked (sec 3.2). We do not apply that retriction since + * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS + * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO + * on PTMU discovery to avoid sending new data. */ - tp->frto = sysctl_tcp_frto && - (new_recovery || icsk->icsk_retransmits) && - !inet_csk(sk)->icsk_mtup.probe_size; + tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size; } /* If ACK arrived pointing to a remembered SACK, it means that our @@ -2740,14 +2741,18 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack, tcp_try_undo_loss(sk, false)) return; - if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ - /* Step 3.b. A timeout is spurious if not all data are - * lost, i.e., never-retransmitted data are (s)acked. - */ - if ((flag & FLAG_ORIG_SACK_ACKED) && - tcp_try_undo_loss(sk, true)) - return; + /* The ACK (s)acks some never-retransmitted data meaning not all + * the data packets before the timeout were lost. Therefore we + * undo the congestion window and state. This is essentially + * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since + * a retransmitted skb is permantly marked, we can apply such an + * operation even if F-RTO was not used. + */ + if ((flag & FLAG_ORIG_SACK_ACKED) && + tcp_try_undo_loss(sk, tp->undo_marker)) + return; + if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ if (after(tp->snd_nxt, tp->high_seq)) { if (flag & FLAG_DATA_SACKED || is_dupack) tp->frto = 0; /* Step 3.a. loss was real */ -- cgit v1.2.3 From 840a3cbe89694fad75578856976f180e852e69aa Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:38 -0800 Subject: tcp: remove forward retransmit feature Forward retransmit is an esoteric feature in RFC3517 (condition(3) in the NextSeg()). Basically if a packet is not considered lost by the current criteria (# of dupacks etc), but the congestion window has room for more packets, then retransmit this packet. However it actually conflicts with the rest of recovery design. For example, when reordering is detected we want to be conservative in retransmitting packets but forward-retransmit feature would break that to force more retransmission. Also the implementation is fairly complicated inside the retransmission logic inducing extra iterations in the write queue. With RACK losses are being detected timely and this heuristic is no longer necessary. 
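For context, RFC 3517's NextSeg() chooses what to transmit in roughly the order sketched below in simplified standalone C; the struct layout and helper names are illustrative assumptions rather than kernel code, and rule (3) is the forward-retransmit case being removed here.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct seg {
		uint32_t seq;     /* starting sequence number */
		bool     sacked;  /* covered by a received SACK block */
		bool     lost;    /* IsLost() per the RFC 3517 scoreboard */
	};

	/*
	 * Simplified paraphrase of RFC 3517 NextSeg(): returns the index of
	 * the segment to retransmit, -2 if new data should be sent instead,
	 * or -1 if nothing is eligible.  SMSS, receive-window checks and
	 * sequence wraparound are ignored for brevity.
	 */
	static int next_seg(const struct seg *segs, size_t n, uint32_t high_rxt,
			    uint32_t highest_sack, bool have_new_data)
	{
		size_t i;

		/* Rule (1): un-SACKed, above HighRxt, below the highest SACK, and lost. */
		for (i = 0; i < n; i++)
			if (!segs[i].sacked && segs[i].lost &&
			    segs[i].seq > high_rxt && segs[i].seq < highest_sack)
				return (int)i;

		/* Rule (2): otherwise send previously unsent data if any is available. */
		if (have_new_data)
			return -2;

		/* Rule (3): "forward retransmit" -- rule (1) without the IsLost() test. */
		for (i = 0; i < n; i++)
			if (!segs[i].sacked &&
			    segs[i].seq > high_rxt && segs[i].seq < highest_sack)
				return (int)i;

		return -1;	/* Rule (4): nothing to send. */
	}
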
There this patch removes the feature. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 1 - net/ipv4/tcp_input.c | 5 ----- net/ipv4/tcp_output.c | 61 +++------------------------------------------------ 3 files changed, 3 insertions(+), 64 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 970d5f00589f..8e5f4c15d0e5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -307,7 +307,6 @@ struct tcp_sock { */ int lost_cnt_hint; - u32 retransmit_high; /* L-bits may be on up to this seqno */ u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9469ce384d3b..a041a92348ee 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -916,10 +916,6 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) tp->retransmit_skb_hint = skb; - - if (!tp->lost_out || - after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) - tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } /* Sum the number of packets on the wire we have marked as lost. @@ -1983,7 +1979,6 @@ void tcp_enter_loss(struct sock *sk) TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); - tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } } tcp_verify_left_out(tp); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 0ba9026cb70d..6327e4d368a4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2831,36 +2831,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) return err; } -/* Check if we forward retransmits are possible in the current - * window/congestion state. - */ -static bool tcp_can_forward_retransmit(struct sock *sk) -{ - const struct inet_connection_sock *icsk = inet_csk(sk); - const struct tcp_sock *tp = tcp_sk(sk); - - /* Forward retransmissions are possible only during Recovery. */ - if (icsk->icsk_ca_state != TCP_CA_Recovery) - return false; - - /* No forward retransmissions in Reno are possible. */ - if (tcp_is_reno(tp)) - return false; - - /* Yeah, we have to make difficult choice between forward transmission - * and retransmission... Both ways have their merits... - * - * For now we do not retransmit anything, while we have some new - * segments to send. In the other cases, follow rule 3 for - * NextSeg() specified in RFC3517. - */ - - if (tcp_may_send_now(sk)) - return false; - - return true; -} - /* This gets called after a retransmit timeout, and the initially * retransmitted data is acknowledged. 
It tries to continue * resending the rest of the retransmit queue, until either @@ -2875,24 +2845,16 @@ void tcp_xmit_retransmit_queue(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; struct sk_buff *hole = NULL; - u32 max_segs, last_lost; + u32 max_segs; int mib_idx; - int fwd_rexmitting = 0; if (!tp->packets_out) return; - if (!tp->lost_out) - tp->retransmit_high = tp->snd_una; - if (tp->retransmit_skb_hint) { skb = tp->retransmit_skb_hint; - last_lost = TCP_SKB_CB(skb)->end_seq; - if (after(last_lost, tp->retransmit_high)) - last_lost = tp->retransmit_high; } else { skb = tcp_write_queue_head(sk); - last_lost = tp->snd_una; } max_segs = tcp_tso_segs(sk, tcp_current_mss(sk)); @@ -2915,31 +2877,14 @@ void tcp_xmit_retransmit_queue(struct sock *sk) */ segs = min_t(int, segs, max_segs); - if (fwd_rexmitting) { -begin_fwd: - if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) - break; - mib_idx = LINUX_MIB_TCPFORWARDRETRANS; - - } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { - tp->retransmit_high = last_lost; - if (!tcp_can_forward_retransmit(sk)) - break; - /* Backtrack if necessary to non-L'ed skb */ - if (hole) { - skb = hole; - hole = NULL; - } - fwd_rexmitting = 1; - goto begin_fwd; - + if (tp->retrans_out >= tp->lost_out) { + break; } else if (!(sacked & TCPCB_LOST)) { if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) hole = skb; continue; } else { - last_lost = TCP_SKB_CB(skb)->end_seq; if (icsk->icsk_ca_state != TCP_CA_Loss) mib_idx = LINUX_MIB_TCPFASTRETRANS; else -- cgit v1.2.3 From bec41a11dd3dc8c54f766b4f494140ca92ba7c10 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:39 -0800 Subject: tcp: remove early retransmit This patch removes the support of RFC5827 early retransmit (i.e., fast recovery on small inflight with <3 dupacks) because it is subsumed by the new RACK loss detection. More specifically when RACK receives DUPACKs, it'll arm a reordering timer to start fast recovery after a quarter of (min)RTT, hence it covers the early retransmit except RACK does not limit itself to specific inflight or dupack numbers. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 19 +++-------- include/linux/tcp.h | 3 +- include/net/tcp.h | 19 ----------- net/ipv4/inet_diag.c | 1 - net/ipv4/tcp.c | 3 -- net/ipv4/tcp_input.c | 60 ++-------------------------------- net/ipv4/tcp_ipv4.c | 1 - net/ipv4/tcp_metrics.c | 1 - net/ipv4/tcp_minisocks.c | 1 - net/ipv4/tcp_output.c | 11 +++---- net/ipv4/tcp_timer.c | 3 -- net/ipv6/tcp_ipv6.c | 1 - 12 files changed, 12 insertions(+), 111 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 7dd65c9cf707..7de2cf79e16f 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -246,21 +246,12 @@ tcp_dsack - BOOLEAN Allows TCP to send "duplicate" SACKs. tcp_early_retrans - INTEGER - Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold - for triggering fast retransmit when the amount of outstanding data is - small and when no previously unsent data can be transmitted (such - that limited transmit could be used). Also controls the use of - Tail loss probe (TLP) that converts RTOs occurring due to tail - losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01). 
+ Tail loss probe (TLP) converts RTOs occurring due to tail + losses into fast recovery (draft-ietf-tcpm-rack). Note that + TLP requires RACK to function properly (see tcp_recovery below) Possible values: - 0 disables ER - 1 enables ER - 2 enables ER but delays fast recovery and fast retransmit - by a fourth of RTT. This mitigates connection falsely - recovers when network has a small degree of reordering - (less than 3 packets). - 3 enables delayed ER and TLP. - 4 enables TLP only. + 0 disables TLP + 3 or 4 enables TLP Default: 3 tcp_ecn - INTEGER diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 8e5f4c15d0e5..4733368f953a 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -224,8 +224,7 @@ struct tcp_sock { repair : 1, frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ u8 repair_queue; - u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ - syn_data:1, /* SYN includes data */ + u8 syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 423438dd6fe9..c55d65f74f7f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -565,7 +565,6 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, const struct sk_buff *next_skb); /* tcp_input.c */ -void tcp_resume_early_retransmit(struct sock *sk); void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); @@ -1037,24 +1036,6 @@ static inline void tcp_enable_fack(struct tcp_sock *tp) tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; } -/* TCP early-retransmit (ER) is similar to but more conservative than - * the thin-dupack feature. Enable ER only if thin-dupack is disabled. 
- */ -static inline void tcp_enable_early_retrans(struct tcp_sock *tp) -{ - struct net *net = sock_net((struct sock *)tp); - - tp->do_early_retrans = sysctl_tcp_early_retrans && - sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && - !(sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) && - net->ipv4.sysctl_tcp_reordering == 3; -} - -static inline void tcp_disable_early_retrans(struct tcp_sock *tp) -{ - tp->do_early_retrans = 0; -} - static inline unsigned int tcp_left_out(const struct tcp_sock *tp) { return tp->sacked_out + tp->lost_out; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index d216e40623d3..3828b3a805cd 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -215,7 +215,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, } if (icsk->icsk_pending == ICSK_TIME_RETRANS || - icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { r->idiag_timer = 1; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c8d46c140b4a..d9023e8ed53e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -406,7 +406,6 @@ void tcp_init_sock(struct sock *sk) tp->mss_cache = TCP_MSS_DEFAULT; tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; - tcp_enable_early_retrans(tp); tcp_assign_congestion_control(sk); tp->tsoffset = 0; @@ -2477,8 +2476,6 @@ static int do_tcp_setsockopt(struct sock *sk, int level, err = -EINVAL; else { tp->thin_dupack = val; - if (tp->thin_dupack) - tcp_disable_early_retrans(tp); } break; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a041a92348ee..79c819077a59 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -904,8 +904,6 @@ static void tcp_update_reordering(struct sock *sk, const int metric, tcp_disable_fack(tp); } - if (metric > 0) - tcp_disable_early_retrans(tp); tp->rack.reord = 1; } @@ -2054,30 +2052,6 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; } -static bool tcp_pause_early_retransmit(struct sock *sk, int flag) -{ - struct tcp_sock *tp = tcp_sk(sk); - unsigned long delay; - - /* Delay early retransmit and entering fast recovery for - * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples - * available, or RTO is scheduled to fire first. - */ - if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 || - (flag & FLAG_ECE) || !tp->srtt_us) - return false; - - delay = max(usecs_to_jiffies(tp->srtt_us >> 5), - msecs_to_jiffies(2)); - - if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) - return false; - - inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, - TCP_RTO_MAX); - return true; -} - /* Linux NewReno/SACK/FACK/ECN state machine. * -------------------------------------- * @@ -2221,16 +2195,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) tcp_is_sack(tp) && !tcp_send_head(sk)) return true; - /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious - * retransmissions due to small network reorderings, we implement - * Mitigation A.3 in the RFC and delay the retransmission for a short - * interval if appropriate. 
- */ - if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && - (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) && - !tcp_may_send_now(sk)) - return !tcp_pause_early_retransmit(sk, flag); - return false; } @@ -3050,8 +3014,7 @@ void tcp_rearm_rto(struct sock *sk) } else { u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ - if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || - icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || + if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = @@ -3068,24 +3031,6 @@ void tcp_rearm_rto(struct sock *sk) } } -/* This function is called when the delayed ER timer fires. TCP enters - * fast recovery and performs fast-retransmit. - */ -void tcp_resume_early_retransmit(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tcp_rearm_rto(sk); - - /* Stop if ER is disabled after the delayed ER timer is scheduled */ - if (!tp->do_early_retrans) - return; - - tcp_enter_recovery(sk, false); - tcp_update_scoreboard(sk, 1); - tcp_xmit_retransmit_queue(sk); -} - /* If we get here, the whole TSO packet has not been acked. */ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) { @@ -3651,8 +3596,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) skb_mstamp_get(&sack_state.ack_time); - if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || - icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) + if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); if (after(ack, prior_snd_una)) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ebf3e0c4967a..63214136cf1c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2229,7 +2229,6 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) int state; if (icsk->icsk_pending == ICSK_TIME_RETRANS || - icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index ba8f02d0f283..b9ed0d50aead 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -522,7 +522,6 @@ void tcp_init_metrics(struct sock *sk) val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val && tp->reordering != val) { tcp_disable_fack(tp); - tcp_disable_early_retrans(tp); tp->reordering = val; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 06fde26a82b7..bdb443471c39 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -468,7 +468,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->sacked_out = 0; newtp->fackets_out = 0; newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - tcp_enable_early_retrans(newtp); newtp->tlp_high_seq = 0; newtp->lsndtime = treq->snt_synack.stamp_jiffies; newsk->sk_txhash = treq->txhash; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6327e4d368a4..9a1a1494b9dd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -76,10 +76,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; tp->packets_out += tcp_skb_pcount(skb); - if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || - icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); - } NET_ADD_STATS(sock_net(sk), 
LINUX_MIB_TCPORIGDATASENT, tcp_skb_pcount(skb)); @@ -2289,8 +2287,6 @@ bool tcp_schedule_loss_probe(struct sock *sk) u32 timeout, tlp_time_stamp, rto_time_stamp; u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); - if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) - return false; /* No consecutive loss probes. */ if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { tcp_rearm_rto(sk); @@ -2309,8 +2305,9 @@ bool tcp_schedule_loss_probe(struct sock *sk) /* Schedule a loss probe in 2*RTT for SACK capable connections * in Open state, that are either limited by cwnd or application. */ - if (sysctl_tcp_early_retrans < 3 || !tp->packets_out || - !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) + if ((sysctl_tcp_early_retrans != 3 && sysctl_tcp_early_retrans != 4) || + !tp->packets_out || !tcp_is_sack(tp) || + icsk->icsk_ca_state != TCP_CA_Open) return false; if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 953c02a8566e..40d893556e67 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -566,9 +566,6 @@ void tcp_write_timer_handler(struct sock *sk) case ICSK_TIME_REO_TIMEOUT: tcp_rack_reo_timeout(sk); break; - case ICSK_TIME_EARLY_RETRANS: - tcp_resume_early_retransmit(sk); - break; case ICSK_TIME_LOSS_PROBE: tcp_send_loss_probe(sk); break; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f52c3742b404..fc14e04028bf 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1745,7 +1745,6 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) srcp = ntohs(inet->inet_sport); if (icsk->icsk_pending == ICSK_TIME_RETRANS || - icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; -- cgit v1.2.3 From ac229dca7e4e582114e1ec9765fda0915aa58468 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:40 -0800 Subject: tcp: remove RFC4653 NCR This patch removes the (partial) implementation of the aggressive limited transmit in RFC4653 TCP Non-Congestion Robustness (NCR). NCR is a mitigation to the problem created by the dynamic DUPACK threshold. With the current adaptive DUPACK threshold (tp->reordering) could cause timeouts by preventing fast recovery. For example, if the last packet of a cwnd burst was reordered, the threshold will be set to the size of cwnd. But if next application burst is smaller than threshold and has drops instead of reorderings, the sender would not trigger fast recovery but instead resorts to a timeout recovery. NCR mitigates this issue by checking the number of DUPACKs against the current flight size additionally. The techniqueue is similar to the early retransmit RFC. With RACK loss detection, this mitigation is not needed, because RACK does not use DUPACK threshold to detect losses. RACK arms a reordering timer to fire at most a quarter RTT later to start fast recovery. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 79c819077a59..87315ab1ab1a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2161,8 +2161,6 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) static bool tcp_time_to_recover(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); - __u32 packets_out; - int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; /* Trick#1: The loss is proven. */ if (tp->lost_out) @@ -2172,19 +2170,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) if (tcp_dupack_heuristics(tp) > tp->reordering) return true; - /* Trick#4: It is still not OK... But will it be useful to delay - * recovery more? - */ - packets_out = tp->packets_out; - if (packets_out <= tp->reordering && - tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) && - !tcp_may_send_now(sk)) { - /* We have nothing to send. This connection is limited - * either by receiver window or by application. - */ - return true; - } - /* If a thin stream is detected, retransmit after first * received dupack. Employ only if SACK is supported in order * to avoid possible corner-case series of spurious retransmissions -- cgit v1.2.3 From 4a7f6009441144783e5925551c72e3f2e1b0839b Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:41 -0800 Subject: tcp: remove thin_dupack feature Thin stream DUPACK is to start fast recovery on only one DUPACK provided the connection is a thin stream (i.e., low inflight). But this older feature is now subsumed with RACK. If a connection receives only a single DUPACK, RACK would arm a reordering timer and soon starts fast recovery instead of timeout if no further ACKs are received. The socket option (THIN_DUPACK) is kept as a nop for compatibility. Note that this patch does not change another thin-stream feature which enables linear RTO. Although it might be good to generalize that in the future (i.e., linear RTO for the first say 3 retries). Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 12 ------------ include/linux/tcp.h | 2 +- net/ipv4/sysctl_net_ipv4.c | 7 ------- net/ipv4/tcp.c | 6 ++---- net/ipv4/tcp_input.c | 13 ------------- 5 files changed, 3 insertions(+), 37 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 7de2cf79e16f..aa1bb49f1dc6 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -703,18 +703,6 @@ tcp_thin_linear_timeouts - BOOLEAN Documentation/networking/tcp-thin.txt Default: 0 -tcp_thin_dupack - BOOLEAN - Enable dynamic triggering of retransmissions after one dupACK - for thin streams. If set, a check is performed upon reception - of a dupACK to determine if the stream is thin (less than 4 - packets in flight). As long as the stream is found to be thin, - data is retransmitted on the first received dupACK. This - improves retransmission latency for non-aggressive thin - streams, often found to be time-dependent. - For more information on thin streams, see - Documentation/networking/tcp-thin.txt - Default: 0 - tcp_limit_output_bytes - INTEGER Controls TCP Small Queue limit per tcp socket. 
TCP bulk sender tends to increase packets in flight until it diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4733368f953a..6c22332afb75 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -220,7 +220,7 @@ struct tcp_sock { unused:5; u8 nonagle : 4,/* Disable Nagle algorithm? */ thin_lto : 1,/* Use linear timeouts for thin streams */ - thin_dupack : 1,/* Fast retransmit on first dupack */ + unused1 : 1, repair : 1, frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ u8 repair_queue; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 0f2d37e8e983..c8d283615c6f 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -536,13 +536,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, - { - .procname = "tcp_thin_dupack", - .data = &sysctl_tcp_thin_dupack, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "tcp_early_retrans", .data = &sysctl_tcp_early_retrans, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d9023e8ed53e..aba6ea76338e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2474,9 +2474,6 @@ static int do_tcp_setsockopt(struct sock *sk, int level, case TCP_THIN_DUPACK: if (val < 0 || val > 1) err = -EINVAL; - else { - tp->thin_dupack = val; - } break; case TCP_REPAIR: @@ -2966,8 +2963,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level, case TCP_THIN_LINEAR_TIMEOUTS: val = tp->thin_lto; break; + case TCP_THIN_DUPACK: - val = tp->thin_dupack; + val = 0; break; case TCP_REPAIR: diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 87315ab1ab1a..39ebc20ca1b2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -95,9 +95,6 @@ int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; int sysctl_tcp_min_rtt_wlen __read_mostly = 300; - -int sysctl_tcp_thin_dupack __read_mostly; - int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_early_retrans __read_mostly = 3; int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; @@ -2170,16 +2167,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) if (tcp_dupack_heuristics(tp) > tp->reordering) return true; - /* If a thin stream is detected, retransmit after first - * received dupack. Employ only if SACK is supported in order - * to avoid possible corner-case series of spurious retransmissions - * Use only if there are no unsent data. - */ - if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && - tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && - tcp_is_sack(tp) && !tcp_send_head(sk)) - return true; - return false; } -- cgit v1.2.3 From 94bdc9785a1136cef6a982b042719783978e8a26 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 12 Jan 2017 22:11:42 -0800 Subject: tcp: disable fack by default This patch disables FACK by default as RACK is the successor of FACK (inspired by the insights behind FACK). FACK[1] in Linux works as follows: a packet P is deemed lost, if packet Q of higher sequence is s/acked and P and Q are distant by at least dupthresh number of packets in sequence space. FACK is more aggressive than the IETF recommened recovery for SACK (RFC3517 A Conservative Selective Acknowledgment (SACK)-based Loss Recovery Algorithm for TCP), because a single SACK may trigger fast recovery. This obviously won't work well with reordering so FACK is dynamically disabled upon detecting reordering. RACK supersedes FACK by using time distance instead of sequence distance. 
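In other words, FACK asks how many packets ahead of P have been SACKed, while RACK asks how long ago P was sent relative to the newest delivered packet. A rough standalone C sketch of the two rules described in this message follows; the function and parameter names are made up for illustration, dupthresh packets are approximated as dupthresh * MSS bytes, and wrap-safe sequence/timestamp comparisons are deliberately omitted, so this is not the kernel code itself.

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * FACK: packet P is deemed lost when a SACKed packet Q lies at least
	 * dupthresh packets ahead of P in *sequence* space.
	 */
	static bool fack_deems_lost(uint32_t p_seq, uint32_t q_seq,
				    uint32_t mss, uint32_t dupthresh)
	{
		return q_seq >= p_seq + dupthresh * mss;
	}

	/*
	 * RACK: given that some packet sent after P has already been s/acked,
	 * P is deemed lost once it has gone unacknowledged for longer than the
	 * recent RTT plus a small reordering window (*time* distance).
	 */
	static bool rack_deems_lost(uint64_t p_xmit_us, uint64_t now_us,
				    uint64_t rtt_us, uint64_t reo_wnd_us)
	{
		return now_us - p_xmit_us > rtt_us + reo_wnd_us;
	}

Because the RACK test depends only on elapsed time, it keeps working even when retransmitted sequence numbers are (s)acked again, which is what makes it robust behind traffic policers.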
On reordering, RACK waits for a quarter of RTT receiving a single SACK before starting recovery. (the timer can be made more adaptive in the future by measuring reordering distance in time, but currently RTT/4 seem to work well.) Once the recovery starts, RACK behaves almost like FACK because it reduces the reodering window to 1ms, so it fast retransmits quickly. In addition RACK can detect loss retransmission as it does not care about the packet sequences (being repeated or not), which is extremely useful when the connection is going through a traffic policer. Google server experiments indicate that disabling FACK after enabling RACK has negligible impact on the overall loss recovery performance with more reordering events detected. But we still keep the FACK implementation for backup if RACK has bugs that needs to be disabled. [1] M. Mathis, J. Mahdavi, "Forward Acknowledgment: Refining TCP Congestion Control," In Proceedings of SIGCOMM '96, August 1996. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 39ebc20ca1b2..1a34e9278c07 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -79,7 +79,7 @@ int sysctl_tcp_timestamps __read_mostly = 1; int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; -int sysctl_tcp_fack __read_mostly = 1; +int sysctl_tcp_fack __read_mostly; int sysctl_tcp_max_reordering __read_mostly = 300; int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; @@ -2114,7 +2114,8 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) * dynamically measured and adjusted. This is implemented in * tcp_rack_mark_lost. * - * FACK: it is the simplest heuristics. As soon as we decided + * FACK (Disabled by default. Subsumbed by RACK): + * It is the simplest heuristics. As soon as we decided * that something is lost, we decide that _all_ not SACKed * packets until the most forward SACK are lost. I.e. * lost_out = fackets_out - sacked_out and left_out = fackets_out. -- cgit v1.2.3 From b742995445fbac874f5fe19ce2afc76c7a6ac2cf Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 13 Jan 2017 01:32:00 -0500 Subject: bnxt_en: Fix compiler warnings when CONFIG_RFS_ACCEL is not defined. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CC [M] drivers/net/ethernet/broadcom/bnxt/bnxt.o drivers/net/ethernet/broadcom/bnxt/bnxt.c:4947:21: warning: ‘bnxt_get_max_func_rss_ctxs’ defined but not used [-Wunused-function] static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) ^ CC [M] drivers/net/ethernet/broadcom/bnxt/bnxt.o drivers/net/ethernet/broadcom/bnxt/bnxt.c:4956:21: warning: ‘bnxt_get_max_func_vnics’ defined but not used [-Wunused-function] static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) ^ Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7bd2a85694dd..c0918507fdb4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4944,6 +4944,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp) return rc; } +#ifdef CONFIG_RFS_ACCEL static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) { #if defined(CONFIG_BNXT_SRIOV) @@ -4961,6 +4962,7 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) #endif return bp->pf.max_vnics; } +#endif unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) { -- cgit v1.2.3 From 341138c3e6afa8e77f9f3e773d72b37022dbcee8 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 13 Jan 2017 01:32:01 -0500 Subject: bnxt_en: Clear TPA flags when BNXT_FLAG_NO_AGG_RINGS is set. Commit bdbd1eb59c56 ("bnxt_en: Handle no aggregation ring gracefully.") introduced the BNXT_FLAG_NO_AGG_RINGS flag. For consistency, bnxt_set_tpa_flags() should also clear TPA flags when there are no aggregation rings. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c0918507fdb4..df2358bb05e1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2467,6 +2467,8 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) static void bnxt_set_tpa_flags(struct bnxt *bp) { bp->flags &= ~BNXT_FLAG_TPA; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + return; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; if (bp->dev->features & NETIF_F_GRO) -- cgit v1.2.3 From f183886c0d798ca3cf0a51e8cab3c1902fbd1e8b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 13 Jan 2017 01:32:02 -0500 Subject: bnxt_en: Update to firmware interface spec to 1.6.1. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 261 +++++++++++++++++++++++--- 1 file changed, 237 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index d0d49ed71ce4..5df32ab64f0c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,12 +11,12 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.6.0 */ +/* HSI and HWRM Specification 1.6.1 */ #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 6 -#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_STR "1.6.0" +#define HWRM_VERSION_STR "1.6.1" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
@@ -549,6 +549,8 @@ struct hwrm_ver_get_output { __le32 dev_caps_cfg; #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL u8 roce_fw_maj; u8 roce_fw_min; u8 roce_fw_bld; @@ -1919,6 +1921,219 @@ struct hwrm_port_phy_i2c_read_output { u8 valid; }; +/* hwrm_port_led_cfg */ +/* Input (64 bytes) */ +struct hwrm_port_led_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + u8 led2_color; + #define 
PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* Output (16 bytes) */ +struct hwrm_port_led_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_led_qcaps */ +/* Input (24 bytes) */ +struct hwrm_port_led_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (48 bytes) */ +struct hwrm_port_led_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused_0[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + u8 led0_group_id; + u8 unused_1; + __le16 led0_state_caps; + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + u8 led1_group_id; + u8 unused_2; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + u8 led2_group_id; + u8 unused_3; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define 
PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + u8 led3_group_id; + u8 unused_4; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_5; + u8 unused_6; + u8 unused_7; + u8 valid; +}; + /* hwrm_queue_qportcfg */ /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { @@ -4092,9 +4307,7 @@ struct hwrm_fw_set_structured_data_input { __le64 src_data_addr; __le16 data_len; u8 hdr_cnt; - u8 unused_0; - __le16 port_id; - __le16 unused_1; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4111,7 +4324,7 @@ struct hwrm_fw_set_structured_data_output { }; /* hwrm_fw_get_structured_data */ -/* Input (40 bytes) */ +/* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { __le16 req_type; __le16 cmpl_ring; @@ -4131,8 +4344,6 @@ struct hwrm_fw_get_structured_data_input { #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL u8 count; u8 unused_0; - __le16 port_id; - __le16 unused_1[3]; }; /* Output (16 bytes) */ @@ -4616,7 +4827,8 @@ struct hwrm_nvm_install_update_input { __le32 install_type; #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL - __le32 unused_0; + __le16 flags; + __le16 unused_0; }; /* Output (24 bytes) */ @@ -4973,12 +5185,13 @@ struct ctx_hw_stats { struct hwrm_struct_hdr { __le16 struct_id; #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL - #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL - #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL - #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL - #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL - #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL - #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL __le16 len; u8 version; u8 count; @@ -4988,14 +5201,14 @@ struct hwrm_struct_hdr { __le16 unused_0[3]; }; -/* DCBX Application configuration structure (8 bytes) */ -struct hwrm_struct_data_dcbx_app_cfg { - __le16 protocol_id; +/* DCBX Application configuration structure (1057) (8 bytes) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; u8 protocol_selector; - #define 
STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL u8 priority; u8 valid; u8 unused_0[3]; -- cgit v1.2.3 From 5ad2cbeed74bd1e89ac4ba14288158ec7eb167da Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 13 Jan 2017 01:32:03 -0500 Subject: bnxt_en: Add support for ethtool -p. Add LED blinking code to support ethtool -p on the PF. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 40 +++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 17 +++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 44 ++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 23 ++++++++++++ 4 files changed, 123 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index df2358bb05e1..2b46f9b09a03 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5621,6 +5621,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) +{ + struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_led_qcaps_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { + int i; + + bp->num_leds = resp->num_leds; + memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * + bp->num_leds); + for (i = 0; i < bp->num_leds; i++) { + struct bnxt_led_info *led = &bp->leds[i]; + __le16 caps = led->led_state_caps; + + if (!led->led_group_id || + !BNXT_LED_ALT_BLINK_CAP(caps)) { + bp->num_leds = 0; + break; + } + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -7244,6 +7283,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_port_led_qcaps(bp); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f6b9b1c530fe..52a1cc061ba3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -868,6 +868,20 @@ struct bnxt_queue_info { u8 queue_profile; }; +#define BNXT_MAX_LED 4 + +struct bnxt_led_info { + u8 led_id; + u8 led_type; + u8 led_group_id; + u8 unused; + __le16 led_state_caps; +#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \ + cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED)) + + __le16 led_color_caps; +}; + #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 #define 
BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 @@ -1123,6 +1137,9 @@ struct bnxt { struct ethtool_eee eee; u32 lpi_tmr_lo; u32 lpi_tmr_hi; + + u8 num_leds; + struct bnxt_led_info leds[BNXT_MAX_LED]; }; #define BNXT_RX_STATS_OFFSET(counter) \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index dd21be4a5fdd..24818e1e59f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2080,6 +2080,47 @@ static int bnxt_nway_reset(struct net_device *dev) return rc; } +static int bnxt_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct hwrm_port_led_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_led_cfg *led_cfg; + u8 led_state; + __le16 duration; + int i, rc; + + if (!bp->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + if (state == ETHTOOL_ID_ACTIVE) { + led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; + duration = cpu_to_le16(500); + } else if (state == ETHTOOL_ID_INACTIVE) { + led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; + duration = cpu_to_le16(0); + } else { + return -EINVAL; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + for (i = 0; i < bp->num_leds; i++, led_cfg++) { + req.enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = duration; + led_cfg->led_blink_off = duration; + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + const struct ethtool_ops bnxt_ethtool_ops = { .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, @@ -2111,5 +2152,6 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_eee = bnxt_set_eee, .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, - .nway_reset = bnxt_nway_reset + .nway_reset = bnxt_nway_reset, + .set_phys_id = bnxt_set_phys_id, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 3abc03b60dbc..ed1e555292e9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -10,6 +10,29 @@ #ifndef BNXT_ETHTOOL_H #define BNXT_ETHTOOL_H +struct bnxt_led_cfg { + u8 led_id; + u8 led_state; + u8 led_color; + u8 unused; + __le16 led_blink_on; + __le16 led_blink_off; + u8 led_group_id; + u8 rsvd; +}; + +#define BNXT_LED_DFLT_ENA \ + (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ + PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \ + PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID) + +#define BNXT_LED_DFLT_ENA_SHIFT 6 + +#define BNXT_LED_DFLT_ENABLES(x) \ + cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) + extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); -- cgit v1.2.3 From 2f5938467bd7f34e59a1d6d3809f5970f62e194b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 13 Jan 2017 01:32:04 -0500 Subject: bnxt_en: Add the ulp_sriov_cfg hooks for bnxt_re RDMA driver. Add the ulp_sriov_cfg callbacks when the number of VFs is changing. 
This allows the RDMA driver to provision RDMA resources for the VFs. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 64ef0e5dad8c..0b8cd7443843 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -15,6 +15,7 @@ #include #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" @@ -555,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) if (rc) goto err_out2; + bnxt_ulp_sriov_cfg(bp, *num_vfs); + rc = pci_enable_sriov(bp->pdev, *num_vfs); if (rc) goto err_out2; @@ -596,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp) rtnl_lock(); bnxt_restore_pf_fw_resources(bp); rtnl_unlock(); + + bnxt_ulp_sriov_cfg(bp, 0); } int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) -- cgit v1.2.3 From f750e82e7fcba219a0b401d75a58d9fbd956744c Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 13 Jan 2017 14:46:40 +0530 Subject: cxgb4: Fix misleading packet/frame count stats. Do not count pause frames as part of general TX/RX frame counters. Based on the original work of Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 18 ++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 17 +++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f113015074b1..ac712acf256d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5501,6 +5501,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx, void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) { u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A); #define GET_STAT(name) \ t4_read_reg64(adap, \ @@ -5532,6 +5533,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATTX_F) { + p->tx_frames -= p->tx_pause; + p->tx_octets -= p->tx_pause * 64; + } + if (stat_ctl & COUNTPAUSEMCTX_F) + p->tx_mcast_frames -= p->tx_pause; + } p->rx_octets = GET_STAT(RX_PORT_BYTES); p->rx_frames = GET_STAT(RX_PORT_FRAMES); p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); @@ -5560,6 +5569,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATRX_F) { + p->rx_frames -= p->rx_pause; + p->rx_octets -= p->rx_pause * 64; + } + if (stat_ctl & COUNTPAUSEMCRX_F) + p->rx_mcast_frames -= p->rx_pause; + } + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; p->rx_ovflow2 = (bgmap & 4) ? 
GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index e685163b1357..f71001d69c47 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1798,12 +1798,29 @@ #define MPS_CMN_CTL_A 0x9000 +#define COUNTPAUSEMCRX_S 5 +#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S) +#define COUNTPAUSEMCRX_F COUNTPAUSEMCRX_V(1U) + +#define COUNTPAUSESTATRX_S 4 +#define COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S) +#define COUNTPAUSESTATRX_F COUNTPAUSESTATRX_V(1U) + +#define COUNTPAUSEMCTX_S 3 +#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S) +#define COUNTPAUSEMCTX_F COUNTPAUSEMCTX_V(1U) + +#define COUNTPAUSESTATTX_S 2 +#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S) +#define COUNTPAUSESTATTX_F COUNTPAUSESTATTX_V(1U) + #define NUMPORTS_S 0 #define NUMPORTS_M 0x3U #define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M) #define MPS_INT_CAUSE_A 0x9008 #define MPS_TX_INT_CAUSE_A 0x9408 +#define MPS_STAT_CTL_A 0x9600 #define FRMERR_S 15 #define FRMERR_V(x) ((x) << FRMERR_S) -- cgit v1.2.3 From ca4b5eb88aa0da96ede750d8b894e7079612aa65 Mon Sep 17 00:00:00 2001 From: Shyam Saini Date: Sat, 14 Jan 2017 06:47:40 +0530 Subject: cxgb4: Remove redundant memset before memcpy The region set by the call to memset is immediately overwritten by the subsequent call to memcpy, which makes the memset redundant. Also remove the memset(&info, 0, sizeof(info)) on line 398 because info is memcpy()'ed to before being used in the loop and it isn't used outside of the loop. Signed-off-by: Shyam Saini Reviewed-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sched.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index cbd68a8fe2e4..c9026352a842 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, struct ch_sched_params info; struct ch_sched_params tp; - memset(&info, 0, sizeof(info)); - memset(&tp, 0, sizeof(tp)); - memcpy(&tp, p, sizeof(tp)); /* Don't try to match class parameter */ tp.u.params.class = SCHED_CLS_NONE; @@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, if (e->state == SCHED_STATE_UNUSED) continue; - memset(&info, 0, sizeof(info)); memcpy(&info, &e->info, sizeof(info)); /* Don't try to match class parameter */ info.u.params.class = SCHED_CLS_NONE; @@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (!e) goto out; - memset(&np, 0, sizeof(np)); memcpy(&np, p, sizeof(np)); np.u.params.class = e->idx; -- cgit v1.2.3 From 1ecf9284c14614f9854dfa4c66f70d17c21182d1 Mon Sep 17 00:00:00 2001 From: jpinto Date: Thu, 12 Jan 2017 09:56:18 +0000 Subject: synopsys: remove dwc_eth_qos driver This driver is no longer necessary since it was merged into stmmac. Acked-by: Lars Persson Signed-off-by: Joao Pinto Signed-off-by: David S.
Miller --- MAINTAINERS | 7 - arch/arm/configs/multi_v7_defconfig | 3 +- drivers/net/ethernet/Kconfig | 1 - drivers/net/ethernet/Makefile | 1 - drivers/net/ethernet/synopsys/Kconfig | 27 - drivers/net/ethernet/synopsys/Makefile | 5 - drivers/net/ethernet/synopsys/dwc_eth_qos.c | 2996 --------------------------- 7 files changed, 2 insertions(+), 3038 deletions(-) delete mode 100644 drivers/net/ethernet/synopsys/Kconfig delete mode 100644 drivers/net/ethernet/synopsys/Makefile delete mode 100644 drivers/net/ethernet/synopsys/dwc_eth_qos.c diff --git a/MAINTAINERS b/MAINTAINERS index d14e42bef72e..b8a19813b10a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10864,13 +10864,6 @@ F: include/linux/dma/dw.h F: include/linux/platform_data/dma-dw.h F: drivers/dma/dw/ -SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver -M: Lars Persson -L: netdev@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt -F: drivers/net/ethernet/synopsys/dwc_eth_qos.c - SYNOPSYS DESIGNWARE I2C DRIVER M: Jarkko Nikula R: Andy Shevchenko diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b01a43851294..64f4419f14e8 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -253,7 +253,8 @@ CONFIG_R8169=y CONFIG_SH_ETH=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y -CONFIG_SYNOPSYS_DWC_ETH_QOS=y +CONFIG_STMMAC_PLATFORM=y +CONFIG_DWMAC_DWC_QOS_ETH=y CONFIG_TI_CPSW=y CONFIG_XILINX_EMACLITE=y CONFIG_AT803X_PHY=y diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index e4c28fed61d5..afc07d4434bb 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -170,7 +170,6 @@ source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" -source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/tile/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 24330f4885a9..e7861a8aa817 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -81,7 +81,6 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ -obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig deleted file mode 100644 index 8276ee5a7d54..000000000000 --- a/drivers/net/ethernet/synopsys/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ -# -# Synopsys network device configuration -# - -config NET_VENDOR_SYNOPSYS - bool "Synopsys devices" - default y - ---help--- - If you have a network (Ethernet) device belonging to this class, say Y. - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about Synopsys devices. If you say Y, you will be asked - for your specific device in the following questions. 
- -if NET_VENDOR_SYNOPSYS - -config SYNOPSYS_DWC_ETH_QOS - tristate "Sypnopsys DWC Ethernet QOS v4.10a support" - select PHYLIB - select CRC32 - select MII - depends on OF && HAS_DMA - ---help--- - This driver supports the DWC Ethernet QoS from Synopsys - -endif # NET_VENDOR_SYNOPSYS diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile deleted file mode 100644 index 7a375723fc18..000000000000 --- a/drivers/net/ethernet/synopsys/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Synopsys network device drivers. -# - -obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c deleted file mode 100644 index 467dcc53f5e1..000000000000 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ /dev/null @@ -1,2996 +0,0 @@ -/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver - * - * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC). - * This version introduced a lot of changes which breaks backwards - * compatibility the non-QoS IP from Synopsys (used in the ST Micro drivers). - * Some fields differ between version 4.00a and 4.10a, mainly the interrupt - * bit fields. The driver could be made compatible with 4.00, if all relevant - * HW erratas are handled. - * - * The GMAC is highly configurable at synthesis time. This driver has been - * developed for a subset of the total available feature set. Currently - * it supports: - * - TSO - * - Checksum offload for RX and TX. - * - Energy efficient ethernet. - * - GMII phy interface. - * - The statistics module. - * - Single RX and TX queue. - * - * Copyright (C) 2015 Axis Communications AB. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRIVER_NAME "dwceqos" -#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver" -#define DRIVER_VERSION "0.9" - -#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) - -#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */ - -#define DWCEQOS_LPI_TIMER_MIN 8 -#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1) - -#define DWCEQOS_RX_BUF_SIZE 2048 - -#define DWCEQOS_RX_DCNT 256 -#define DWCEQOS_TX_DCNT 256 - -#define DWCEQOS_HASH_TABLE_SIZE 64 - -/* The size field in the DMA descriptor is 14 bits */ -#define BYTES_PER_DMA_DESC 16376 - -/* Hardware registers */ -#define START_MAC_REG_OFFSET 0x0000 -#define MAX_MAC_REG_OFFSET 0x0bd0 -#define START_MTL_REG_OFFSET 0x0c00 -#define MAX_MTL_REG_OFFSET 0x0d7c -#define START_DMA_REG_OFFSET 0x1000 -#define MAX_DMA_REG_OFFSET 0x117C - -#define REG_SPACE_SIZE 0x1800 - -/* DMA */ -#define REG_DWCEQOS_DMA_MODE 0x1000 -#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004 -#define REG_DWCEQOS_DMA_IS 0x1008 -#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c - -/* DMA channel registers */ -#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100 -#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104 -#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114 -#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c -#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120 -#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c -#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130 -#define REG_DWCEQOS_DMA_CH0_IE 0x1134 -#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144 -#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c -#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154 -#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c -#define REG_DWCEQOS_DMA_CH0_STA 0x1160 - -#define DWCEQOS_DMA_MODE_TXPR BIT(11) -#define DWCEQOS_DMA_MODE_DA BIT(1) - -#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31) -#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0) -#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12) - -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \ - (((x) << 16) & 0x000F0000) -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16) - -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \ - (((x) << 24) & 0x0F000000) -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24) - -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \ - (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1) - -#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16) -#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18) - -#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16) -#define DWCEQOS_DMA_CH_CTRL_START BIT(0) -#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1) -#define DWCEQOS_DMA_CH_TX_OSP BIT(4) -#define DWCEQOS_DMA_CH_TX_TSE BIT(12) - -#define DWCEQOS_DMA_CH0_IE_NIE BIT(15) -#define DWCEQOS_DMA_CH0_IE_AIE BIT(14) -#define DWCEQOS_DMA_CH0_IE_RIE BIT(6) -#define DWCEQOS_DMA_CH0_IE_TIE BIT(0) -#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12) -#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7) - -#define DWCEQOS_DMA_IS_DC0IS BIT(0) -#define DWCEQOS_DMA_IS_MTLIS 
BIT(16) -#define DWCEQOS_DMA_IS_MACIS BIT(17) - -#define DWCEQOS_DMA_CH0_IS_TI BIT(0) -#define DWCEQOS_DMA_CH0_IS_RI BIT(6) -#define DWCEQOS_DMA_CH0_IS_RBU BIT(7) -#define DWCEQOS_DMA_CH0_IS_FBE BIT(12) -#define DWCEQOS_DMA_CH0_IS_CDE BIT(13) -#define DWCEQOS_DMA_CH0_IS_AIS BIT(14) - -#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17) - -#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20) - -/* DMA descriptor bits for RX normal descriptor (read format) */ -#define DWCEQOS_DMA_RDES3_OWN BIT(31) -#define DWCEQOS_DMA_RDES3_INTE BIT(30) -#define DWCEQOS_DMA_RDES3_BUF2V BIT(25) -#define DWCEQOS_DMA_RDES3_BUF1V BIT(24) - -/* DMA descriptor bits for RX normal descriptor (write back format) */ -#define DWCEQOS_DMA_RDES1_IPCE BIT(7) -#define DWCEQOS_DMA_RDES3_ES BIT(15) -#define DWCEQOS_DMA_RDES3_E_JT BIT(14) -#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff) -#define DWCEQOS_DMA_RDES1_PT 0x00000007 -#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0) -#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1) -#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003 - -/* DMA descriptor bits for TX normal descriptor (read format) */ -#define DWCEQOS_DMA_TDES2_IOC BIT(31) -#define DWCEQOS_DMA_TDES3_OWN BIT(31) -#define DWCEQOS_DMA_TDES3_CTXT BIT(30) -#define DWCEQOS_DMA_TDES3_FD BIT(29) -#define DWCEQOS_DMA_TDES3_LD BIT(28) -#define DWCEQOS_DMA_TDES3_CIPH BIT(16) -#define DWCEQOS_DMA_TDES3_CIPP BIT(17) -#define DWCEQOS_DMA_TDES3_CA 0x00030000 -#define DWCEQOS_DMA_TDES3_TSE BIT(18) -#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19) -#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16) - -#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26) - -/* DMA channel states */ -#define DMA_TX_CH_STOPPED 0 -#define DMA_TX_CH_SUSPENDED 6 - -#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12) - -/* MTL */ -#define REG_DWCEQOS_MTL_OPER 0x0c00 -#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c -#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08 -#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38 - -#define REG_DWCEQOS_MTL_IS 0x0c20 -#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00 -#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30 -#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34 -#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c - -#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c - -#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060 - -#define DWCEQOS_MTL_TXQ_TXQEN BIT(3) -#define DWCEQOS_MTL_TXQ_TSF BIT(1) -#define DWCEQOS_MTL_TXQ_FTQ BIT(0) -#define DWCEQOS_MTL_TXQ_TTC512 0x00000070 - -#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8) - -#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12) -#define DWCEQOS_MTL_RXQ_EHFC BIT(7) -#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6) -#define DWCEQOS_MTL_RXQ_FEP BIT(4) -#define DWCEQOS_MTL_RXQ_FUP BIT(3) -#define DWCEQOS_MTL_RXQ_RSF BIT(5) -#define DWCEQOS_MTL_RXQ_RTC32 BIT(0) - -/* MAC */ -#define REG_DWCEQOS_MAC_CFG 0x0000 -#define REG_DWCEQOS_MAC_EXT_CFG 0x0004 -#define REG_DWCEQOS_MAC_PKT_FILT 0x0008 -#define REG_DWCEQOS_MAC_WD_TO 0x000c -#define REG_DWCEQOS_HASTABLE_LO 0x0010 -#define REG_DWCEQOS_HASTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_IS 0x00b0 -#define REG_DWCEQOS_MAC_IE 0x00b4 -#define REG_DWCEQOS_MAC_STAT 0x00b8 -#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200 -#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204 -#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300 -#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304 -#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0 -#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c -#define 
REG_DWCEQOS_MAC_HW_FEATURE1 0x0120 -#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124 -#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010 -#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0 -#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4 -#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8 -#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc -#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090 -#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070 - -#define DWCEQOS_MAC_CFG_ACS BIT(20) -#define DWCEQOS_MAC_CFG_JD BIT(17) -#define DWCEQOS_MAC_CFG_JE BIT(16) -#define DWCEQOS_MAC_CFG_PS BIT(15) -#define DWCEQOS_MAC_CFG_FES BIT(14) -#define DWCEQOS_MAC_CFG_DM BIT(13) -#define DWCEQOS_MAC_CFG_DO BIT(10) -#define DWCEQOS_MAC_CFG_TE BIT(1) -#define DWCEQOS_MAC_CFG_IPC BIT(27) -#define DWCEQOS_MAC_CFG_RE BIT(0) - -#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8)) -#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8)) - -#define DWCEQOS_MAC_IS_LPI_INT BIT(5) -#define DWCEQOS_MAC_IS_MMC_INT BIT(8) - -#define DWCEQOS_MAC_RXQ_EN BIT(1) -#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31) -#define DWCEQOS_MAC_PKT_FILT_RA BIT(31) -#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10) -#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9) -#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8) -#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5) -#define DWCEQOS_MAC_PKT_FILT_PM BIT(4) -#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3) -#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2) -#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1) -#define DWCEQOS_MAC_PKT_FILT_PR BIT(0) - -#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8) -#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2 -#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3 -#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0 -#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1 -#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4 -#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5 -#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c -#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2) -#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0) - -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21) - -#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0)) - -#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \ - DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \ - DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN) - -#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) - -#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1) -#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16) -#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4) - -/* Features */ -#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16) -#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14) -#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2) -#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13) -#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1) -#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0) - -#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18) -#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6) -#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f)) - -#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \ - (1 + (((feature1) & 
0x1fc0000) >> 18)) - -#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21) -#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16) - -#define DWCEQOS_DMA_MODE_SWR BIT(0) - -#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048 - -/* Mac Management Counters */ -#define REG_DWCEQOS_MMC_CTRL 0x0700 -#define REG_DWCEQOS_MMC_RXIRQ 0x0704 -#define REG_DWCEQOS_MMC_TXIRQ 0x0708 -#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c -#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710 - -#define DWCEQOS_MMC_CTRL_CNTRST BIT(0) -#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2) - -#define DWC_MMC_TXLPITRANSCNTR 0x07F0 -#define DWC_MMC_TXLPIUSCNTR 0x07EC -#define DWC_MMC_TXOVERSIZE_G 0x0778 -#define DWC_MMC_TXVLANPACKETS_G 0x0774 -#define DWC_MMC_TXPAUSEPACKETS 0x0770 -#define DWC_MMC_TXEXCESSDEF 0x076C -#define DWC_MMC_TXPACKETCOUNT_G 0x0768 -#define DWC_MMC_TXOCTETCOUNT_G 0x0764 -#define DWC_MMC_TXCARRIERERROR 0x0760 -#define DWC_MMC_TXEXCESSCOL 0x075C -#define DWC_MMC_TXLATECOL 0x0758 -#define DWC_MMC_TXDEFERRED 0x0754 -#define DWC_MMC_TXMULTICOL_G 0x0750 -#define DWC_MMC_TXSINGLECOL_G 0x074C -#define DWC_MMC_TXUNDERFLOWERROR 0x0748 -#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744 -#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740 -#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C -#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738 -#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734 -#define DWC_MMC_TX256TO511OCTETS_GB 0x0730 -#define DWC_MMC_TX128TO255OCTETS_GB 0x072C -#define DWC_MMC_TX65TO127OCTETS_GB 0x0728 -#define DWC_MMC_TX64OCTETS_GB 0x0724 -#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720 -#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C -#define DWC_MMC_TXPACKETCOUNT_GB 0x0718 -#define DWC_MMC_TXOCTETCOUNT_GB 0x0714 - -#define DWC_MMC_RXLPITRANSCNTR 0x07F8 -#define DWC_MMC_RXLPIUSCNTR 0x07F4 -#define DWC_MMC_RXCTRLPACKETS_G 0x07E4 -#define DWC_MMC_RXRCVERROR 0x07E0 -#define DWC_MMC_RXWATCHDOG 0x07DC -#define DWC_MMC_RXVLANPACKETS_GB 0x07D8 -#define DWC_MMC_RXFIFOOVERFLOW 0x07D4 -#define DWC_MMC_RXPAUSEPACKETS 0x07D0 -#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC -#define DWC_MMC_RXLENGTHERROR 0x07C8 -#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4 -#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0 -#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC -#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8 -#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4 -#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0 -#define DWC_MMC_RX64OCTETS_GB 0x07AC -#define DWC_MMC_RXOVERSIZE_G 0x07A8 -#define DWC_MMC_RXUNDERSIZE_G 0x07A4 -#define DWC_MMC_RXJABBERERROR 0x07A0 -#define DWC_MMC_RXRUNTERROR 0x079C -#define DWC_MMC_RXALIGNMENTERROR 0x0798 -#define DWC_MMC_RXCRCERROR 0x0794 -#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790 -#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C -#define DWC_MMC_RXOCTETCOUNT_G 0x0788 -#define DWC_MMC_RXOCTETCOUNT_GB 0x0784 -#define DWC_MMC_RXPACKETCOUNT_GB 0x0780 - -static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); - -/* DMA ring descriptor. 
These are used as support descriptors for the HW DMA */ -struct ring_desc { - struct sk_buff *skb; - dma_addr_t mapping; - size_t len; -}; - -/* DMA hardware descriptor */ -struct dwceqos_dma_desc { - u32 des0; - u32 des1; - u32 des2; - u32 des3; -} ____cacheline_aligned; - -struct dwceqos_mmc_counters { - __u64 txlpitranscntr; - __u64 txpiuscntr; - __u64 txoversize_g; - __u64 txvlanpackets_g; - __u64 txpausepackets; - __u64 txexcessdef; - __u64 txpacketcount_g; - __u64 txoctetcount_g; - __u64 txcarriererror; - __u64 txexcesscol; - __u64 txlatecol; - __u64 txdeferred; - __u64 txmulticol_g; - __u64 txsinglecol_g; - __u64 txunderflowerror; - __u64 txbroadcastpackets_gb; - __u64 txmulticastpackets_gb; - __u64 txunicastpackets_gb; - __u64 tx1024tomaxoctets_gb; - __u64 tx512to1023octets_gb; - __u64 tx256to511octets_gb; - __u64 tx128to255octets_gb; - __u64 tx65to127octets_gb; - __u64 tx64octets_gb; - __u64 txmulticastpackets_g; - __u64 txbroadcastpackets_g; - __u64 txpacketcount_gb; - __u64 txoctetcount_gb; - - __u64 rxlpitranscntr; - __u64 rxlpiuscntr; - __u64 rxctrlpackets_g; - __u64 rxrcverror; - __u64 rxwatchdog; - __u64 rxvlanpackets_gb; - __u64 rxfifooverflow; - __u64 rxpausepackets; - __u64 rxoutofrangetype; - __u64 rxlengtherror; - __u64 rxunicastpackets_g; - __u64 rx1024tomaxoctets_gb; - __u64 rx512to1023octets_gb; - __u64 rx256to511octets_gb; - __u64 rx128to255octets_gb; - __u64 rx65to127octets_gb; - __u64 rx64octets_gb; - __u64 rxoversize_g; - __u64 rxundersize_g; - __u64 rxjabbererror; - __u64 rxrunterror; - __u64 rxalignmenterror; - __u64 rxcrcerror; - __u64 rxmulticastpackets_g; - __u64 rxbroadcastpackets_g; - __u64 rxoctetcount_g; - __u64 rxoctetcount_gb; - __u64 rxpacketcount_gb; -}; - -/* Ethtool statistics */ - -struct dwceqos_stat { - const char stat_name[ETH_GSTRING_LEN]; - int offset; -}; - -#define STAT_ITEM(name, var) \ - {\ - name,\ - offsetof(struct dwceqos_mmc_counters, var),\ - } - -static const struct dwceqos_stat dwceqos_ethtool_stats[] = { - STAT_ITEM("tx_bytes", txoctetcount_gb), - STAT_ITEM("tx_packets", txpacketcount_gb), - STAT_ITEM("tx_unicst_packets", txunicastpackets_gb), - STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb), - STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb), - STAT_ITEM("tx_pause_packets", txpausepackets), - STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb), - STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb), - STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb), - STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb), - STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb), - STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb), - STAT_ITEM("tx_underflow_errors", txunderflowerror), - STAT_ITEM("tx_lpi_count", txlpitranscntr), - - STAT_ITEM("rx_bytes", rxoctetcount_gb), - STAT_ITEM("rx_packets", rxpacketcount_gb), - STAT_ITEM("rx_unicast_packets", rxunicastpackets_g), - STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g), - STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g), - STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb), - STAT_ITEM("rx_pause_packets", rxpausepackets), - STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb), - STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb), - STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb), - STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb), - STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb), - STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb), - 
STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow), - STAT_ITEM("rx_oversize_packets", rxoversize_g), - STAT_ITEM("rx_undersize_packets", rxundersize_g), - STAT_ITEM("rx_jabbers", rxjabbererror), - STAT_ITEM("rx_align_errors", rxalignmenterror), - STAT_ITEM("rx_crc_errors", rxcrcerror), - STAT_ITEM("rx_lpi_count", rxlpitranscntr), -}; - -/* Configuration of AXI bus parameters. - * These values depend on the parameters set on the MAC core as well - * as the AXI interconnect. - */ -struct dwceqos_bus_cfg { - /* Enable AXI low-power interface. */ - bool en_lpi; - /* Limit on number of outstanding AXI write requests. */ - u32 write_requests; - /* Limit on number of outstanding AXI read requests. */ - u32 read_requests; - /* Bitmap of allowed AXI burst lengths, 4-256 beats. */ - u32 burst_map; - /* DMA Programmable burst length*/ - u32 tx_pbl; - u32 rx_pbl; -}; - -struct dwceqos_flowcontrol { - int autoneg; - int rx; - int rx_current; - int tx; - int tx_current; -}; - -struct net_local { - void __iomem *baseaddr; - struct clk *phy_ref_clk; - struct clk *apb_pclk; - - struct device_node *phy_node; - struct net_device *ndev; - struct platform_device *pdev; - - u32 msg_enable; - - struct tasklet_struct tx_bdreclaim_tasklet; - struct workqueue_struct *txtimeout_handler_wq; - struct work_struct txtimeout_reinit; - - phy_interface_t phy_interface; - struct mii_bus *mii_bus; - - unsigned int link; - unsigned int speed; - unsigned int duplex; - - struct napi_struct napi; - - /* DMA Descriptor Areas */ - struct ring_desc *rx_skb; - struct ring_desc *tx_skb; - - struct dwceqos_dma_desc *tx_descs; - struct dwceqos_dma_desc *rx_descs; - - /* DMA Mapped Descriptor areas*/ - dma_addr_t tx_descs_addr; - dma_addr_t rx_descs_addr; - dma_addr_t tx_descs_tail_addr; - dma_addr_t rx_descs_tail_addr; - - size_t tx_free; - size_t tx_next; - size_t rx_cur; - size_t tx_cur; - - /* Spinlocks for accessing DMA Descriptors */ - spinlock_t tx_lock; - - /* Spinlock for register read-modify-writes. */ - spinlock_t hw_lock; - - u32 feature0; - u32 feature1; - u32 feature2; - - struct dwceqos_bus_cfg bus_cfg; - bool en_tx_lpi_clockgating; - - int eee_enabled; - int eee_active; - int csr_val; - u32 gso_size; - - struct dwceqos_mmc_counters mmc_counters; - /* Protect the mmc_counter updates. */ - spinlock_t stats_lock; - u32 mmc_rx_counters_mask; - u32 mmc_tx_counters_mask; - - struct dwceqos_flowcontrol flowcontrol; - - /* Tracks the intermediate state of phy started but hardware - * init not finished yet. 
- */ - bool phy_defer; -}; - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask); - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n); -static int dwceqos_stop(struct net_device *ndev); -static int dwceqos_open(struct net_device *ndev); -static void dwceqos_tx_poll_demand(struct net_local *lp); - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable); -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable); - -static void dwceqos_reset_state(struct net_local *lp); - -#define dwceqos_read(lp, reg) \ - readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg)) -#define dwceqos_write(lp, reg, val) \ - writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg)) - -static void dwceqos_reset_state(struct net_local *lp) -{ - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.rx_current = 0; - lp->flowcontrol.tx_current = 0; - lp->eee_active = 0; - lp->eee_enabled = 0; -} - -static void print_descriptor(struct net_local *lp, int index, int tx) -{ - struct dwceqos_dma_desc *dd; - - if (tx) - dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index]; - else - dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index]; - - pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX", - index, dd); - pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2, - dd->des3); -} - -static void print_status(struct net_local *lp) -{ - size_t desci, i; - - pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free, - lp->tx_cur, lp->tx_next); - - print_descriptor(lp, lp->rx_cur, 0); - - for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0; - i < DWCEQOS_TX_DCNT; - ++i) { - print_descriptor(lp, desci, 1); - desci = (desci + 1) % DWCEQOS_TX_DCNT; - } - - pr_info("DMA_Debug_Status0: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0)); - pr_info("DMA_CH0_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_IS)); - pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n", - dwceqos_read(lp, 0x1144)); - pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n", - dwceqos_read(lp, 0x1154)); - pr_info("MTL_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST)); - pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST)); - pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST)); - pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC), - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC)); -} - -static void dwceqos_mdio_set_csr(struct net_local *lp) -{ - int rate = clk_get_rate(lp->apb_pclk); - - if (rate <= 20000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20; - else if (rate <= 35000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35; - else if (rate <= 60000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60; - else if (rate <= 100000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100; - else if (rate <= 150000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150; - else if (rate <= 250000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250; -} - -/* Simple MDIO functions implementing mii_bus */ -static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - int data; - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_READ; - dwceqos_write(lp, 
REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - - data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA); - if (i == 5) { - netdev_warn(lp->ndev, "MDIO read timed out\n"); - data = 0xffff; - } - - return data & 0xffff; -} - -static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, - u16 value) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value); - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE; - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - if (i == 5) - netdev_warn(lp->ndev, "MDIO write timed out\n"); - return 0; -} - -static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return phy_mii_ioctl(phydev, rq, cmd); - default: - dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd); - return -EOPNOTSUPP; - } -} - -static void dwceqos_link_down(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - /* Indicate link down to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_link_up(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - unsigned long flags; - - /* Indicate link up to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - - lp->eee_active = !phy_init_eee(ndev->phydev, 0); - - /* Check for changed EEE capability */ - if (!lp->eee_active && lp->eee_enabled) { - lp->eee_enabled = 0; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } -} - -static void dwceqos_set_speed(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - struct phy_device *phydev = ndev->phydev; - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES | - DWCEQOS_MAC_CFG_DM); - - if (phydev->duplex) - regval |= DWCEQOS_MAC_CFG_DM; - if (phydev->speed == SPEED_10) { - regval |= DWCEQOS_MAC_CFG_PS; - } else if (phydev->speed == SPEED_100) { - regval |= DWCEQOS_MAC_CFG_PS | - DWCEQOS_MAC_CFG_FES; - } else if (phydev->speed != SPEED_1000) { - netdev_err(lp->ndev, - "unknown PHY speed %d\n", - phydev->speed); - return; - } - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval); -} - -static void dwceqos_adjust_link(struct net_device *ndev) -{ - 
struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - int status_change = 0; - - if (lp->phy_defer) - return; - - if (phydev->link) { - if ((lp->speed != phydev->speed) || - (lp->duplex != phydev->duplex)) { - dwceqos_set_speed(lp); - - lp->speed = phydev->speed; - lp->duplex = phydev->duplex; - status_change = 1; - } - - if (lp->flowcontrol.autoneg) { - lp->flowcontrol.rx = phydev->pause || - phydev->asym_pause; - lp->flowcontrol.tx = phydev->pause || - phydev->asym_pause; - } - - if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set rx flow to %d\n", - lp->flowcontrol.rx); - dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx); - lp->flowcontrol.rx_current = lp->flowcontrol.rx; - } - if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set tx flow to %d\n", - lp->flowcontrol.tx); - dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx); - lp->flowcontrol.tx_current = lp->flowcontrol.tx; - } - } - - if (phydev->link != lp->link) { - lp->link = phydev->link; - status_change = 1; - } - - if (status_change) { - if (phydev->link) { - netif_trans_update(lp->ndev); - dwceqos_link_up(lp); - } else { - dwceqos_link_down(lp); - } - phy_print_status(phydev); - } -} - -static int dwceqos_mii_probe(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = NULL; - - if (lp->phy_node) { - phydev = of_phy_connect(lp->ndev, - lp->phy_node, - &dwceqos_adjust_link, - 0, - lp->phy_interface); - - if (!phydev) { - netdev_err(ndev, "no PHY found\n"); - return -1; - } - } else { - netdev_err(ndev, "no PHY configured\n"); - return -ENODEV; - } - - if (netif_msg_probe(lp)) - phy_attached_info(phydev); - - phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.autoneg = AUTONEG_ENABLE; - - return 0; -} - -static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index) -{ - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index); - goto err_out; - } - - new_skb_baddr = dma_map_single(lp->ndev->dev.parent, - new_skb->data, DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - new_skb = NULL; - goto err_out; - } - - lp->rx_descs[index].des0 = new_skb_baddr; - lp->rx_descs[index].des1 = 0; - lp->rx_descs[index].des2 = 0; - lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_BUF1V | - DWCEQOS_DMA_RDES3_OWN; - - lp->rx_skb[index].mapping = new_skb_baddr; - lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE; - -err_out: - lp->rx_skb[index].skb = new_skb; -} - -static void dwceqos_clean_rings(struct net_local *lp) -{ - int i; - - if (lp->rx_skb) { - for (i = 0; i < DWCEQOS_RX_DCNT; i++) { - if (lp->rx_skb[i].skb) { - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[i].mapping, - lp->rx_skb[i].len, - DMA_FROM_DEVICE); - - dev_kfree_skb(lp->rx_skb[i].skb); - lp->rx_skb[i].skb = NULL; - lp->rx_skb[i].mapping = 0; - } - } - } - - if (lp->tx_skb) { - for (i = 0; i < DWCEQOS_TX_DCNT; i++) { - if (lp->tx_skb[i].skb) { - dev_kfree_skb(lp->tx_skb[i].skb); - lp->tx_skb[i].skb = NULL; - } - if (lp->tx_skb[i].mapping) { - dma_unmap_single(lp->ndev->dev.parent, - 
lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - lp->tx_skb[i].mapping = 0; - } - } - } -} - -static void dwceqos_descriptor_free(struct net_local *lp) -{ - int size; - - dwceqos_clean_rings(lp); - - kfree(lp->tx_skb); - lp->tx_skb = NULL; - kfree(lp->rx_skb); - lp->rx_skb = NULL; - - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->rx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->rx_descs), lp->rx_descs_addr); - lp->rx_descs = NULL; - } - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->tx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->tx_descs), lp->tx_descs_addr); - lp->tx_descs = NULL; - } -} - -static int dwceqos_descriptor_init(struct net_local *lp) -{ - int size; - u32 i; - - lp->gso_size = 0; - - lp->tx_skb = NULL; - lp->rx_skb = NULL; - lp->rx_descs = NULL; - lp->tx_descs = NULL; - - /* Reset the DMA indexes */ - lp->rx_cur = 0; - lp->tx_cur = 0; - lp->tx_next = 0; - lp->tx_free = DWCEQOS_TX_DCNT; - - /* Allocate Ring descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc); - lp->rx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->rx_skb) - goto err_out; - - size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc); - lp->tx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->tx_skb) - goto err_out; - - /* Allocate DMA descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->rx_descs_addr, GFP_KERNEL); - if (!lp->rx_descs) - goto err_out; - lp->rx_descs_tail_addr = lp->rx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT; - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->tx_descs_addr, GFP_KERNEL); - if (!lp->tx_descs) - goto err_out; - lp->tx_descs_tail_addr = lp->tx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT; - - /* Initialize RX Ring Descriptors and buffers */ - for (i = 0; i < DWCEQOS_RX_DCNT; ++i) { - dwceqos_alloc_rxring_desc(lp, i); - if (!(lp->rx_skb[lp->rx_cur].skb)) - goto err_out; - } - - /* Initialize TX Descriptors */ - for (i = 0; i < DWCEQOS_TX_DCNT; ++i) { - lp->tx_descs[i].des0 = 0; - lp->tx_descs[i].des1 = 0; - lp->tx_descs[i].des2 = 0; - lp->tx_descs[i].des3 = 0; - } - - /* Make descriptor writes visible to the DMA. 
*/ - wmb(); - - return 0; - -err_out: - dwceqos_descriptor_free(lp); - return -ENOMEM; -} - -static int dwceqos_packet_avail(struct net_local *lp) -{ - return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN); -} - -static void dwceqos_get_hwfeatures(struct net_local *lp) -{ - lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0); - lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1); - lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2); -} - -static void dwceqos_dma_enable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_enable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_enable_mmc_interrupt(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0); -} - -static int dwceqos_mii_init(struct net_local *lp) -{ - int ret = -ENXIO; - struct resource res; - struct device_node *mdionode; - - mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio"); - - if (!mdionode) - return 0; - - lp->mii_bus = mdiobus_alloc(); - if (!lp->mii_bus) { - ret = -ENOMEM; - goto err_out; - } - - lp->mii_bus->name = "DWCEQOS MII bus"; - lp->mii_bus->read = &dwceqos_mdio_read; - lp->mii_bus->write = &dwceqos_mdio_write; - lp->mii_bus->priv = lp; - lp->mii_bus->parent = &lp->pdev->dev; - - of_address_to_resource(lp->pdev->dev.of_node, 0, &res); - snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", - (unsigned long long)res.start); - if (of_mdiobus_register(lp->mii_bus, mdionode)) - goto err_out_free_mdiobus; - - return 0; - -err_out_free_mdiobus: - mdiobus_free(lp->mii_bus); -err_out: - of_node_put(mdionode); - return ret; -} - -/* DMA reset. When issued also resets all MTL and MAC registers as well */ -static void dwceqos_reset_hw(struct net_local *lp) -{ - /* Wait (at most) 0.5 seconds for DMA reset*/ - int i = 5000; - u32 reg; - - /* Force gigabit to guarantee a TX clock for GMII. 
*/ - reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES); - reg |= DWCEQOS_MAC_CFG_DM; - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR); - - do { - udelay(100); - i--; - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE); - } while ((reg & DWCEQOS_DMA_MODE_SWR) && i); - /* We might experience a timeout if the chip clock mux is broken */ - if (!i) - netdev_err(lp->ndev, "DMA reset timed out!\n"); -} - -static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status) -{ - if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) { - netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } - if (dma_status & DWCEQOS_DMA_CH0_IS_REB) { - netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } -} - -static void dwceqos_mmc_interrupt(struct net_local *lp) -{ - unsigned long flags; - - spin_lock_irqsave(&lp->stats_lock, flags); - - /* A latched mmc interrupt can not be masked, we must read - * all the counters with an interrupt pending. - */ - dwceqos_read_mmc_counters(lp, - dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ), - dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ)); - - spin_unlock_irqrestore(&lp->stats_lock, flags); -} - -static void dwceqos_mac_interrupt(struct net_local *lp) -{ - u32 cause; - - cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS); - - if (cause & DWCEQOS_MAC_IS_MMC_INT) - dwceqos_mmc_interrupt(lp); -} - -static irqreturn_t dwceqos_interrupt(int irq, void *dev_id) -{ - struct net_device *ndev = dev_id; - struct net_local *lp = netdev_priv(ndev); - - u32 cause; - u32 dma_status; - irqreturn_t ret = IRQ_NONE; - - cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS); - /* DMA Channel 0 Interrupt */ - if (cause & DWCEQOS_DMA_IS_DC0IS) { - dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA); - - /* Transmit Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_TI) { - tasklet_schedule(&lp->tx_bdreclaim_tasklet); - dwceqos_dma_disable_txirq(lp); - } - - /* Receive Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_RI) { - /* Disable RX IRQs */ - dwceqos_dma_disable_rxirq(lp); - napi_schedule(&lp->napi); - } - - /* Fatal Bus Error interrupt */ - if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) { - dwceqos_fatal_bus_error(lp, dma_status); - - /* errata 9000831707 */ - dma_status |= DWCEQOS_DMA_CH0_IS_TEB | - DWCEQOS_DMA_CH0_IS_REB; - } - - /* Ack all DMA Channel 0 IRQs */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MTLIS) { - u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL); - - dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MACIS) { - dwceqos_mac_interrupt(lp); - ret = IRQ_HANDLED; - } - return ret; -} - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL); - if (enable) - regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - else - regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval); - - 
spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - /* MTL flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - if (enable) - regval |= DWCEQOS_MTL_RXQ_EHFC; - else - regval &= ~DWCEQOS_MTL_RXQ_EHFC; - - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - /* MAC flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW); - if (enable) - regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE; - else - regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_flow_control(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - int RQS, RFD, RFA; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - - /* The queue size is in units of 256 bytes. We want 512 bytes units for - * the threshold fields. - */ - RQS = ((regval >> 20) & 0x3FF) + 1; - RQS /= 2; - - /* The thresholds are relative to a full queue, with a bias - * of 1 KiByte below full. - */ - RFD = RQS / 2 - 2; - RFA = RQS / 8 - 2; - - regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8); - - if (RFD >= 0 && RFA >= 0) { - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - } else { - netdev_warn(lp->ndev, - "FIFO too small for flow control."); - } - - regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) | - DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS; - - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_clock(struct net_local *lp) -{ - unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000; - - BUG_ON(!rate_mhz); - - dwceqos_write(lp, - REG_DWCEQOS_MAC_1US_TIC_COUNTER, - DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1)); -} - -static void dwceqos_configure_bus(struct net_local *lp) -{ - u32 sysbus_reg; - - /* N.B. We do not support the Fixed Burst mode because it - * opens a race window by making HW access to DMA descriptors - * non-atomic. - */ - - sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL; - - if (lp->bus_cfg.en_lpi) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI; - - if (lp->bus_cfg.burst_map) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - lp->bus_cfg.burst_map); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT); - - if (lp->bus_cfg.read_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - lp->bus_cfg.read_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT); - - if (lp->bus_cfg.write_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - lp->bus_cfg.write_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT); - - if (netif_msg_hw(lp)) - netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg); -} - -static void dwceqos_init_hw(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - u32 buswidth; - u32 dma_skip; - - /* Software reset */ - dwceqos_reset_hw(lp); - - dwceqos_configure_bus(lp); - - /* Probe data bus width, 32/64/128 bits. 
*/ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL); - buswidth = (regval ^ 0xF) + 1; - - /* Cache-align dma descriptors. */ - dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL, - DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) | - DWCEQOS_DMA_CH_CTRL_PBLX8); - - /* Initialize DMA Channel 0 */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST, - (u32)lp->tx_descs_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST, - (u32)lp->rx_descs_addr); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - if (lp->bus_cfg.tx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - /* Enable TSO if the HW support it */ - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - regval |= DWCEQOS_DMA_CH_TX_TSE; - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval); - - if (lp->bus_cfg.rx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - regval |= DWCEQOS_DMA_CH_CTRL_START; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - /* Initialize MTL Queues */ - regval = DWCEQOS_MTL_SCHALG_STRICT; - dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval); - - regval = DWCEQOS_MTL_TXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF | - DWCEQOS_MTL_TXQ_TTC512; - dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval); - - regval = DWCEQOS_MTL_RXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF; - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - dwceqos_configure_flow_control(lp); - - /* Initialize MAC */ - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - lp->eee_enabled = 0; - - dwceqos_configure_clock(lp); - - /* MMC counters */ - - /* probe implemented counters */ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u); - lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK); - lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK); - - dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST | - DWCEQOS_MMC_CTRL_RSTONRD); - dwceqos_enable_mmc_interrupt(lp); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); - dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | - DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - /* Start TX DMA */ - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, - regval | DWCEQOS_DMA_CH_CTRL_START); - - /* Enable MAC TX/RX */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, - regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - lp->phy_defer = false; - mutex_lock(&ndev->phydev->lock); - phy_read_status(ndev->phydev); - dwceqos_adjust_link(lp->ndev); - mutex_unlock(&ndev->phydev->lock); -} - -static void dwceqos_tx_reclaim(unsigned long data) -{ - struct net_device *ndev = 
(struct net_device *)data; - struct net_local *lp = netdev_priv(ndev); - unsigned int tx_bytes = 0; - unsigned int tx_packets = 0; - - spin_lock(&lp->tx_lock); - - while (lp->tx_free < DWCEQOS_TX_DCNT) { - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur]; - struct ring_desc *rd = &lp->tx_skb[lp->tx_cur]; - - /* Descriptor still being held by DMA ? */ - if (dd->des3 & DWCEQOS_DMA_TDES3_OWN) - break; - - if (rd->mapping) - dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len, - DMA_TO_DEVICE); - - if (unlikely(rd->skb)) { - ++tx_packets; - tx_bytes += rd->skb->len; - dev_consume_skb_any(rd->skb); - } - - rd->skb = NULL; - rd->mapping = 0; - lp->tx_free++; - lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT; - - if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) && - (dd->des3 & DWCEQOS_DMA_RDES3_ES)) { - if (netif_msg_tx_err(lp)) - netdev_err(ndev, "TX Error, TDES3 = 0x%x\n", - dd->des3); - if (netif_msg_hw(lp)) - print_status(lp); - } - } - spin_unlock(&lp->tx_lock); - - netdev_completed_queue(ndev, tx_packets, tx_bytes); - - dwceqos_dma_enable_txirq(lp); - netif_wake_queue(ndev); -} - -static int dwceqos_rx(struct net_local *lp, int budget) -{ - struct sk_buff *skb; - u32 tot_size = 0; - unsigned int n_packets = 0; - unsigned int n_descs = 0; - u32 len; - - struct dwceqos_dma_desc *dd; - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - while (n_descs < budget) { - if (!dwceqos_packet_avail(lp)) - break; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "no memory for new sk_buff\n"); - break; - } - - /* Get dma handle of skb->data */ - new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent, - new_skb->data, - DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - break; - } - - /* Read descriptor data after reading owner bit. */ - dma_rmb(); - - dd = &lp->rx_descs[lp->rx_cur]; - len = DWCEQOS_DMA_RDES3_PL(dd->des3); - skb = lp->rx_skb[lp->rx_cur].skb; - - /* Unmap old buffer */ - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[lp->rx_cur].mapping, - lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE); - - /* Discard packet on reception error or bad checksum */ - if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) || - (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) { - dev_kfree_skb(skb); - skb = NULL; - } else { - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, lp->ndev); - switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) { - case DWCEQOS_DMA_RDES1_PT_UDP: - case DWCEQOS_DMA_RDES1_PT_TCP: - case DWCEQOS_DMA_RDES1_PT_ICMP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - skb->ip_summed = CHECKSUM_NONE; - break; - } - } - - if (unlikely(!skb)) { - if (netif_msg_rx_err(lp)) - netdev_dbg(lp->ndev, "rx error: des3=%X\n", - lp->rx_descs[lp->rx_cur].des3); - } else { - tot_size += skb->len; - n_packets++; - - netif_receive_skb(skb); - } - - lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr; - lp->rx_descs[lp->rx_cur].des1 = 0; - lp->rx_descs[lp->rx_cur].des2 = 0; - /* The DMA must observe des0/1/2 written before des3. 
*/ - wmb(); - lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_OWN | - DWCEQOS_DMA_RDES3_BUF1V; - - lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr; - lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE; - lp->rx_skb[lp->rx_cur].skb = new_skb; - - n_descs++; - lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT; - } - - /* Make sure any ownership update is written to the descriptors before - * DMA wakeup. - */ - wmb(); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI); - /* Wake up RX by writing tail pointer */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - return n_descs; -} - -static int dwceqos_rx_poll(struct napi_struct *napi, int budget) -{ - struct net_local *lp = container_of(napi, struct net_local, napi); - int work_done = 0; - - work_done = dwceqos_rx(lp, budget - work_done); - - if (!dwceqos_packet_avail(lp) && work_done < budget) { - napi_complete(napi); - dwceqos_dma_enable_rxirq(lp); - } else { - work_done = budget; - } - - return work_done; -} - -/* Reinitialize function if a TX timed out */ -static void dwceqos_reinit_for_txtimeout(struct work_struct *data) -{ - struct net_local *lp = container_of(data, struct net_local, - txtimeout_reinit); - - netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n", - DWCEQOS_TX_TIMEOUT); - - if (netif_msg_hw(lp)) - print_status(lp); - - rtnl_lock(); - dwceqos_stop(lp->ndev); - dwceqos_open(lp->ndev); - rtnl_unlock(); -} - -/* DT Probing function called by main probe */ -static inline int dwceqos_probe_config_dt(struct platform_device *pdev) -{ - struct net_device *ndev; - struct net_local *lp; - const void *mac_address; - struct dwceqos_bus_cfg *bus_cfg; - struct device_node *np = pdev->dev.of_node; - - ndev = platform_get_drvdata(pdev); - lp = netdev_priv(ndev); - bus_cfg = &lp->bus_cfg; - - /* Set the MAC address. */ - mac_address = of_get_mac_address(pdev->dev.of_node); - if (mac_address) - ether_addr_copy(ndev->dev_addr, mac_address); - - /* These are all optional parameters */ - lp->en_tx_lpi_clockgating = of_property_read_bool(np, - "snps,en-tx-lpi-clockgating"); - bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi"); - of_property_read_u32(np, "snps,write-requests", - &bus_cfg->write_requests); - of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests); - of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map); - of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl); - of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl); - - netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n", - bus_cfg->en_lpi, - bus_cfg->write_requests, - bus_cfg->read_requests, - bus_cfg->burst_map, - bus_cfg->rx_pbl, - bus_cfg->tx_pbl); - - return 0; -} - -static int dwceqos_open(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - int res; - - dwceqos_reset_state(lp); - res = dwceqos_descriptor_init(lp); - if (res) { - netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res); - return res; - } - netdev_reset_queue(ndev); - - /* The dwceqos reset state machine requires all phy clocks to complete, - * hence the unusual init order with phy_start first. - */ - lp->phy_defer = true; - phy_start(ndev->phydev); - dwceqos_init_hw(lp); - napi_enable(&lp->napi); - - netif_start_queue(ndev); - tasklet_enable(&lp->tx_bdreclaim_tasklet); - - /* Enable Interrupts -- do this only after we enable NAPI and the - * tasklet. 
- */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, - DWCEQOS_DMA_CH0_IE_NIE | - DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | - DWCEQOS_DMA_CH0_IE_AIE | - DWCEQOS_DMA_CH0_IE_FBEE); - - return 0; -} - -static bool dweqos_is_tx_dma_suspended(struct net_local *lp) -{ - u32 reg; - - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0); - reg = DMA_GET_TX_STATE_CH0(reg); - - return reg == DMA_TX_CH_SUSPENDED; -} - -static void dwceqos_drain_dma(struct net_local *lp) -{ - /* Wait for all pending TX buffers to be sent. Upper limit based - * on max frame size on a 10 Mbit link. - */ - size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100; - - while (!dweqos_is_tx_dma_suspended(lp) && limit--) - usleep_range(100, 200); -} - -static int dwceqos_stop(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - tasklet_disable(&lp->tx_bdreclaim_tasklet); - napi_disable(&lp->napi); - - /* Stop all tx before we drain the tx dma. */ - netif_tx_lock_bh(lp->ndev); - netif_stop_queue(ndev); - netif_tx_unlock_bh(lp->ndev); - - dwceqos_drain_dma(lp); - dwceqos_reset_hw(lp); - phy_stop(ndev->phydev); - - dwceqos_descriptor_free(lp); - - return 0; -} - -static void dwceqos_dmadesc_set_ctx(struct net_local *lp, - unsigned short gso_size) -{ - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next]; - - dd->des0 = 0; - dd->des1 = 0; - dd->des2 = gso_size; - dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV; - - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; -} - -static void dwceqos_tx_poll_demand(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); -} - -struct dwceqos_tx { - size_t nr_descriptors; - size_t initial_descriptor; - size_t last_descriptor; - size_t prev_gso_size; - size_t network_header_len; -}; - -static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - size_t n = 1; - size_t i; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) - ++n; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) / - BYTES_PER_DMA_DESC; - } - - tx->nr_descriptors = n; - tx->initial_descriptor = lp->tx_next; - tx->last_descriptor = lp->tx_next; - tx->prev_gso_size = lp->gso_size; - - tx->network_header_len = skb_transport_offset(skb); - if (skb_is_gso(skb)) - tx->network_header_len += tcp_hdrlen(skb); -} - -static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd; - struct dwceqos_dma_desc *dd; - size_t payload_len; - dma_addr_t dma_handle; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) { - dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size); - lp->gso_size = skb_shinfo(skb)->gso_size; - } - - dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "TX DMA Mapping error\n"); - return -ENOMEM; - } - - rd = &lp->tx_skb[lp->tx_next]; - dd = &lp->tx_descs[lp->tx_next]; - - rd->skb = NULL; - rd->len = skb_headlen(skb); - rd->mapping = dma_handle; - - /* Set up DMA Descriptor */ - dd->des0 = dma_handle; - - if (skb_is_gso(skb)) { - payload_len = skb_headlen(skb) - tx->network_header_len; - - if (payload_len) - dd->des1 = dma_handle + tx->network_header_len; - dd->des2 = tx->network_header_len | - DWCEQOS_DMA_DES2_B2L(payload_len); - dd->des3 = DWCEQOS_DMA_TDES3_TSE | - 
DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) | - (skb->len - tx->network_header_len); - } else { - dd->des1 = 0; - dd->des2 = skb_headlen(skb); - dd->des3 = skb->len; - - switch (skb->ip_summed) { - case CHECKSUM_PARTIAL: - dd->des3 |= DWCEQOS_DMA_TDES3_CA; - case CHECKSUM_NONE: - case CHECKSUM_UNNECESSARY: - case CHECKSUM_COMPLETE: - default: - break; - } - } - - dd->des3 |= DWCEQOS_DMA_TDES3_FD; - if (lp->tx_next != tx->initial_descriptor) - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - - return 0; -} - -static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd = NULL; - struct dwceqos_dma_desc *dd; - dma_addr_t dma_handle; - size_t i; - - /* Setup more ring and DMA descriptor if the packet is fragmented */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - size_t frag_size; - size_t consumed_size; - - /* Map DMA Area */ - dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "DMA Mapping error\n"); - return -ENOMEM; - } - - /* order-3 fragments span more than one descriptor. */ - frag_size = skb_frag_size(frag); - consumed_size = 0; - while (consumed_size < frag_size) { - size_t dma_size = min_t(size_t, 16376, - frag_size - consumed_size); - - rd = &lp->tx_skb[lp->tx_next]; - memset(rd, 0, sizeof(*rd)); - - dd = &lp->tx_descs[lp->tx_next]; - - /* Set DMA Descriptor fields */ - dd->des0 = dma_handle + consumed_size; - dd->des1 = 0; - dd->des2 = dma_size; - - if (skb_is_gso(skb)) - dd->des3 = (skb->len - tx->network_header_len); - else - dd->des3 = skb->len; - - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - consumed_size += dma_size; - } - - rd->len = skb_frag_size(frag); - rd->mapping = dma_handle; - } - - return 0; -} - -static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD; - lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC; - - lp->tx_skb[tx->last_descriptor].skb = skb; - - /* Make all descriptor updates visible to the DMA before setting the - * owner bit. - */ - wmb(); - - lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN; - - /* Make the owner bit visible before TX wakeup. 
*/ - wmb(); - - dwceqos_tx_poll_demand(lp); -} - -static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx) -{ - size_t i = tx->initial_descriptor; - - while (i != lp->tx_next) { - if (lp->tx_skb[i].mapping) - dma_unmap_single(lp->ndev->dev.parent, - lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - - lp->tx_skb[i].mapping = 0; - lp->tx_skb[i].skb = NULL; - - memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i])); - - i = (i + 1) % DWCEQOS_TX_DCNT; - } - - lp->tx_next = tx->initial_descriptor; - lp->gso_size = tx->prev_gso_size; -} - -static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_tx trans; - int err; - - dwceqos_tx_prepare(skb, lp, &trans); - if (lp->tx_free < trans.nr_descriptors) { - netif_stop_queue(ndev); - return NETDEV_TX_BUSY; - } - - err = dwceqos_tx_linear(skb, lp, &trans); - if (err) - goto tx_error; - - err = dwceqos_tx_frags(skb, lp, &trans); - if (err) - goto tx_error; - - WARN_ON(lp->tx_next != - ((trans.initial_descriptor + trans.nr_descriptors) % - DWCEQOS_TX_DCNT)); - - spin_lock_bh(&lp->tx_lock); - lp->tx_free -= trans.nr_descriptors; - dwceqos_tx_finalize(skb, lp, &trans); - netdev_sent_queue(ndev, skb->len); - spin_unlock_bh(&lp->tx_lock); - - netif_trans_update(ndev); - return 0; - -tx_error: - dwceqos_tx_rollback(lp, &trans); - dev_kfree_skb_any(skb); - return 0; -} - -/* Set MAC address and then update HW accordingly */ -static int dwceqos_set_mac_address(struct net_device *ndev, void *addr) -{ - struct net_local *lp = netdev_priv(ndev); - struct sockaddr *hwaddr = (struct sockaddr *)addr; - - if (netif_running(ndev)) - return -EBUSY; - - if (!is_valid_ether_addr(hwaddr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len); - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - return 0; -} - -static void dwceqos_tx_timeout(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit); -} - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n) -{ - unsigned long data; - - data = (addr[5] << 8) | addr[4]; - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), - data | DWCEQOS_MAC_MAC_ADDR_HI_EN); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data); -} - -static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n) -{ - /* Do not disable MAC address 0 */ - if (reg_n != 0) - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0); -} - -static void dwceqos_set_rx_mode(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval = 0; - u32 mc_filter[2]; - int reg = 1; - struct netdev_hw_addr *ha; - unsigned int max_mac_addr; - - max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); - - if (ndev->flags & IFF_PROMISC) { - regval = DWCEQOS_MAC_PKT_FILT_PR; - } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) || - (ndev->flags & IFF_ALLMULTI))) { - regval = DWCEQOS_MAC_PKT_FILT_PM; - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff); - } else if (!netdev_mc_empty(ndev)) { - regval = DWCEQOS_MAC_PKT_FILT_HMC; - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, ndev) { - /* The upper 6 bits of the calculated CRC are used to - * index the contens of the hash table - */ - int bit_nr = 
bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; - /* The most significant bit determines the register - * to use (H/L) while the other 5 bits determine - * the bit within the register. - */ - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - } - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]); - } - if (netdev_uc_count(ndev) > max_mac_addr) { - regval |= DWCEQOS_MAC_PKT_FILT_PR; - } else { - netdev_for_each_uc_addr(ha, ndev) { - dwceqos_set_umac_addr(lp, ha->addr, reg); - reg++; - } - for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++) - dwceqos_disable_umac_addr(lp, reg); - } - dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void dwceqos_poll_controller(struct net_device *ndev) -{ - disable_irq(ndev->irq); - dwceqos_interrupt(ndev->irq, ndev); - enable_irq(ndev->irq); -} -#endif - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask) -{ - if (tx_mask & BIT(27)) - lp->mmc_counters.txlpitranscntr += - dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR); - if (tx_mask & BIT(26)) - lp->mmc_counters.txpiuscntr += - dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR); - if (tx_mask & BIT(25)) - lp->mmc_counters.txoversize_g += - dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G); - if (tx_mask & BIT(24)) - lp->mmc_counters.txvlanpackets_g += - dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G); - if (tx_mask & BIT(23)) - lp->mmc_counters.txpausepackets += - dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS); - if (tx_mask & BIT(22)) - lp->mmc_counters.txexcessdef += - dwceqos_read(lp, DWC_MMC_TXEXCESSDEF); - if (tx_mask & BIT(21)) - lp->mmc_counters.txpacketcount_g += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G); - if (tx_mask & BIT(20)) - lp->mmc_counters.txoctetcount_g += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G); - if (tx_mask & BIT(19)) - lp->mmc_counters.txcarriererror += - dwceqos_read(lp, DWC_MMC_TXCARRIERERROR); - if (tx_mask & BIT(18)) - lp->mmc_counters.txexcesscol += - dwceqos_read(lp, DWC_MMC_TXEXCESSCOL); - if (tx_mask & BIT(17)) - lp->mmc_counters.txlatecol += - dwceqos_read(lp, DWC_MMC_TXLATECOL); - if (tx_mask & BIT(16)) - lp->mmc_counters.txdeferred += - dwceqos_read(lp, DWC_MMC_TXDEFERRED); - if (tx_mask & BIT(15)) - lp->mmc_counters.txmulticol_g += - dwceqos_read(lp, DWC_MMC_TXMULTICOL_G); - if (tx_mask & BIT(14)) - lp->mmc_counters.txsinglecol_g += - dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G); - if (tx_mask & BIT(13)) - lp->mmc_counters.txunderflowerror += - dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR); - if (tx_mask & BIT(12)) - lp->mmc_counters.txbroadcastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB); - if (tx_mask & BIT(11)) - lp->mmc_counters.txmulticastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB); - if (tx_mask & BIT(10)) - lp->mmc_counters.txunicastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB); - if (tx_mask & BIT(9)) - lp->mmc_counters.tx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB); - if (tx_mask & BIT(8)) - lp->mmc_counters.tx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB); - if (tx_mask & BIT(7)) - lp->mmc_counters.tx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB); - if (tx_mask & BIT(6)) - lp->mmc_counters.tx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB); - if (tx_mask & BIT(5)) - lp->mmc_counters.tx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB); - if (tx_mask & BIT(4)) - lp->mmc_counters.tx64octets_gb 
+= - dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB); - if (tx_mask & BIT(3)) - lp->mmc_counters.txmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G); - if (tx_mask & BIT(2)) - lp->mmc_counters.txbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G); - if (tx_mask & BIT(1)) - lp->mmc_counters.txpacketcount_gb += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB); - if (tx_mask & BIT(0)) - lp->mmc_counters.txoctetcount_gb += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB); - - if (rx_mask & BIT(27)) - lp->mmc_counters.rxlpitranscntr += - dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR); - if (rx_mask & BIT(26)) - lp->mmc_counters.rxlpiuscntr += - dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR); - if (rx_mask & BIT(25)) - lp->mmc_counters.rxctrlpackets_g += - dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G); - if (rx_mask & BIT(24)) - lp->mmc_counters.rxrcverror += - dwceqos_read(lp, DWC_MMC_RXRCVERROR); - if (rx_mask & BIT(23)) - lp->mmc_counters.rxwatchdog += - dwceqos_read(lp, DWC_MMC_RXWATCHDOG); - if (rx_mask & BIT(22)) - lp->mmc_counters.rxvlanpackets_gb += - dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB); - if (rx_mask & BIT(21)) - lp->mmc_counters.rxfifooverflow += - dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW); - if (rx_mask & BIT(20)) - lp->mmc_counters.rxpausepackets += - dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS); - if (rx_mask & BIT(19)) - lp->mmc_counters.rxoutofrangetype += - dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE); - if (rx_mask & BIT(18)) - lp->mmc_counters.rxlengtherror += - dwceqos_read(lp, DWC_MMC_RXLENGTHERROR); - if (rx_mask & BIT(17)) - lp->mmc_counters.rxunicastpackets_g += - dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G); - if (rx_mask & BIT(16)) - lp->mmc_counters.rx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB); - if (rx_mask & BIT(15)) - lp->mmc_counters.rx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB); - if (rx_mask & BIT(14)) - lp->mmc_counters.rx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB); - if (rx_mask & BIT(13)) - lp->mmc_counters.rx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB); - if (rx_mask & BIT(12)) - lp->mmc_counters.rx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB); - if (rx_mask & BIT(11)) - lp->mmc_counters.rx64octets_gb += - dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB); - if (rx_mask & BIT(10)) - lp->mmc_counters.rxoversize_g += - dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G); - if (rx_mask & BIT(9)) - lp->mmc_counters.rxundersize_g += - dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G); - if (rx_mask & BIT(8)) - lp->mmc_counters.rxjabbererror += - dwceqos_read(lp, DWC_MMC_RXJABBERERROR); - if (rx_mask & BIT(7)) - lp->mmc_counters.rxrunterror += - dwceqos_read(lp, DWC_MMC_RXRUNTERROR); - if (rx_mask & BIT(6)) - lp->mmc_counters.rxalignmenterror += - dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR); - if (rx_mask & BIT(5)) - lp->mmc_counters.rxcrcerror += - dwceqos_read(lp, DWC_MMC_RXCRCERROR); - if (rx_mask & BIT(4)) - lp->mmc_counters.rxmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G); - if (rx_mask & BIT(3)) - lp->mmc_counters.rxbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G); - if (rx_mask & BIT(2)) - lp->mmc_counters.rxoctetcount_g += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G); - if (rx_mask & BIT(1)) - lp->mmc_counters.rxoctetcount_gb += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB); - if (rx_mask & BIT(0)) - lp->mmc_counters.rxpacketcount_gb += - dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); -} - -static void 
-dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) -{ - unsigned long flags; - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - s->rx_packets = hwstats->rxpacketcount_gb; - s->rx_bytes = hwstats->rxoctetcount_gb; - s->rx_errors = hwstats->rxpacketcount_gb - - hwstats->rxbroadcastpackets_g - - hwstats->rxmulticastpackets_g - - hwstats->rxunicastpackets_g; - s->multicast = hwstats->rxmulticastpackets_g; - s->rx_length_errors = hwstats->rxlengtherror; - s->rx_crc_errors = hwstats->rxcrcerror; - s->rx_fifo_errors = hwstats->rxfifooverflow; - - s->tx_packets = hwstats->txpacketcount_gb; - s->tx_bytes = hwstats->txoctetcount_gb; - - if (lp->mmc_tx_counters_mask & BIT(21)) - s->tx_errors = hwstats->txpacketcount_gb - - hwstats->txpacketcount_g; - else - s->tx_errors = hwstats->txunderflowerror + - hwstats->txcarriererror; -} - -static void -dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) -{ - const struct net_local *lp = netdev_priv(ndev); - - strcpy(ed->driver, lp->pdev->dev.driver->name); - strcpy(ed->version, DRIVER_VERSION); -} - -static void dwceqos_get_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - const struct net_local *lp = netdev_priv(ndev); - - pp->autoneg = lp->flowcontrol.autoneg; - pp->tx_pause = lp->flowcontrol.tx; - pp->rx_pause = lp->flowcontrol.rx; -} - -static int dwceqos_set_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - struct net_local *lp = netdev_priv(ndev); - int ret = 0; - - lp->flowcontrol.autoneg = pp->autoneg; - if (pp->autoneg) { - ndev->phydev->advertising |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Asym_Pause; - } else { - ndev->phydev->advertising &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause; - lp->flowcontrol.rx = pp->rx_pause; - lp->flowcontrol.tx = pp->tx_pause; - } - - if (netif_running(ndev)) - ret = phy_start_aneg(ndev->phydev); - - return ret; -} - -static void dwceqos_get_strings(struct net_device *ndev, u32 stringset, - u8 *data) -{ - size_t i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, dwceqos_ethtool_stats[i].stat_name, - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } -} - -static void dwceqos_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - struct net_local *lp = netdev_priv(ndev); - unsigned long flags; - size_t i; - u8 *mmcstat = (u8 *)&lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, - mmcstat + dwceqos_ethtool_stats[i].offset, - sizeof(u64)); - data++; - } -} - -static int dwceqos_get_sset_count(struct net_device *ndev, int sset) -{ - if (sset == ETH_SS_STATS) - return ARRAY_SIZE(dwceqos_ethtool_stats); - - return -EOPNOTSUPP; -} - -static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *space) -{ - const struct net_local *lp = netdev_priv(dev); - u32 *reg_space = (u32 *)space; - int reg_offset; - int reg_ix = 0; - - /* MAC registers */ - for (reg_offset = START_MAC_REG_OFFSET; - 
reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - /* MTL registers */ - for (reg_offset = START_MTL_REG_OFFSET; - reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - /* DMA registers */ - for (reg_offset = START_DMA_REG_OFFSET; - reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - BUG_ON(4 * reg_ix > REG_SPACE_SIZE); -} - -static int dwceqos_get_regs_len(struct net_device *dev) -{ - return REG_SPACE_SIZE; -} - -static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off"; -} - -static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off"; -} - -static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 lpi_status; - u32 lpi_enabled; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - edata->eee_active = lp->eee_active; - edata->eee_enabled = lp->eee_enabled; - edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER); - lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA); - edata->tx_lpi_enabled = lpi_enabled; - - if (netif_msg_hw(lp)) { - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - - netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n", - dwceqos_get_rx_lpi_state(regval), - dwceqos_get_tx_lpi_state(regval)); - } - - return phy_ethtool_get_eee(ndev->phydev, edata); -} - -static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval; - unsigned long flags; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - if (edata->eee_enabled && !lp->eee_active) - return -EOPNOTSUPP; - - if (edata->tx_lpi_enabled) { - if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN || - edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX) - return -EINVAL; - } - - lp->eee_enabled = edata->eee_enabled; - - if (edata->eee_enabled && edata->tx_lpi_enabled) { - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER, - edata->tx_lpi_timer); - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE; - if (lp->en_tx_lpi_clockgating) - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } - - return phy_ethtool_set_eee(ndev->phydev, edata); -} - -static u32 dwceqos_get_msglevel(struct net_device *ndev) -{ - const struct net_local *lp = netdev_priv(ndev); - - return lp->msg_enable; -} - -static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) -{ - struct net_local *lp = netdev_priv(ndev); - - lp->msg_enable = msglevel; -} - -static const struct ethtool_ops dwceqos_ethtool_ops = { - .get_drvinfo = dwceqos_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_pauseparam 
= dwceqos_get_pauseparam, - .set_pauseparam = dwceqos_set_pauseparam, - .get_strings = dwceqos_get_strings, - .get_ethtool_stats = dwceqos_get_ethtool_stats, - .get_sset_count = dwceqos_get_sset_count, - .get_regs = dwceqos_get_regs, - .get_regs_len = dwceqos_get_regs_len, - .get_eee = dwceqos_get_eee, - .set_eee = dwceqos_set_eee, - .get_msglevel = dwceqos_get_msglevel, - .set_msglevel = dwceqos_set_msglevel, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -static const struct net_device_ops netdev_ops = { - .ndo_open = dwceqos_open, - .ndo_stop = dwceqos_stop, - .ndo_start_xmit = dwceqos_start_xmit, - .ndo_set_rx_mode = dwceqos_set_rx_mode, - .ndo_set_mac_address = dwceqos_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = dwceqos_poll_controller, -#endif - .ndo_do_ioctl = dwceqos_ioctl, - .ndo_tx_timeout = dwceqos_tx_timeout, - .ndo_get_stats64 = dwceqos_get_stats64, -}; - -static const struct of_device_id dwceq_of_match[] = { - { .compatible = "snps,dwc-qos-ethernet-4.10", }, - {} -}; -MODULE_DEVICE_TABLE(of, dwceq_of_match); - -static int dwceqos_probe(struct platform_device *pdev) -{ - struct resource *r_mem = NULL; - struct net_device *ndev; - struct net_local *lp; - int ret = -ENXIO; - - r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r_mem) { - dev_err(&pdev->dev, "no IO resource defined.\n"); - return -ENXIO; - } - - ndev = alloc_etherdev(sizeof(*lp)); - if (!ndev) { - dev_err(&pdev->dev, "etherdev allocation failed.\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - lp = netdev_priv(ndev); - lp->ndev = ndev; - lp->pdev = pdev; - lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT); - - spin_lock_init(&lp->tx_lock); - spin_lock_init(&lp->hw_lock); - spin_lock_init(&lp->stats_lock); - - lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(lp->apb_pclk)) { - dev_err(&pdev->dev, "apb_pclk clock not found.\n"); - ret = PTR_ERR(lp->apb_pclk); - goto err_out_free_netdev; - } - - ret = clk_prepare_enable(lp->apb_pclk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable APER clock.\n"); - goto err_out_free_netdev; - } - - lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem); - if (IS_ERR(lp->baseaddr)) { - dev_err(&pdev->dev, "failed to map baseaddress.\n"); - ret = PTR_ERR(lp->baseaddr); - goto err_out_clk_dis_aper; - } - - ndev->irq = platform_get_irq(pdev, 0); - ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ; - ndev->netdev_ops = &netdev_ops; - ndev->ethtool_ops = &dwceqos_ethtool_ops; - ndev->base_addr = r_mem->start; - - dwceqos_get_hwfeatures(lp); - dwceqos_mdio_set_csr(lp); - - ndev->hw_features = NETIF_F_SG; - - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL) - ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL) - ndev->hw_features |= NETIF_F_RXCSUM; - - ndev->features = ndev->hw_features; - - lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(lp->phy_ref_clk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - ret = PTR_ERR(lp->phy_ref_clk); - goto err_out_clk_dis_aper; - } - - ret = clk_prepare_enable(lp->phy_ref_clk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable device clock.\n"); - goto err_out_clk_dis_aper; - } - - lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, - "phy-handle", 0); - if (!lp->phy_node && 
of_phy_is_fixed_link(lp->pdev->dev.of_node)) { - ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&pdev->dev, "invalid fixed-link"); - goto err_out_clk_dis_phy; - } - - lp->phy_node = of_node_get(lp->pdev->dev.of_node); - } - - ret = of_get_phy_mode(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); - goto err_out_deregister_fixed_link; - } - - lp->phy_interface = ret; - - ret = dwceqos_mii_init(lp); - if (ret) { - dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); - goto err_out_deregister_fixed_link; - } - - ret = dwceqos_mii_probe(ndev); - if (ret != 0) { - netdev_err(ndev, "mii_probe fail.\n"); - ret = -ENXIO; - goto err_out_deregister_fixed_link; - } - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim, - (unsigned long)ndev); - tasklet_disable(&lp->tx_bdreclaim_tasklet); - - lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME, - WQ_MEM_RECLAIM, 0); - INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); - - platform_set_drvdata(pdev, ndev); - ret = dwceqos_probe_config_dt(pdev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", - ret); - goto err_out_deregister_fixed_link; - } - dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", - pdev->id, ndev->base_addr, ndev->irq); - - ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0, - ndev->name, ndev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", - ndev->irq, ret); - goto err_out_deregister_fixed_link; - } - - if (netif_msg_probe(lp)) - netdev_dbg(ndev, "net_local@%p\n", lp); - - netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); - - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_deregister_fixed_link; - } - - return 0; - -err_out_deregister_fixed_link: - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); -err_out_clk_dis_phy: - clk_disable_unprepare(lp->phy_ref_clk); -err_out_clk_dis_aper: - clk_disable_unprepare(lp->apb_pclk); -err_out_free_netdev: - of_node_put(lp->phy_node); - free_netdev(ndev); - platform_set_drvdata(pdev, NULL); - return ret; -} - -static int dwceqos_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct net_local *lp; - - if (ndev) { - lp = netdev_priv(ndev); - - if (ndev->phydev) { - phy_disconnect(ndev->phydev); - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); - } - mdiobus_unregister(lp->mii_bus); - mdiobus_free(lp->mii_bus); - - unregister_netdev(ndev); - - clk_disable_unprepare(lp->phy_ref_clk); - clk_disable_unprepare(lp->apb_pclk); - - free_netdev(ndev); - } - - return 0; -} - -static struct platform_driver dwceqos_driver = { - .probe = dwceqos_probe, - .remove = dwceqos_remove, - .driver = { - .name = DRIVER_NAME, - .of_match_table = dwceq_of_match, - }, -}; - -module_platform_driver(dwceqos_driver); - -MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Andreas Irestaal "); -MODULE_AUTHOR("Lars Persson "); -- cgit v1.2.3 From dbeaa8c2a4bad2295b2383a79a9749e3638ccf01 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 12 Jan 2017 21:46:32 +0300 Subject: stmmac: indent an if statement The break statement should be indented one more tab. 
Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ac32f9ef7bed..4daa8a3efb47 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -191,7 +191,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, for_each_child_of_node(np, plat->mdio_node) { if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio")) - break; + break; } } -- cgit v1.2.3 From 3819a35fdb8f82428c2b5190f195c4e0b8c427bf Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 13 Jan 2017 14:55:14 +0100 Subject: xfrm: fix possible null deref in xfrm_init_tempstate Dan reports following smatch warning: net/xfrm/xfrm_state.c:659 error: we previously assumed 'afinfo' could be null (see line 651) 649 struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family); 651 if (afinfo) ... 658 } 659 afinfo->init_temprop(x, tmpl, daddr, saddr); I am resonably sure afinfo cannot be NULL here. xfrm_state4.c and state6.c are both part of ipv4/ipv6 (depends on CONFIG_XFRM, a boolean) but even if ipv6 is a module state6.c can't be removed (ipv6 lacks module_exit so it cannot be removed). The only callers for xfrm6_fini that leads to state backend unregister are error unwinding paths that can be called during ipv6 init function. So after ipv6 module is loaded successfully the state backend cannot go away anymore. The family value from policy lookup path is taken from dst_entry, so that should always be AF_INET(6). However, since this silences the warning and avoids readers of this code wondering about possible null deref it seems preferrable to be defensive and just add the old check back. Fixes: 711059b9752ad0 ("xfrm: add and use xfrm_state_afinfo_get_rcu") Reported-by: Dan Carpenter Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_state.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index a62097e640b5..5a597dbbe564 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -648,8 +648,10 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl, { struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family); - if (afinfo) - afinfo->init_tempsel(&x->sel, fl); + if (!afinfo) + return; + + afinfo->init_tempsel(&x->sel, fl); if (family != tmpl->encap_family) { afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family); -- cgit v1.2.3 From ebd89a2d0675f1325c2be5b7576fd8cb7e8defd0 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 16 Jan 2017 13:17:55 +0200 Subject: IPsec: do not ignore crypto err in ah4 input ah4 input processing uses the asynchronous hash crypto API which supplies an error code as part of the operation completion but the error code was being ignored. Treat a crypto API error indication as a verification failure. While a crypto API reported error would almost certainly result in a memcpy of the digest failing anyway and thus the security risk seems minor, performing a memory compare on what might be uninitialized memory is wrong. 
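The shape of the fix is the standard one for asynchronous crypto completions: inspect the error code the API passes to the callback before consuming any of the operation's output. A minimal stand-alone sketch of that pattern follows; the example_* names are invented for illustration and are not the actual ah4 code.

	#include <linux/crypto.h>
	#include <linux/skbuff.h>

	void example_input_resume(struct sk_buff *skb, int err);	/* hypothetical resume helper */

	/* Hypothetical async hash completion; mirrors the pattern, not the driver. */
	static void example_hash_done(struct crypto_async_request *base, int err)
	{
		struct sk_buff *skb = base->data;

		if (err)
			goto out;	/* never compare a digest that was not produced */

		/* ... compare the computed ICV against the received one here ... */

	out:
		example_input_resume(skb, err);
	}
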
Signed-off-by: Gilad Ben-Yossef Signed-off-by: Steffen Klassert --- net/ipv4/ah4.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index f2a71025a770..22377c8ff14b 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err) int ihl = ip_hdrlen(skb); int ah_hlen = (ah->hdrlen + 2) << 2; + if (err) + goto out; + work_iph = AH_SKB_CB(skb)->tmp; auth_data = ah_tmp_auth(work_iph, ihl); icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); -- cgit v1.2.3 From 726282aa6bbe627b3876afc27f88172e37b1d01d Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 16 Jan 2017 13:17:56 +0200 Subject: IPsec: do not ignore crypto err in ah6 input ah6 input processing uses the asynchronous hash crypto API which supplies an error code as part of the operation completion but the error code was being ignored. Treat a crypto API error indication as a verification failure. While a crypto API reported error would almost certainly result in a memcpy of the digest failing anyway and thus the security risk seems minor, performing a memory compare on what might be uninitialized memory is wrong. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Steffen Klassert --- net/ipv6/ah6.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 189eb10b742d..dda6035e3b84 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -474,6 +474,9 @@ static void ah6_input_done(struct crypto_async_request *base, int err) int hdr_len = skb_network_header_len(skb); int ah_hlen = (ah->hdrlen + 2) << 2; + if (err) + goto out; + work_iph = AH_SKB_CB(skb)->tmp; auth_data = ah_tmp_auth(work_iph, hdr_len); icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); -- cgit v1.2.3 From 4b5b1ac5c0b7e565ea5ef0031439bffedce12ff9 Mon Sep 17 00:00:00 2001 From: Shyam Saini Date: Mon, 16 Jan 2017 09:26:21 +0530 Subject: sfc: Replace memset with eth_zero_addr Use eth_zero_addr to assign zero address to the given address array instead of memset when the second argument in memset is address of zero which makes the code clearer and also add header file linux/etherdevice.h Signed-off-by: Shyam Saini Acked-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10_sriov.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index a55c53d6f559..ed4b14283461 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -6,6 +6,7 @@ * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ +#include #include #include #include "net_driver.h" @@ -554,7 +555,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) return 0; fail: - memset(vf->mac, 0, ETH_ALEN); + eth_zero_addr(vf->mac); return rc; } -- cgit v1.2.3 From 51c89e6a3bf68d544e29b234c2cef6ffb73277a1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 13 Jan 2017 14:46:19 +0000 Subject: afs: Conditionalise a new unused variable The bulk readpages support introduced a harmless warning: fs/afs/file.c: In function 'afs_readpages_page_done': fs/afs/file.c:270:20: error: unused variable 'vnode' [-Werror=unused-variable] This adds an #ifdef to match the user of that variable. The user of the variable has to be conditional because it accesses a member of a struct that is also conditional. 
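The same kind of report comes up whenever a local variable's only user sits behind a Kconfig option: the declaration has to be wrapped in the same #ifdef as its use, or builds without the option warn about an unused variable. A tiny stand-alone illustration of the pattern; CONFIG_EXAMPLE_CACHE and the example_* names are invented here and are not the afs code.

	struct example_call {
		void *reply;
	};

	void example_cache_update(void *priv);	/* hypothetical helper */

	static void example_page_done(struct example_call *call)
	{
	#ifdef CONFIG_EXAMPLE_CACHE
		void *priv = call->reply;	/* referenced only when the option is enabled */
	#endif

		/* ... work that happens unconditionally ... */

	#ifdef CONFIG_EXAMPLE_CACHE
		example_cache_update(priv);	/* sole user of 'priv' */
	#endif
	}
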
Fixes: 91b467e0a3f5 ("afs: Make afs_readpages() fetch data in bulk") Signed-off-by: Arnd Bergmann Signed-off-by: David Howells Signed-off-by: David S. Miller --- fs/afs/file.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/afs/file.c b/fs/afs/file.c index 82897a78abc7..ba7b71fba34b 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -267,7 +267,9 @@ static int afs_readpage(struct file *file, struct page *page) */ static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req) { +#ifdef CONFIG_AFS_FSCACHE struct afs_vnode *vnode = call->reply; +#endif struct page *page = req->pages[req->index]; req->pages[req->index] = NULL; -- cgit v1.2.3 From 3be0679b4a91930ca8d0c8a66b39058a698e70fe Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 13 Jan 2017 21:55:26 +0530 Subject: cxgb4: Shutdown adapter if firmware times out or errors out Perform an emergency shutdown of the adapter and stop it from continuing any further communication on the ports or DMA to the host. This is typically used when the adapter and/or firmware have crashed and we want to prevent any further accidental communication with the rest of the world. This will also force the port Link Status to go down -- if register writes work -- which should help our peers figure out that we're down. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 20 +++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 39 +++++++++++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 8 +++++ 4 files changed, 64 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ad0096e74813..ccb455f14d08 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1501,6 +1501,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); +int t4_shutdown_adapter(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4_bar2_sge_qregs(struct adapter *adapter, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e95bb6a0eca8..fb1624544073 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2782,8 +2782,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { void t4_fatal_err(struct adapter *adap) { - t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0); - t4_intr_disable(adap); + int port; + + /* Disable the SGE since ULDs are going to free resources that + * could be exposed to the adapter. RDMA MWs for example... + */ + t4_shutdown_adapter(adap); + for_each_port(adap, port) { + struct net_device *dev = adap->port[port]; + + /* If we get here in very early initialization the network + * devices may not have been set up yet. 
+ */ + if (!dev) + continue; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + } dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index ac712acf256d..87000cd39737 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -330,11 +330,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, * mailbox access list but this is a start. We very rearely * contend on access to the mailbox ... */ - if (i > FW_CMD_MAX_TIMEOUT) { + pcie_fw = t4_read_reg(adap, PCIE_FW_A); + if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) { spin_lock(&adap->mbox_lock); list_del(&entry.list); spin_unlock(&adap->mbox_lock); - ret = -EBUSY; + ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY; t4_record_mbox(adap, cmd, size, access, ret); return ret; } @@ -432,6 +433,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, spin_lock(&adap->mbox_lock); list_del(&entry.list); spin_unlock(&adap->mbox_lock); + t4_fatal_err(adap); return ret; } @@ -7557,6 +7559,39 @@ int t4_prep_adapter(struct adapter *adapter) return 0; } +/** + * t4_shutdown_adapter - shut down adapter, host & wire + * @adapter: the adapter + * + * Perform an emergency shutdown of the adapter and stop it from + * continuing any further communication on the ports or DMA to the + * host. This is typically used when the adapter and/or firmware + * have crashed and we want to prevent any further accidental + * communication with the rest of the world. This will also force + * the port Link Status to go down -- if register writes work -- + * which should help our peers figure out that we're down. + */ +int t4_shutdown_adapter(struct adapter *adapter) +{ + int port; + + t4_intr_disable(adapter); + t4_write_reg(adapter, DBG_GPIO_EN_A, 0); + for_each_port(adapter, port) { + u32 a_port_cfg = PORT_REG(port, + is_t4(adapter->params.chip) + ? XGMAC_PORT_CFG_A + : MAC_PORT_CFG_A); + + t4_write_reg(adapter, a_port_cfg, + t4_read_reg(adapter, a_port_cfg) + & ~SIGNAL_DET_V(1)); + } + t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0); + + return 0; +} + /** * t4_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index f71001d69c47..3348d33c36fa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -855,6 +855,14 @@ #define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S) #define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U) +#define DBG_GPIO_EN_A 0x6010 +#define XGMAC_PORT_CFG_A 0x1000 +#define MAC_PORT_CFG_A 0x800 + +#define SIGNAL_DET_S 14 +#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S) +#define SIGNAL_DET_F SIGNAL_DET_V(1U) + #define MC_ECC_STATUS_A 0x751c #define MC_P_ECC_STATUS_A 0x4131c -- cgit v1.2.3 From e89df813174d81abee8be8cabc047d69ef78c26d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 Jan 2017 09:11:22 -0800 Subject: netlink: do not enter direct reclaim from netlink_trim() In commit d35c99ff77ecb ("netlink: do not enter direct reclaim from netlink_dump()") we made sure to not trigger expensive memory reclaim. Problem is that a bit later, netlink_trim() might be called and trigger memory reclaim. netlink_trim() should be best effort, and really as fast as possible. 
Under memory pressure, it is fine to not trim this skb. Signed-off-by: Eric Dumazet Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/netlink/af_netlink.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 161b628ab2b0..edcc1e19ad53 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1210,7 +1210,9 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) skb = nskb; } - if (!pskb_expand_head(skb, 0, -delta, allocation)) + if (!pskb_expand_head(skb, 0, -delta, + (allocation & ~__GFP_DIRECT_RECLAIM) | + __GFP_NOWARN | __GFP_NORETRY)) skb->truesize -= delta; return skb; -- cgit v1.2.3 From 57b68ec2a7be9a7e9f8999850f8ee5baa49023e4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 13 Jan 2017 18:48:20 +0000 Subject: flow dissector: check if arp_eth is null rather than arp arp is being checked instead of arp_eth to see if the call to __skb_header_pointer failed. Fix this by checking arp_eth is null instead of arp. Also fix to use length hlen rather than hlen - sizeof(_arp); thanks to Eric Dumazet for spotting this latter issue. CoverityScan CID#1396428 ("Logically dead code") on 2nd arp comparison (which should be arp_eth instead). Fixes: commit 55733350e5e8b70c5 ("flow disector: ARP support") Signed-off-by: Colin Ian King Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/flow_dissector.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index e3dffc7a4785..c35aae13c8d2 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -407,9 +407,9 @@ mpls: arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp), sizeof(_arp_eth), data, - hlen - sizeof(_arp), + hlen, &_arp_eth); - if (!arp) + if (!arp_eth) goto out_bad; if (dissector_uses_key(flow_dissector, -- cgit v1.2.3 From 11d05ac1dfb2e51e21c403f19790bf96809b2714 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 13 Jan 2017 18:27:33 -0200 Subject: sctp: remove unused var from sctp_process_asconf Assigned but not used. Signed-off-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Reviewed-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/sm_make_chunk.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index a15d824a313d..80a9088084ac 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3210,7 +3210,6 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, union sctp_params param; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; - sctp_addip_param_t *asconf_param; struct sctp_chunk *asconf_ack; __be16 err_code; int length = 0; @@ -3230,7 +3229,6 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, * asconf parameter. */ length = ntohs(addr_param->p.length); - asconf_param = (void *)addr_param + length; chunk_len -= length; /* create an ASCONF_ACK chunk. -- cgit v1.2.3 From cdfb1a9f30aaf43cfcecbb6c4061cd3807a4e086 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 13 Jan 2017 18:31:15 -0200 Subject: sctp: remove useless code from sctp_apply_peer_addr_params sctp_frag_point() doesn't store anything, and thus just calling it cannot do anything useful. sctp_apply_peer_addr_params is only called by sctp_setsockopt_peer_addr_params. 
When operating on an asoc, sctp_setsockopt_peer_addr_params will call sctp_apply_peer_addr_params once for the asoc, and then once for each transport this asoc has, meaning that the frag_point will be recomputed when updating the transports and calling it when updating the asoc is not necessary. IOW, no action is needed here and we can remove this call. Signed-off-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Reviewed-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/socket.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 318c6786d653..635e03412693 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2430,7 +2430,6 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); } else if (asoc) { asoc->pathmtu = params->spp_pathmtu; - sctp_frag_point(asoc, params->spp_pathmtu); } else { sp->pathmtu = params->spp_pathmtu; } -- cgit v1.2.3 From de1deff994286f464d5cad152d97830a5155f9ba Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Fri, 13 Jan 2017 21:20:14 +0000 Subject: sfc: allow PIO more often If an option descriptor has been sent on a queue but not followed by a packet, there will have been no completion event, so the read and write counts won't match and we'll think we can't do PIO. This combines with the fact that we have two TX queues (for en/disable checksum offload), and that both must be empty for PIO to happen. This patch adds a separate "packet_write_count" that tracks the most recent write_count we expect to see a completion event for; this excludes option descriptors but _includes_ PIO descriptors (even though they look like option descriptors). This is then used, rather than write_count, in efx_nic_tx_is_empty(). We only bother to maintain packet_write_count on EF10, since on Siena (a) there are no option descriptors and it always equals write_count, and (b) there's no PIO, so we don't need it anyway. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 5 +++++ drivers/net/ethernet/sfc/net_driver.h | 9 +++++++++ drivers/net/ethernet/sfc/nic.h | 17 ++++++++++++++--- drivers/net/ethernet/sfc/siena.c | 1 + drivers/net/ethernet/sfc/tx.c | 1 + 5 files changed, 30 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 208e0048709b..f6e29a975e4e 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2358,7 +2358,11 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) /* Create TX descriptor ring entry */ if (buffer->flags & EFX_TX_BUF_OPTION) { *txd = buffer->option; + if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) + /* PIO descriptor */ + tx_queue->packet_write_count = tx_queue->write_count; } else { + tx_queue->packet_write_count = tx_queue->write_count; BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); EFX_POPULATE_QWORD_3( *txd, @@ -5796,6 +5800,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, .can_rx_scatter = true, .always_rx_scatter = true, + .option_descriptors = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 49db9e833c96..b20fe437265f 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -208,6 +208,12 @@ struct efx_tx_buffer { * @write_count: Current write pointer * This is the number of buffers that have been added to the * hardware ring. + * @packet_write_count: Completable write pointer + * This is the write pointer of the last packet written. + * Normally this will equal @write_count, but as option descriptors + * don't produce completion events, they won't update this. + * Filled in iff @efx->type->option_descriptors; only used for PIO. + * Thus, this is written and used on EF10, and neither on farch. * @old_read_count: The value of read_count when last checked. * This is here for performance reasons. The xmit path will * only get the up-to-date value of read_count if this @@ -255,6 +261,7 @@ struct efx_tx_queue { /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; + unsigned int packet_write_count; unsigned int old_read_count; unsigned int tso_bursts; unsigned int tso_long_headers; @@ -1237,6 +1244,7 @@ struct efx_mtd_partition { * @rx_buffer_padding: Size of padding at end of RX packet * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers + * @option_descriptors: NIC supports TX option descriptors * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. 
* @timer_period_max: Maximum period of interrupt timer (in ticks) @@ -1395,6 +1403,7 @@ struct efx_nic_type { unsigned int rx_buffer_padding; bool can_rx_scatter; bool always_rx_scatter; + bool option_descriptors; unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 223774635cba..6a69aa3b0129 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -85,6 +85,17 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; } +/* Report whether the NIC considers this TX queue empty, using + * packet_write_count (the write count recorded for the last completable + * doorbell push). May return false negative. EF10 only, which is OK + * because only EF10 supports PIO. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +{ + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); + return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count); +} + /* Decide whether we can use TX PIO, ie. write packet data directly into * a buffer on the device. This can reduce latency at the expense of * throughput, so we only do this if both hardware and software TX rings @@ -94,9 +105,9 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) { struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); - return tx_queue->piobuf && - __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) && - __efx_nic_tx_is_empty(partner, partner->insert_count); + + return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) && + efx_nic_tx_is_empty(partner); } /* Decide whether to push a TX descriptor to the NIC vs merely writing diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 4e54e5dc9fcb..118ff5601756 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -1044,6 +1044,7 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, .rx_buffer_padding = 0, .can_rx_scatter = true, + .option_descriptors = false, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 3c0151424d12..beaf98080a0b 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -817,6 +817,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->insert_count = 0; tx_queue->write_count = 0; + tx_queue->packet_write_count = 0; tx_queue->old_write_count = 0; tx_queue->read_count = 0; tx_queue->old_read_count = 0; -- cgit v1.2.3 From c634700f7eec3c0da46e299cd0a0ae8b594f9b55 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Fri, 13 Jan 2017 21:20:29 +0000 Subject: sfc: get PIO buffer size from the NIC The 8000 series SFC NICs have 4K PIO buffers, rather than the 2K of the 7000 series. Rather than having a hard-coded PIO buffer size (ER_DZ_TX_PIOBUF_SIZE), read it from the GET_CAPABILITIES_V2 MCDI response. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 16 ++++++++++------ drivers/net/ethernet/sfc/nic.h | 2 ++ drivers/net/ethernet/sfc/tx.c | 1 - 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index f6e29a975e4e..2ce576919f97 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -197,11 +197,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) nic_data->datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); - if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) + if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { nic_data->datapath_caps2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2); - else + nic_data->piobuf_size = MCDI_WORD(outbuf, + GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); + } else { nic_data->datapath_caps2 = 0; + nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; + } /* record the DPCPU firmware IDs to determine VEB vswitching support. */ @@ -823,8 +827,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) offset = ((efx->tx_channel_offset + efx->n_tx_channels - tx_queue->channel->channel - 1) * efx_piobuf_size); - index = offset / ER_DZ_TX_PIOBUF_SIZE; - offset = offset % ER_DZ_TX_PIOBUF_SIZE; + index = offset / nic_data->piobuf_size; + offset = offset % nic_data->piobuf_size; /* When the host page size is 4K, the first * host page in the WC mapping may be within @@ -1159,11 +1163,11 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) * functions of the controller. */ if (efx_piobuf_size != 0 && - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= + nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= efx->n_tx_channels) { unsigned int n_piobufs = DIV_ROUND_UP(efx->n_tx_channels, - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); + nic_data->piobuf_size / efx_piobuf_size); rc = efx_ef10_alloc_piobufs(efx, n_piobufs); if (rc) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 6a69aa3b0129..383ff6e1e647 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -343,6 +343,7 @@ enum { * @pio_write_base: Base address for writing PIO buffers * @pio_write_vi_base: Relative VI number for @pio_write_base * @piobuf_handle: Handle of each PIO buffer allocated + * @piobuf_size: size of a single PIO buffer * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC * reboot * @rx_rss_context: Firmware handle for our RSS context @@ -380,6 +381,7 @@ struct efx_ef10_nic_data { void __iomem *wc_membase, *pio_write_base; unsigned int pio_write_vi_base; unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; + u16 piobuf_size; bool must_restore_piobufs; u32 rx_rss_context; bool rx_rss_context_exclusive; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index beaf98080a0b..ff88d60aa6d5 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -28,7 +28,6 @@ #ifdef EFX_USE_PIO -#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; -- cgit v1.2.3 From 019ec0032e821a7262995af0c81b242dc7e55c9f Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Fri, 13 Jan 2017 15:48:30 -0800 Subject: ipvlan: fix dev_id creation corner case. 
In the last patch da36e13cf65 ("ipvlan: improvise dev_id generation logic in IPvlan") I missed some part of Dave's suggestion and because of that the dev_id creation could fail in a corner case scenario. This would happen when more or less 64k devices have been already created and several have been deleted. If the devices that are still sticking around are the last n bits from the bitmap. So in this scenario even if lower bits are available, the dev_id search is so narrow that it always fails. Fixes: da36e13cf65 ("ipvlan: improvise dev_id generation logic in IPvlan") CC: David Miller CC: Eric Dumazet Signed-off-by: Mahesh Bandewar Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 92b221a03350..b5c390f0f2b3 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -549,6 +549,9 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, */ err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE, GFP_KERNEL); + if (err < 0) + err = ida_simple_get(&port->ida, 0x1, port->dev_id_start, + GFP_KERNEL); if (err < 0) goto destroy_ipvlan_port; dev->dev_id = err; -- cgit v1.2.3 From 2d071c643f1cd15a24172de4b5b7ae2adb93abbb Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 15 Jan 2017 01:34:25 +0100 Subject: bpf, trace: make ctx access checks more robust Make sure that ctx cannot potentially be accessed oob by asserting explicitly that ctx access size into pt_regs for BPF_PROG_TYPE_KPROBE programs must be within limits. In case some 32bit archs have pt_regs not being a multiple of 8, then BPF_DW access could cause such access. BPF_PROG_TYPE_KPROBE progs don't have a ctx conversion function since there's no extra mapping needed. kprobe_prog_is_valid_access() didn't enforce sizeof(long) as the only allowed access size, since LLVM can generate non BPF_W/BPF_DW access to regs from time to time. For BPF_PROG_TYPE_TRACEPOINT we don't have a ctx conversion either, so add a BUILD_BUG_ON() check to make sure that BPF_DW access will not be a similar issue in future (ctx works on event buffer as opposed to pt_regs there). Fixes: 2541517c32be ("tracing, perf: Implement BPF programs attached to kprobes") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/trace/bpf_trace.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 1860e7f1e5a8..c22a961d1a42 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -459,6 +459,13 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type return false; if (off % size != 0) return false; + /* + * Assertion for 32 bit to make sure last 8 byte access + * (BPF_DW) to the last 4 byte member is disallowed. + */ + if (off + size > sizeof(struct pt_regs)) + return false; + return true; } @@ -540,6 +547,8 @@ static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type return false; if (off % size != 0) return false; + + BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); return true; } -- cgit v1.2.3 From a50a05f497a2a6e772900ffe93246fb7243d86d8 Mon Sep 17 00:00:00 2001 From: David Lebrun Date: Sun, 15 Jan 2017 15:26:16 +0100 Subject: ipv6: sr: add missing Kbuild export for header files Add missing IPv6-SR header files in include/uapi/linux/Kbuild. 
Also, prevent seg6_lwt_headroom() from being exported and add missing linux/types.h include. Signed-off-by: David Lebrun Signed-off-by: David S. Miller --- include/uapi/linux/Kbuild | 4 ++++ include/uapi/linux/seg6.h | 2 ++ include/uapi/linux/seg6_hmac.h | 1 + include/uapi/linux/seg6_iptunnel.h | 4 ++++ 4 files changed, 11 insertions(+) diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index f330ba4547cf..e600b50be77e 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -379,6 +379,10 @@ header-y += sctp.h header-y += sdla.h header-y += seccomp.h header-y += securebits.h +header-y += seg6_genl.h +header-y += seg6.h +header-y += seg6_hmac.h +header-y += seg6_iptunnel.h header-y += selinux_netlink.h header-y += sem.h header-y += serial_core.h diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h index c396a8052f73..33496595064c 100644 --- a/include/uapi/linux/seg6.h +++ b/include/uapi/linux/seg6.h @@ -14,6 +14,8 @@ #ifndef _UAPI_LINUX_SEG6_H #define _UAPI_LINUX_SEG6_H +#include + /* * SRH */ diff --git a/include/uapi/linux/seg6_hmac.h b/include/uapi/linux/seg6_hmac.h index b652dfd51bc5..e691c753fc3f 100644 --- a/include/uapi/linux/seg6_hmac.h +++ b/include/uapi/linux/seg6_hmac.h @@ -1,6 +1,7 @@ #ifndef _UAPI_LINUX_SEG6_HMAC_H #define _UAPI_LINUX_SEG6_HMAC_H +#include #include #define SEG6_HMAC_SECRET_LEN 64 diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index 0f7dbd280a9c..7a7183d4062a 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -33,6 +33,8 @@ enum { SEG6_IPTUN_MODE_ENCAP, }; +#ifdef __KERNEL__ + static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) { int encap = (tuninfo->mode == SEG6_IPTUN_MODE_ENCAP); @@ -42,3 +44,5 @@ static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) } #endif + +#endif -- cgit v1.2.3 From a3308d8fd1f58c67aaae52d9468791c2082ab2c7 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Mon, 16 Jan 2017 10:45:13 +0200 Subject: net/sched: cls_flower: Disallow duplicate internal elements Flower currently allows having the same filter twice with the same priority. Actions (and statistics update) will always execute on the first inserted rule leaving the second rule unused. This patch disallows that. Signed-off-by: Paul Blakey Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_flower.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index a3bfda3091a4..27934456d984 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -134,6 +134,14 @@ static void fl_clear_masked_range(struct fl_flow_key *key, memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask)); } +static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head, + struct fl_flow_key *mkey) +{ + return rhashtable_lookup_fast(&head->ht, + fl_key_get_start(mkey, &head->mask), + head->ht_params); +} + static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { @@ -181,9 +189,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, fl_set_masked_key(&skb_mkey, &skb_key, &head->mask); - f = rhashtable_lookup_fast(&head->ht, - fl_key_get_start(&skb_mkey, &head->mask), - head->ht_params); + f = fl_lookup(head, &skb_mkey); if (f && !tc_skip_sw(f->flags)) { *res = f->res; return tcf_exts_exec(skb, &f->exts, res); @@ -875,6 +881,11 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, goto errout; if (!tc_skip_sw(fnew->flags)) { + if (!fold && fl_lookup(head, &fnew->mkey)) { + err = -EEXIST; + goto errout; + } + err = rhashtable_insert_fast(&head->ht, &fnew->ht_node, head->ht_params); if (err) -- cgit v1.2.3 From 2a90f7e1d5d04e4f1060268e0b55a2c702bbd67a Mon Sep 17 00:00:00 2001 From: Simon Guinot Date: Mon, 16 Jan 2017 18:08:31 +0100 Subject: net: mvneta: add xmit_more support Basing on xmit_more flag of the skb, TX descriptors can be concatenated before flushing. This commit delay Tx descriptor flush if the queue is running and if there is more skb's to send. A maximum allowed number of descriptors for flushing at once due to MVNETA_TXQ_UPDATE_REG(q) reqisters limitation, is 255. Because of that a new macro was added (MVNETA_TXQ_DEC_SENT_MASK) in order to ensure that concatenated amount of descriptor does not exceed that value. Signed-off-by: Simon Guinot Signed-off-by: Marcin Wojtas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3607d8febbcf..9624537219a8 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -224,6 +224,7 @@ #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) #define MVNETA_TXQ_DEC_SENT_SHIFT 16 +#define MVNETA_TXQ_DEC_SENT_MASK 0xff #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) #define MVNETA_TXQ_SENT_DESC_SHIFT 16 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 @@ -525,6 +526,7 @@ struct mvneta_tx_queue { * descriptor ring */ int count; + int pending; int tx_stop_threshold; int tx_wake_threshold; @@ -818,8 +820,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, /* Only 255 descriptors can be added at once ; Assume caller * process TX desriptors in quanta less than 256 */ - val = pend_desc; + val = pend_desc + txq->pending; mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + txq->pending = 0; } /* Get pointer to next TX descriptor to be processed (send) by HW */ @@ -2399,11 +2402,15 @@ out: struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); txq->count += frags; - mvneta_txq_pend_desc_add(pp, txq, frags); - if (txq->count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); + if (!skb->xmit_more || netif_xmit_stopped(nq) || + txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) + mvneta_txq_pend_desc_add(pp, txq, frags); + else + txq->pending += frags; + u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; -- cgit v1.2.3 From a29b6235560a1ed10c8e1a73bfc616a66b802b90 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Mon, 16 Jan 2017 18:08:32 +0100 Subject: net: mvneta: add BQL support Tests showed that when whole bandwidth is consumed, the latency for various kind of traffic can reach high values. With saturated link (e.g. with iperf from target to host) simple ping could take significant amount of time. BQL proved to improve this situation when implemented in mvneta driver. Measurements of ping latency for 3 link speeds: Speed | Latency w/o BQL | Latency with BQL 10 | 7-14 ms | 3.5 ms 100 | 2-12 ms | 0.6 ms 1000 | often timeout | up to 2ms Decreasing latency as above result in sligt performance cost - 4kpps (-1.4%) when pushing 64B packets via two bridged interfaces of Armada 38x. For 1500B packets in the same setup, the mpstat tool showed +8% of CPU occupation (default affinity, second CPU idle). Even though this cost seems reasonable to take, considering other improvements. This commit adds byte queue limit mechanism for the mvneta driver. Signed-off-by: Marcin Wojtas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9624537219a8..6dcc951af0ff 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1759,8 +1759,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, /* Free tx queue skbuffs */ static void mvneta_txq_bufs_free(struct mvneta_port *pp, - struct mvneta_tx_queue *txq, int num) + struct mvneta_tx_queue *txq, int num, + struct netdev_queue *nq) { + unsigned int bytes_compl = 0, pkts_compl = 0; int i; for (i = 0; i < num; i++) { @@ -1768,6 +1770,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, txq->txq_get_index; struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; + if (skb) { + bytes_compl += skb->len; + pkts_compl++; + } + mvneta_txq_inc_get(txq); if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) @@ -1778,6 +1785,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, continue; dev_kfree_skb_any(skb); } + + netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); } /* Handle end of transmission */ @@ -1791,7 +1800,7 @@ static void mvneta_txq_done(struct mvneta_port *pp, if (!tx_done) return; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); txq->count -= tx_done; @@ -2401,6 +2410,8 @@ out: struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + netdev_tx_sent_queue(nq, len); + txq->count += frags; if (txq->count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); @@ -2429,9 +2440,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); int tx_done = txq->count; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); /* reset txq */ txq->count = 0; @@ -2957,6 +2969,8 @@ static int mvneta_txq_init(struct mvneta_port *pp, static void mvneta_txq_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + kfree(txq->tx_skb); if (txq->tso_hdrs) @@ -2968,6 +2982,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); + netdev_tx_reset_queue(nq); + txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; -- cgit v1.2.3 From cac2661c53f35cbe651bef9b07026a5a05ab8ce0 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 17 Jan 2017 10:22:57 +0100 Subject: esp4: Avoid skb_cow_data whenever possible This patch tries to avoid skb_cow_data on esp4. On the encrypt side we add the IPsec tailbits to the linear part of the buffer if there is space on it. If there is no space on the linear part, we add a page fragment with the tailbits to the buffer and use separate src and dst scatterlists. On the decrypt side, we leave the buffer as it is if it is not cloned. With this, we can avoid a linearization of the buffer in most of the cases. 
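The core of the change is an early classification of the skb, falling back to skb_cow_data() only when the cheap paths are unavailable. A simplified sketch of that decision logic on the output side, with error handling, ESN and UDP encapsulation omitted and a hypothetical helper name (skb_cloned(), skb_availroom(), skb_has_frag_list() and skb_cow_data() are the real helpers the patch uses):

#include <linux/skbuff.h>

/* Outline only, not the full implementation: decide where the ESP trailer
 * (padding + pad length + next header + ICV) will live.
 */
static int esp_place_trailer(struct sk_buff *skb, int tailen,
			     struct sk_buff **trailer)
{
	if (!skb_cloned(skb)) {
		if (tailen <= skb_availroom(skb)) {
			/* Trailer fits in the linear tailroom: no copy. */
			*trailer = skb;
			return 1;	/* a single scatterlist entry */
		}
		if (skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS &&
		    !skb_has_frag_list(skb)) {
			/* Room for another frag: append the trailer as a page
			 * fragment (the patch refills it from x->xfrag) and
			 * use separate source/destination scatterlists.
			 */
			return skb_shinfo(skb)->nr_frags + 2;
		}
	}

	/* Cloned or awkward buffer: old behaviour, possibly linearizing. */
	return skb_cow_data(skb, tailen, trailer);
}

On the decrypt side the same idea applies with no trailer to add: an uncloned skb is used as it is, whether linear or fragmented, and only cloned buffers go through skb_cow_data().
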
Joint work with: Sowmini Varadhan Ilan Tayari Signed-off-by: Sowmini Varadhan Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 2 + net/ipv4/esp4.c | 338 +++++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 266 insertions(+), 74 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c52197cf51dc..d9a81dcef53e 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -213,6 +213,8 @@ struct xfrm_state { /* Last used time */ unsigned long lastused; + struct page_frag xfrag; + /* Reference to data common to all the instances of this * transformer. */ const struct xfrm_type *type; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 20fb25e3027b..9e8d97133513 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -18,6 +18,8 @@ #include #include +#include + struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; @@ -92,11 +94,40 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, __alignof__(struct scatterlist)); } +static void esp_ssg_unref(struct xfrm_state *x, void *tmp) +{ + struct esp_output_extra *extra = esp_tmp_extra(tmp); + struct crypto_aead *aead = x->data; + int extralen = 0; + u8 *iv; + struct aead_request *req; + struct scatterlist *sg; + + if (x->props.flags & XFRM_STATE_ESN) + extralen += sizeof(*extra); + + extra = esp_tmp_extra(tmp); + iv = esp_tmp_iv(aead, tmp, extralen); + req = esp_tmp_req(aead, iv); + + /* Unref skb_frag_pages in the src scatterlist if necessary. + * Skip the first sg which comes from skb->data. + */ + if (req->src != req->dst) + for (sg = sg_next(req->src); sg; sg = sg_next(sg)) + put_page(sg_page(sg)); +} + static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; + void *tmp; + struct dst_entry *dst = skb_dst(skb); + struct xfrm_state *x = dst->xfrm; - kfree(ESP_SKB_CB(skb)->tmp); + tmp = ESP_SKB_CB(skb)->tmp; + esp_ssg_unref(x, tmp); + kfree(tmp); xfrm_output_resume(skb, err); } @@ -120,6 +151,29 @@ static void esp_output_restore_header(struct sk_buff *skb) sizeof(__be32)); } +static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb, + struct ip_esp_hdr *esph, + struct esp_output_extra *extra) +{ + struct xfrm_state *x = skb_dst(skb)->xfrm; + + /* For ESN we move the header forward by 4 bytes to + * accomodate the high bits. We will move it back after + * encryption. 
+ */ + if ((x->props.flags & XFRM_STATE_ESN)) { + extra->esphoff = (unsigned char *)esph - + skb_transport_header(skb); + esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4); + extra->seqhi = esph->spi; + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); + } + + esph->spi = x->id.spi; + + return esph; +} + static void esp_output_done_esn(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; @@ -130,16 +184,18 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err) static int esp_output(struct xfrm_state *x, struct sk_buff *skb) { - int err; struct esp_output_extra *extra; + int err = -ENOMEM; struct ip_esp_hdr *esph; struct crypto_aead *aead; struct aead_request *req; - struct scatterlist *sg; + struct scatterlist *sg, *dsg; struct sk_buff *trailer; + struct page *page; void *tmp; u8 *iv; u8 *tail; + u8 *vaddr; int blksize; int clen; int alen; @@ -149,7 +205,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) int nfrags; int assoclen; int extralen; + int tailen; __be64 seqno; + __u8 proto = *skb_mac_header(skb); /* skb is pure payload to encrypt */ @@ -169,12 +227,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) blksize = ALIGN(crypto_aead_blocksize(aead), 4); clen = ALIGN(skb->len + 2 + tfclen, blksize); plen = clen - skb->len - tfclen; - - err = skb_cow_data(skb, tfclen + plen + alen, &trailer); - if (err < 0) - goto error; - nfrags = err; - + tailen = tfclen + plen + alen; assoclen = sizeof(*esph); extralen = 0; @@ -183,35 +236,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) assoclen += sizeof(__be32); } - tmp = esp_alloc_tmp(aead, nfrags, extralen); - if (!tmp) { - err = -ENOMEM; - goto error; - } - - extra = esp_tmp_extra(tmp); - iv = esp_tmp_iv(aead, tmp, extralen); - req = esp_tmp_req(aead, iv); - sg = esp_req_sg(aead, req); - - /* Fill padding... */ - tail = skb_tail_pointer(trailer); - if (tfclen) { - memset(tail, 0, tfclen); - tail += tfclen; - } - do { - int i; - for (i = 0; i < plen - 2; i++) - tail[i] = i + 1; - } while (0); - tail[plen - 2] = plen - 2; - tail[plen - 1] = *skb_mac_header(skb); - pskb_put(skb, trailer, clen - skb->len + alen); - - skb_push(skb, -skb_network_offset(skb)); - esph = ip_esp_hdr(skb); *skb_mac_header(skb) = IPPROTO_ESP; + esph = ip_esp_hdr(skb); /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { @@ -230,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) uh = (struct udphdr *)esph; uh->source = sport; uh->dest = dport; - uh->len = htons(skb->len - skb_transport_offset(skb)); + uh->len = htons(skb->len + tailen + - skb_transport_offset(skb)); uh->check = 0; switch (encap_type) { @@ -248,31 +275,170 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) *skb_mac_header(skb) = IPPROTO_UDP; } - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + if (!skb_cloned(skb)) { + if (tailen <= skb_availroom(skb)) { + nfrags = 1; + trailer = skb; + tail = skb_tail_pointer(trailer); - aead_request_set_callback(req, 0, esp_output_done, skb); + goto skip_cow; + } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS) + && !skb_has_frag_list(skb)) { + int allocsize; + struct sock *sk = skb->sk; + struct page_frag *pfrag = &x->xfrag; - /* For ESN we move the header forward by 4 bytes to - * accomodate the high bits. We will move it back after - * encryption. 
- */ - if ((x->props.flags & XFRM_STATE_ESN)) { - extra->esphoff = (unsigned char *)esph - - skb_transport_header(skb); - esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4); - extra->seqhi = esph->spi; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); - aead_request_set_callback(req, 0, esp_output_done_esn, skb); + allocsize = ALIGN(tailen, L1_CACHE_BYTES); + + spin_lock_bh(&x->lock); + + if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { + spin_unlock_bh(&x->lock); + goto cow; + } + + page = pfrag->page; + get_page(page); + + vaddr = kmap_atomic(page); + + tail = vaddr + pfrag->offset; + + /* Fill padding... */ + if (tfclen) { + memset(tail, 0, tfclen); + tail += tfclen; + } + do { + int i; + for (i = 0; i < plen - 2; i++) + tail[i] = i + 1; + } while (0); + tail[plen - 2] = plen - 2; + tail[plen - 1] = proto; + + kunmap_atomic(vaddr); + + nfrags = skb_shinfo(skb)->nr_frags; + + __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, + tailen); + skb_shinfo(skb)->nr_frags = ++nfrags; + + pfrag->offset = pfrag->offset + allocsize; + nfrags++; + + skb->len += tailen; + skb->data_len += tailen; + skb->truesize += tailen; + if (sk) + atomic_add(tailen, &sk->sk_wmem_alloc); + + skb_push(skb, -skb_network_offset(skb)); + + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + esph->spi = x->id.spi; + + tmp = esp_alloc_tmp(aead, nfrags + 2, extralen); + if (!tmp) { + spin_unlock_bh(&x->lock); + err = -ENOMEM; + goto error; + } + + extra = esp_tmp_extra(tmp); + iv = esp_tmp_iv(aead, tmp, extralen); + req = esp_tmp_req(aead, iv); + sg = esp_req_sg(aead, req); + dsg = &sg[nfrags]; + + esph = esp_output_set_extra(skb, esph, extra); + + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + clen + alen); + + allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES); + + if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { + spin_unlock_bh(&x->lock); + err = -ENOMEM; + goto error; + } + + skb_shinfo(skb)->nr_frags = 1; + + page = pfrag->page; + get_page(page); + /* replace page frags in skb with new page */ + __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len); + pfrag->offset = pfrag->offset + allocsize; + + sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1); + skb_to_sgvec(skb, dsg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + clen + alen); + + spin_unlock_bh(&x->lock); + + goto skip_cow2; + } } +cow: + err = skb_cow_data(skb, tailen, &trailer); + if (err < 0) + goto error; + nfrags = err; + tail = skb_tail_pointer(trailer); + esph = ip_esp_hdr(skb); + +skip_cow: + /* Fill padding... 
*/ + if (tfclen) { + memset(tail, 0, tfclen); + tail += tfclen; + } + do { + int i; + for (i = 0; i < plen - 2; i++) + tail[i] = i + 1; + } while (0); + tail[plen - 2] = plen - 2; + tail[plen - 1] = proto; + pskb_put(skb, trailer, clen - skb->len + alen); + + skb_push(skb, -skb_network_offset(skb)); + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); esph->spi = x->id.spi; + tmp = esp_alloc_tmp(aead, nfrags, extralen); + if (!tmp) { + err = -ENOMEM; + goto error; + } + + extra = esp_tmp_extra(tmp); + iv = esp_tmp_iv(aead, tmp, extralen); + req = esp_tmp_req(aead, iv); + sg = esp_req_sg(aead, req); + dsg = sg; + + esph = esp_output_set_extra(skb, esph, extra); + sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, (unsigned char *)esph - skb->data, assoclen + ivlen + clen + alen); - aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); +skip_cow2: + if ((x->props.flags & XFRM_STATE_ESN)) + aead_request_set_callback(req, 0, esp_output_done_esn, skb); + else + aead_request_set_callback(req, 0, esp_output_done, skb); + + aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv); aead_request_set_ad(req, assoclen); seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + @@ -298,6 +464,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) esp_output_restore_header(skb); } + if (sg != dsg) + esp_ssg_unref(x, tmp); kfree(tmp); error: @@ -401,6 +569,23 @@ static void esp_input_restore_header(struct sk_buff *skb) __skb_pull(skb, 4); } +static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi) +{ + struct xfrm_state *x = xfrm_input_state(skb); + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data; + + /* For ESN we move the header forward by 4 bytes to + * accomodate the high bits. We will move it back after + * decryption. + */ + if ((x->props.flags & XFRM_STATE_ESN)) { + esph = (void *)skb_push(skb, 4); + *seqhi = esph->spi; + esph->spi = esph->seq_no; + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; + } +} + static void esp_input_done_esn(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; @@ -437,12 +622,6 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) if (elen <= 0) goto out; - err = skb_cow_data(skb, 0, &trailer); - if (err < 0) - goto out; - - nfrags = err; - assoclen = sizeof(*esph); seqhilen = 0; @@ -451,6 +630,26 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) assoclen += seqhilen; } + if (!skb_cloned(skb)) { + if (!skb_is_nonlinear(skb)) { + nfrags = 1; + + goto skip_cow; + } else if (!skb_has_frag_list(skb)) { + nfrags = skb_shinfo(skb)->nr_frags; + nfrags++; + + goto skip_cow; + } + } + + err = skb_cow_data(skb, 0, &trailer); + if (err < 0) + goto out; + + nfrags = err; + +skip_cow: err = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags, seqhilen); if (!tmp) @@ -462,26 +661,17 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); - skb->ip_summed = CHECKSUM_NONE; + esp_input_set_header(skb, seqhi); - esph = (struct ip_esp_hdr *)skb->data; + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, 0, skb->len); - aead_request_set_callback(req, 0, esp_input_done, skb); + skb->ip_summed = CHECKSUM_NONE; - /* For ESN we move the header forward by 4 bytes to - * accomodate the high bits. We will move it back after - * decryption. 
- */ - if ((x->props.flags & XFRM_STATE_ESN)) { - esph = (void *)skb_push(skb, 4); - *seqhi = esph->spi; - esph->spi = esph->seq_no; - esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; + if ((x->props.flags & XFRM_STATE_ESN)) aead_request_set_callback(req, 0, esp_input_done_esn, skb); - } - - sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, 0, skb->len); + else + aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); aead_request_set_ad(req, assoclen); -- cgit v1.2.3 From 03e2a30f6a27e2f3e5283b777f6ddd146b38c738 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 17 Jan 2017 10:23:03 +0100 Subject: esp6: Avoid skb_cow_data whenever possible This patch tries to avoid skb_cow_data on esp6. On the encrypt side we add the IPsec tailbits to the linear part of the buffer if there is space on it. If there is no space on the linear part, we add a page fragment with the tailbits to the buffer and use separate src and dst scatterlists. On the decrypt side, we leave the buffer as it is if it is not cloned. With this, we can avoid a linearization of the buffer in most of the cases. Joint work with: Sowmini Varadhan Ilan Tayari Signed-off-by: Sowmini Varadhan Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- net/ipv6/esp6.c | 302 +++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 246 insertions(+), 56 deletions(-) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index cbcdd5db31f4..a428ac6b7c74 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -44,6 +44,8 @@ #include #include +#include + struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; @@ -114,11 +116,40 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, __alignof__(struct scatterlist)); } +static void esp_ssg_unref(struct xfrm_state *x, void *tmp) +{ + __be32 *seqhi; + struct crypto_aead *aead = x->data; + int seqhilen = 0; + u8 *iv; + struct aead_request *req; + struct scatterlist *sg; + + if (x->props.flags & XFRM_STATE_ESN) + seqhilen += sizeof(__be32); + + seqhi = esp_tmp_seqhi(tmp); + iv = esp_tmp_iv(aead, tmp, seqhilen); + req = esp_tmp_req(aead, iv); + + /* Unref skb_frag_pages in the src scatterlist if necessary. + * Skip the first sg which comes from skb->data. + */ + if (req->src != req->dst) + for (sg = sg_next(req->src); sg; sg = sg_next(sg)) + put_page(sg_page(sg)); +} + static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; + void *tmp; + struct dst_entry *dst = skb_dst(skb); + struct xfrm_state *x = dst->xfrm; - kfree(ESP_SKB_CB(skb)->tmp); + tmp = ESP_SKB_CB(skb)->tmp; + esp_ssg_unref(x, tmp); + kfree(tmp); xfrm_output_resume(skb, err); } @@ -138,6 +169,27 @@ static void esp_output_restore_header(struct sk_buff *skb) esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32)); } +static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb, + struct ip_esp_hdr *esph, + __be32 *seqhi) +{ + struct xfrm_state *x = skb_dst(skb)->xfrm; + + /* For ESN we move the header forward by 4 bytes to + * accomodate the high bits. We will move it back after + * encryption. 
+ */ + if ((x->props.flags & XFRM_STATE_ESN)) { + esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); + *seqhi = esph->spi; + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); + } + + esph->spi = x->id.spi; + + return esph; +} + static void esp_output_done_esn(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; @@ -152,8 +204,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) struct ip_esp_hdr *esph; struct crypto_aead *aead; struct aead_request *req; - struct scatterlist *sg; + struct scatterlist *sg, *dsg; struct sk_buff *trailer; + struct page *page; void *tmp; int blksize; int clen; @@ -164,10 +217,13 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) int nfrags; int assoclen; int seqhilen; + int tailen; u8 *iv; u8 *tail; + u8 *vaddr; __be32 *seqhi; __be64 seqno; + __u8 proto = *skb_mac_header(skb); /* skb is pure payload to encrypt */ aead = x->data; @@ -186,11 +242,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) blksize = ALIGN(crypto_aead_blocksize(aead), 4); clen = ALIGN(skb->len + 2 + tfclen, blksize); plen = clen - skb->len - tfclen; - - err = skb_cow_data(skb, tfclen + plen + alen, &trailer); - if (err < 0) - goto error; - nfrags = err; + tailen = tfclen + plen + alen; assoclen = sizeof(*esph); seqhilen = 0; @@ -200,19 +252,130 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) assoclen += seqhilen; } - tmp = esp_alloc_tmp(aead, nfrags, seqhilen); - if (!tmp) { - err = -ENOMEM; - goto error; + *skb_mac_header(skb) = IPPROTO_ESP; + esph = ip_esp_hdr(skb); + + if (!skb_cloned(skb)) { + if (tailen <= skb_availroom(skb)) { + nfrags = 1; + trailer = skb; + tail = skb_tail_pointer(trailer); + + goto skip_cow; + } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS) + && !skb_has_frag_list(skb)) { + int allocsize; + struct sock *sk = skb->sk; + struct page_frag *pfrag = &x->xfrag; + + allocsize = ALIGN(tailen, L1_CACHE_BYTES); + + spin_lock_bh(&x->lock); + + if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { + spin_unlock_bh(&x->lock); + goto cow; + } + + page = pfrag->page; + get_page(page); + + vaddr = kmap_atomic(page); + + tail = vaddr + pfrag->offset; + + /* Fill padding... 
*/ + if (tfclen) { + memset(tail, 0, tfclen); + tail += tfclen; + } + do { + int i; + for (i = 0; i < plen - 2; i++) + tail[i] = i + 1; + } while (0); + tail[plen - 2] = plen - 2; + tail[plen - 1] = proto; + + kunmap_atomic(vaddr); + + nfrags = skb_shinfo(skb)->nr_frags; + + __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, + tailen); + skb_shinfo(skb)->nr_frags = ++nfrags; + + pfrag->offset = pfrag->offset + allocsize; + nfrags++; + + skb->len += tailen; + skb->data_len += tailen; + skb->truesize += tailen; + if (sk) + atomic_add(tailen, &sk->sk_wmem_alloc); + + skb_push(skb, -skb_network_offset(skb)); + + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + esph->spi = x->id.spi; + + tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen); + if (!tmp) { + spin_unlock_bh(&x->lock); + err = -ENOMEM; + goto error; + } + seqhi = esp_tmp_seqhi(tmp); + iv = esp_tmp_iv(aead, tmp, seqhilen); + req = esp_tmp_req(aead, iv); + sg = esp_req_sg(aead, req); + dsg = &sg[nfrags]; + + esph = esp_output_set_esn(skb, esph, seqhi); + + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + clen + alen); + + allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES); + + if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { + spin_unlock_bh(&x->lock); + err = -ENOMEM; + goto error; + } + + skb_shinfo(skb)->nr_frags = 1; + + page = pfrag->page; + get_page(page); + /* replace page frags in skb with new page */ + __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len); + pfrag->offset = pfrag->offset + allocsize; + + sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1); + skb_to_sgvec(skb, dsg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + clen + alen); + + spin_unlock_bh(&x->lock); + + goto skip_cow2; + } } - seqhi = esp_tmp_seqhi(tmp); - iv = esp_tmp_iv(aead, tmp, seqhilen); - req = esp_tmp_req(aead, iv); - sg = esp_req_sg(aead, req); +cow: + err = skb_cow_data(skb, tailen, &trailer); + if (err < 0) + goto error; + nfrags = err; - /* Fill padding... */ tail = skb_tail_pointer(trailer); + esph = ip_esp_hdr(skb); + +skip_cow: + /* Fill padding... */ if (tfclen) { memset(tail, 0, tfclen); tail += tfclen; @@ -223,36 +386,40 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) tail[i] = i + 1; } while (0); tail[plen - 2] = plen - 2; - tail[plen - 1] = *skb_mac_header(skb); + tail[plen - 1] = proto; pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); - esph = ip_esp_hdr(skb); - *skb_mac_header(skb) = IPPROTO_ESP; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + esph->spi = x->id.spi; - aead_request_set_callback(req, 0, esp_output_done, skb); - - /* For ESN we move the header forward by 4 bytes to - * accomodate the high bits. We will move it back after - * encryption. 
- */ - if ((x->props.flags & XFRM_STATE_ESN)) { - esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); - *seqhi = esph->spi; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); - aead_request_set_callback(req, 0, esp_output_done_esn, skb); + tmp = esp_alloc_tmp(aead, nfrags, seqhilen); + if (!tmp) { + err = -ENOMEM; + goto error; } - esph->spi = x->id.spi; + seqhi = esp_tmp_seqhi(tmp); + iv = esp_tmp_iv(aead, tmp, seqhilen); + req = esp_tmp_req(aead, iv); + sg = esp_req_sg(aead, req); + dsg = sg; + + esph = esp_output_set_esn(skb, esph, seqhi); sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, (unsigned char *)esph - skb->data, assoclen + ivlen + clen + alen); - aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); +skip_cow2: + if ((x->props.flags & XFRM_STATE_ESN)) + aead_request_set_callback(req, 0, esp_output_done_esn, skb); + else + aead_request_set_callback(req, 0, esp_output_done, skb); + + aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv); aead_request_set_ad(req, assoclen); seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + @@ -278,6 +445,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) esp_output_restore_header(skb); } + if (sg != dsg) + esp_ssg_unref(x, tmp); kfree(tmp); error: @@ -343,6 +512,23 @@ static void esp_input_restore_header(struct sk_buff *skb) __skb_pull(skb, 4); } +static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi) +{ + struct xfrm_state *x = xfrm_input_state(skb); + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data; + + /* For ESN we move the header forward by 4 bytes to + * accomodate the high bits. We will move it back after + * decryption. + */ + if ((x->props.flags & XFRM_STATE_ESN)) { + esph = (void *)skb_push(skb, 4); + *seqhi = esph->spi; + esph->spi = esph->seq_no; + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; + } +} + static void esp_input_done_esn(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; @@ -378,14 +564,6 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) goto out; } - nfrags = skb_cow_data(skb, 0, &trailer); - if (nfrags < 0) { - ret = -EINVAL; - goto out; - } - - ret = -ENOMEM; - assoclen = sizeof(*esph); seqhilen = 0; @@ -394,6 +572,27 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) assoclen += seqhilen; } + if (!skb_cloned(skb)) { + if (!skb_is_nonlinear(skb)) { + nfrags = 1; + + goto skip_cow; + } else if (!skb_has_frag_list(skb)) { + nfrags = skb_shinfo(skb)->nr_frags; + nfrags++; + + goto skip_cow; + } + } + + nfrags = skb_cow_data(skb, 0, &trailer); + if (nfrags < 0) { + ret = -EINVAL; + goto out; + } + +skip_cow: + ret = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags, seqhilen); if (!tmp) goto out; @@ -404,26 +603,17 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); - skb->ip_summed = CHECKSUM_NONE; + esp_input_set_header(skb, seqhi); - esph = (struct ip_esp_hdr *)skb->data; + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, 0, skb->len); - aead_request_set_callback(req, 0, esp_input_done, skb); + skb->ip_summed = CHECKSUM_NONE; - /* For ESN we move the header forward by 4 bytes to - * accomodate the high bits. We will move it back after - * decryption. 
- */ - if ((x->props.flags & XFRM_STATE_ESN)) { - esph = (void *)skb_push(skb, 4); - *seqhi = esph->spi; - esph->spi = esph->seq_no; - esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; + if ((x->props.flags & XFRM_STATE_ESN)) aead_request_set_callback(req, 0, esp_input_done_esn, skb); - } - - sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, 0, skb->len); + else + aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); aead_request_set_ad(req, assoclen); -- cgit v1.2.3 From eb758c8864d49f5786432ce38fd8a72bdbbd10cf Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 17 Jan 2017 10:23:08 +0100 Subject: esp: Introduce a helper to setup the trailer We need to setup the trailer in two different cases, so add a helper to avoid code duplication. Signed-off-by: Steffen Klassert --- net/ipv4/esp4.c | 44 +++++++++++++++++++------------------------- net/ipv6/esp6.c | 44 +++++++++++++++++++------------------------- 2 files changed, 38 insertions(+), 50 deletions(-) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 9e8d97133513..b1e24446e297 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -182,6 +182,22 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err) esp_output_done(base, err); } +static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto) +{ + /* Fill padding... */ + if (tfclen) { + memset(tail, 0, tfclen); + tail += tfclen; + } + do { + int i; + for (i = 0; i < plen - 2; i++) + tail[i] = i + 1; + } while (0); + tail[plen - 2] = plen - 2; + tail[plen - 1] = proto; +} + static int esp_output(struct xfrm_state *x, struct sk_buff *skb) { struct esp_output_extra *extra; @@ -304,18 +320,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) tail = vaddr + pfrag->offset; - /* Fill padding... */ - if (tfclen) { - memset(tail, 0, tfclen); - tail += tfclen; - } - do { - int i; - for (i = 0; i < plen - 2; i++) - tail[i] = i + 1; - } while (0); - tail[plen - 2] = plen - 2; - tail[plen - 1] = proto; + esp_output_fill_trailer(tail, tfclen, plen, proto); kunmap_atomic(vaddr); @@ -395,20 +400,9 @@ cow: esph = ip_esp_hdr(skb); skip_cow: - /* Fill padding... */ - if (tfclen) { - memset(tail, 0, tfclen); - tail += tfclen; - } - do { - int i; - for (i = 0; i < plen - 2; i++) - tail[i] = i + 1; - } while (0); - tail[plen - 2] = plen - 2; - tail[plen - 1] = proto; - pskb_put(skb, trailer, clen - skb->len + alen); + esp_output_fill_trailer(tail, tfclen, plen, proto); + pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); esph->spi = x->id.spi; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index a428ac6b7c74..ff54faa75631 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -198,6 +198,22 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err) esp_output_done(base, err); } +static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto) +{ + /* Fill padding... */ + if (tfclen) { + memset(tail, 0, tfclen); + tail += tfclen; + } + do { + int i; + for (i = 0; i < plen - 2; i++) + tail[i] = i + 1; + } while (0); + tail[plen - 2] = plen - 2; + tail[plen - 1] = proto; +} + static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; @@ -284,18 +300,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) tail = vaddr + pfrag->offset; - /* Fill padding... 
*/ - if (tfclen) { - memset(tail, 0, tfclen); - tail += tfclen; - } - do { - int i; - for (i = 0; i < plen - 2; i++) - tail[i] = i + 1; - } while (0); - tail[plen - 2] = plen - 2; - tail[plen - 1] = proto; + esp_output_fill_trailer(tail, tfclen, plen, proto); kunmap_atomic(vaddr); @@ -375,20 +380,9 @@ cow: esph = ip_esp_hdr(skb); skip_cow: - /* Fill padding... */ - if (tfclen) { - memset(tail, 0, tfclen); - tail += tfclen; - } - do { - int i; - for (i = 0; i < plen - 2; i++) - tail[i] = i + 1; - } while (0); - tail[plen - 2] = plen - 2; - tail[plen - 1] = proto; - pskb_put(skb, trailer, clen - skb->len + alen); + esp_output_fill_trailer(tail, tfclen, plen, proto); + pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); -- cgit v1.2.3 From 0e8edb9aed03892844713bb76a8e1d2d138f3175 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 13 Jan 2017 16:35:03 +0100 Subject: mwifiex: fix uninitialized variable access in pcie_remove Checking the firmware status from PCIe register only works if the register is available, otherwise we end up with random behavior: drivers/net/wireless/marvell/mwifiex/pcie.c: In function 'mwifiex_pcie_remove': drivers/net/wireless/marvell/mwifiex/pcie.c:585:5: error: 'fw_status' may be used uninitialized in this function [-Werror=maybe-uninitialized] This makes sure we treat the absence of the register as a failure. Fixes: 045f0c1b5e26 ("mwifiex: get rid of global user_rmmod flag") Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 66226c615be0..4d78087ea13a 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -299,6 +299,8 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) reg = card->pcie.reg; if (reg) ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + else + fw_status = -1; if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); -- cgit v1.2.3 From 77c0d0cd10e793989d1e8b835a9a09694182cb39 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 4 Jan 2017 12:09:41 +0100 Subject: brcmfmac: avoid writing channel out of allocated array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our code was assigning number of channels to the index variable by default. If firmware reported channel we didn't predict this would result in using that initial index value and writing out of array. This never happened so far (we got a complete list of supported channels) but it means possible memory corruption so we should handle it anyway. This patch simply detects unexpected channel and ignores it. As we don't try to create new entry now, it's also safe to drop hw_value and center_freq assignment. For known channels we have these set anyway. I decided to fix this issue by assigning NULL or a target channel to the channel variable. This was one of possible ways, I prefefred this one as it also avoids using channel[index] over and over. 
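The replacement is the usual lookup-or-NULL scan over a fixed table: resolve the firmware-reported control channel number to an existing entry and skip the chanspec entirely when nothing matches, instead of writing through a default index one past the array. A condensed sketch with a hypothetical helper name (struct ieee80211_supported_band and its channels/hw_value fields are the real cfg80211 structures):

#include <net/cfg80211.h>

/* Hypothetical helper: map a firmware-reported control channel number to
 * the matching entry in the band's static channel table, or NULL if the
 * firmware mentioned a channel we never registered.
 */
static struct ieee80211_channel *
brcmf_find_channel(struct ieee80211_supported_band *band, u16 chan_num)
{
	int i;

	for (i = 0; i < band->n_channels; i++)
		if (band->channels[i].hw_value == chan_num)
			return &band->channels[i];

	return NULL;	/* caller warns once and continues the loop */
}
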
Fixes: 58de92d2f95e ("brcmfmac: use static superset of channels for wiphy bands") Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 32 ++++++++++++---------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 15eaf722b54b..895404cccbae 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5825,7 +5825,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, u32 i, j; u32 total; u32 chaninfo; - u32 index; pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); @@ -5873,33 +5872,36 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ch.bw == BRCMU_CHAN_BW_80) continue; - channel = band->channels; - index = band->n_channels; + channel = NULL; for (j = 0; j < band->n_channels; j++) { - if (channel[j].hw_value == ch.control_ch_num) { - index = j; + if (band->channels[j].hw_value == ch.control_ch_num) { + channel = &band->channels[j]; break; } } - channel[index].center_freq = - ieee80211_channel_to_frequency(ch.control_ch_num, - band->band); - channel[index].hw_value = ch.control_ch_num; + if (!channel) { + /* It seems firmware supports some channel we never + * considered. Something new in IEEE standard? + */ + brcmf_err("Ignoring unexpected firmware channel %d\n", + ch.control_ch_num); + continue; + } /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. */ if (ch.bw == BRCMU_CHAN_BW_80) { - channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ; + channel->flags &= ~IEEE80211_CHAN_NO_80MHZ; } else if (ch.bw == BRCMU_CHAN_BW_40) { - brcmf_update_bw40_channel_flag(&channel[index], &ch); + brcmf_update_bw40_channel_flag(channel, &ch); } else { /* enable the channel and disable other bandwidths * for now as mentioned order assure they are enabled * for subsequent chanspecs. */ - channel[index].flags = IEEE80211_CHAN_NO_HT40 | - IEEE80211_CHAN_NO_80MHZ; + channel->flags = IEEE80211_CHAN_NO_HT40 | + IEEE80211_CHAN_NO_80MHZ; ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; @@ -5907,11 +5909,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) - channel[index].flags |= + channel->flags |= (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR); if (chaninfo & WL_CHAN_PASSIVE) - channel[index].flags |= + channel->flags |= IEEE80211_CHAN_NO_IR; } } -- cgit v1.2.3 From 480b468625da1f054c487f7168e9a9bdc1bf869b Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:10 +0100 Subject: rt2800usb: remove watchdog On rt2800usb, if we do not get TX status from HW, we assume frames were posted and after entry->last_action timeout, we forcibly provide TX status to mac80211. So it's not possible to detect hardware TX hung based on the timeout. Additionally TXRQ_PCNT tells on number of frames in the Packet Buffer (buffer between bus interface and chip MAC subsystem), which can be non zero on normal conditions. To check HW hung we will need provide some different mechanism, for now remove watchdog as current implementation is wrong and not useful. 
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 42 -------------------------------------------- 1 file changed, 42 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index f38c44061b5b..8ec22c00510f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -435,47 +435,6 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev, return retval; } -/* - * Watchdog handlers - */ -static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev) -{ - unsigned int i; - u32 reg; - - rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg); - if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) { - rt2x00_warn(rt2x00dev, "TX HW queue 0 timed out, invoke forced kick\n"); - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012); - - for (i = 0; i < 10; i++) { - udelay(10); - if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) - break; - } - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006); - } - - rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg); - if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) { - rt2x00_warn(rt2x00dev, "TX HW queue 1 timed out, invoke forced kick\n"); - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a); - - for (i = 0; i < 10; i++) { - udelay(10); - if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) - break; - } - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006); - } - - rt2x00usb_watchdog(rt2x00dev); -} - /* * TX descriptor initialization */ @@ -877,7 +836,6 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .link_tuner = rt2800_link_tuner, .gain_calibration = rt2800_gain_calibration, .vco_calibration = rt2800_vco_calibration, - .watchdog = rt2800usb_watchdog, .start_queue = rt2800usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt2800usb_stop_queue, -- cgit v1.2.3 From cfe82fbd84239b7b65b380eba2f02cd5b12e99d7 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:11 +0100 Subject: rt2800: increase TX timeout When the medium is busy or frames have to be resent, it takes time to send the frames and get TX status from hardware. For some really bad medium conditions it can take seconds. The patch changes the TX status timeout to give the HW more time to provide it; however, even 500ms may not be enough for really bad conditions. In the future this timeout should be removed and replaced with a proper watchdog mechanism. Increase the flush timeout accordingly as well.
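The timeout test this relies on is the usual jiffies-based pattern. The sketch below is only an illustration, assuming an entry->last_action timestamp recorded when the frame is handed to the hardware (as rt2800usb does); it is not a copy of the driver code.

/* Illustrative sketch: true when no TX status arrived within timeout_ms.
 * time_after() handles jiffies wrap-around correctly.
 */
static bool txstatus_timed_out(struct queue_entry *entry,
			       unsigned int timeout_ms)
{
	return time_after(jiffies,
			  entry->last_action + msecs_to_jiffies(timeout_ms));
}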
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 8ec22c00510f..400f074fd94f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -123,7 +123,7 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry) if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) return false; - tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100)); + tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(500)); if (unlikely(tout)) rt2x00_dbg(entry->queue->rt2x00dev, "TX status timeout for entry %d in queue %d\n", diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c index f0178fd4fe5f..da38d254c26f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c @@ -101,7 +101,7 @@ void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop) unsigned int i; for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) - msleep(10); + msleep(50); } EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 6005e14213ca..838ca58d2dd6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -517,7 +517,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop) * Wait for a little while to give the driver * the oppurtunity to recover itself. */ - msleep(10); + msleep(50); } } EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue); -- cgit v1.2.3 From 15ec51b25e05ab434173ec647fa678b6c7b660bb Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:12 +0100 Subject: rt2x00: save conf settings before reset tuner Reset tuner use curr_band value, make sure it is updated. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00config.c | 32 +++++++++++------------ 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c index 6a1f508d472f..350507458ddc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c @@ -249,6 +249,22 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, */ rt2x00dev->ops->lib->config(rt2x00dev, &libconf, ieee80211_flags); + if (conf->flags & IEEE80211_CONF_PS) + set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); + else + clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); + + if (conf->flags & IEEE80211_CONF_MONITOR) + set_bit(CONFIG_MONITORING, &rt2x00dev->flags); + else + clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); + + rt2x00dev->curr_band = conf->chandef.chan->band; + rt2x00dev->curr_freq = conf->chandef.chan->center_freq; + rt2x00dev->tx_power = conf->power_level; + rt2x00dev->short_retry = conf->short_frame_max_tx_count; + rt2x00dev->long_retry = conf->long_frame_max_tx_count; + /* * Some configuration changes affect the link quality * which means we need to reset the link tuner. 
@@ -271,20 +287,4 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, &rt2x00dev->autowakeup_work, autowake_timeout - 15); } - - if (conf->flags & IEEE80211_CONF_PS) - set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); - else - clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); - - if (conf->flags & IEEE80211_CONF_MONITOR) - set_bit(CONFIG_MONITORING, &rt2x00dev->flags); - else - clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); - - rt2x00dev->curr_band = conf->chandef.chan->band; - rt2x00dev->curr_freq = conf->chandef.chan->center_freq; - rt2x00dev->tx_power = conf->power_level; - rt2x00dev->short_retry = conf->short_frame_max_tx_count; - rt2x00dev->long_retry = conf->long_frame_max_tx_count; } -- cgit v1.2.3 From 01d97ef4b25f8ca79ff1522416cdaf5b0c6b9047 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:13 +0100 Subject: rt2800: change default retry settings We do not have an option to set a per-frame retry count. We only have the global TX_RTY_CFG register, which specifies the number of retries. Set that register to a value that corresponds to the rate control algorithm's number of frame posts (number of retries + 1), which is 3 for aggregated frames. This should help with the big amount of retries under bad conditions, and hence mitigate bufferbloat-like problems. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 5436cdb07937..4833ec63b7cd 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4769,8 +4769,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f); rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg); - rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15); - rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31); + rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 2); + rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 2); rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000); rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0); rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0); @@ -7513,6 +7513,13 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) */ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + /* + * Change default retry settings to values corresponding more closely + * to rate[0].count setting of minstrel rate control algorithm. + */ + rt2x00dev->hw->wiphy->retry_short = 2; + rt2x00dev->hw->wiphy->retry_long = 2; + /* * Initialize all hw fields. */ -- cgit v1.2.3 From e4019e7f95302c45fad5c35d62c9b3ca8fc30fb2 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:14 +0100 Subject: rt2800: tune TX_RTS_CFG config Enable RTS frame retry fall-back and limit the number of RTS retries to 7, which is the default number of retries for small frames. As RTS/CTS is used for TXOP protection, those settings prevent posting lots of RTS frames when the remote station does not respond with CTS at the moment. After sending 7 RTSes the HW will start the back-off mechanism and afterwards will start posting RTS again to get access to the medium.
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 4833ec63b7cd..1f4a120fb16d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4902,10 +4902,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg); rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg); - rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32); + rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7); rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, IEEE80211_MAX_RTS_THRESHOLD); - rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0); + rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 1); rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca); -- cgit v1.2.3 From 1701221696764b6861d0ee66850812a8900b9b9b Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:16 +0100 Subject: rt2800usb: mark tx failure on timeout If we do not get TX status in a reasonable time, we most likely failed to send the frame, hence mark it as such. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 400f074fd94f..205a7b8ac8a7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -602,10 +602,9 @@ static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev) !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) break; - if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) + if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) || + rt2800usb_entry_txstatus_timeout(entry)) rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); - else if (rt2800usb_entry_txstatus_timeout(entry)) - rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN); else break; } -- cgit v1.2.3 From 51583248187cf224485327aed54b64c348f415dc Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:17 +0100 Subject: rt2x00: do not flush empty queue Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 3cc1384ed2fc..ecc96312a370 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -743,7 +743,8 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; tx_queue_for_each(rt2x00dev, queue) - rt2x00queue_flush_queue(queue, drop); + if (!rt2x00queue_empty(queue)) + rt2x00queue_flush_queue(queue, drop); } EXPORT_SYMBOL_GPL(rt2x00mac_flush); -- cgit v1.2.3 From 66ecec02e851cb32b1f30392d545dc821e71aaa9 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 6 Jan 2017 14:05:18 +0100 Subject: rt2800: set max_psdu to 3 on usb devices All Ralink USB devices I have, including old ones, work well with max_psdu = 3 (64kB tx AMPDUs). Fix the indentation on the way.
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 1f4a120fb16d..ff047dc5cc2c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4743,15 +4743,16 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg); rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); - if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) || - rt2x00_rt(rt2x00dev, RT2883) || - rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) { + if (rt2x00_is_usb(rt2x00dev)) { + drv_data->max_psdu = 3; + } else if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) || + rt2x00_rt(rt2x00dev, RT2883) || + rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) { drv_data->max_psdu = 2; - rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2); } else { drv_data->max_psdu = 1; - rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1); } + rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, drv_data->max_psdu); rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10); rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10); rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); -- cgit v1.2.3 From 9ea0c307609fd20e03f53546b9cefbb20b96785d Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 7 Jan 2017 21:36:04 +0100 Subject: brcmfmac: don't preset all channels as disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During init we take care of regulatory stuff by disabling all unavailable channels (see brcmf_construct_chaninfo) so this predisabling them is not really required (and this patch won't change any behavior). It will on the other hand allow more detailed runtime control over channels which is the main reason for this change. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 895404cccbae..45ee5b686939 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -138,7 +138,6 @@ static struct ieee80211_rate __wl_rates[] = { .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ - .flags = IEEE80211_CHAN_DISABLED, \ .max_antenna_gain = 0, \ .max_power = 30, \ } @@ -147,7 +146,6 @@ static struct ieee80211_rate __wl_rates[] = { .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ - .flags = IEEE80211_CHAN_DISABLED, \ .max_antenna_gain = 0, \ .max_power = 30, \ } -- cgit v1.2.3 From ab99063f873749b3c3b1e5d44038559883465e74 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 7 Jan 2017 21:36:05 +0100 Subject: brcmfmac: setup wiphy bands after registering it first MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During bands setup we disable all channels that firmware doesn't support in the current regulatory setup. If we do this before wiphy_register it will result in copying set flags (including IEEE80211_CHAN_DISABLED) to the orig_flags which is supposed to be persistent.
We don't want this as regulatory change may result in enabling some channels. We shouldn't mess with orig_flags then (by changing them or ignoring them) so it's better to just take care of their proper values. This patch cleanups code a bit (by taking orig_flags more seriously) and allows further improvements like disabling really unavailable channels. We will need that e.g. if some frequencies should be disabled for good due to hardware setup (design). Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 45ee5b686939..729bf3351feb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6477,8 +6477,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->bands[NL80211_BAND_5GHZ] = band; } } - err = brcmf_setup_wiphybands(wiphy); - return err; + return 0; } static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) @@ -6843,6 +6842,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, goto priv_out; } + err = brcmf_setup_wiphybands(wiphy); + if (err) { + brcmf_err("Setting wiphy bands failed (%d)\n", err); + goto wiphy_unreg_out; + } + /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(), * setup 40MHz in 2GHz band and enable OBSS scanning. */ -- cgit v1.2.3 From 6183468a23fc6b6903f8597982017ad2c7fdefcf Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 9 Jan 2017 15:33:50 -0800 Subject: mwifiex: debugfs: Fix (sometimes) off-by-1 SSID print Similar to commit fcd2042e8d36 ("mwifiex: printk() overflow with 32-byte SSIDs"), we failed to account for the existence of 32-char SSIDs in our debugfs code. Unlike in that case though, we zeroed out the containing struct first, and I'm pretty sure we're guaranteed to have some padding after the 'ssid.ssid' and 'ssid.ssid_len' fields (the struct is 33 bytes long). So, this is the difference between: # cat /sys/kernel/debug/mwifiex/mlan0/info ... essid="0123456789abcdef0123456789abcdef " ... and the correct output: # cat /sys/kernel/debug/mwifiex/mlan0/info ... essid="0123456789abcdef0123456789abcdef" ... 
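The fix relies on printf-style precision: "%.*s" prints at most ssid_len bytes, so a 32-byte SSID that leaves no room for a NUL terminator is still printed safely. A small stand-alone userspace illustration of the same technique (a demo only, not the driver code):

#include <stdio.h>

int main(void)
{
	/* 32 characters exactly fill the buffer, leaving no room for a NUL */
	char ssid[32] = "0123456789abcdef0123456789abcdef";
	int ssid_len = 32;

	/* "%.*s" stops after ssid_len bytes even without a terminator */
	printf("essid=\"%.*s\"\n", ssid_len, ssid);
	return 0;
}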
Fixes: 5e6e3a92b9a4 ("wireless: mwifiex: initial commit for Marvell mwifiex driver") Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/debugfs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index b9284b533294..ae2b69db5994 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -114,7 +114,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf, if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { p += sprintf(p, "multicast_count=\"%d\"\n", netdev_mc_count(netdev)); - p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); + p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len, + info.ssid.ssid); p += sprintf(p, "bssid=\"%pM\"\n", info.bssid); p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); p += sprintf(p, "country_code = \"%s\"\n", info.country_code); -- cgit v1.2.3 From 7f2f61377bcad5e14d7faf38d4d4317c7d984e71 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Tue, 10 Jan 2017 10:15:26 -0600 Subject: rtlwifi: rtl8192de: Remove a pointless goto In commit c93ac39da0064 ("rtlwifi: Remove some redundant code), a goto statement was inadvertently left in the code. Fixes: c93ac39da0064 ("rtlwifi: Remove some redundant code) Reported-by: kbuild test robot Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index aa1e51c871df..569d572e0618 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -323,7 +323,6 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); if (err) pr_err("fw is not ready to run!\n"); - goto exit; exit: err = _rtl92d_fw_init(hw); return err; -- cgit v1.2.3 From af473688ce44b7faad6fa25bb9df43c7b48308c7 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 14 Jan 2017 12:33:19 +0100 Subject: net: korina: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/korina.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 8037426ec50f..3e415b895d95 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -695,25 +695,27 @@ static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_gset(&lp->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_sset(&lp->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); korina_set_carrier(&lp->mii_if); @@ -729,9 +731,9 @@ static u32 netdev_get_link(struct net_device *dev) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .get_link = netdev_get_link, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int korina_alloc_ring(struct net_device *dev) -- cgit v1.2.3 From c523838c8514e1fdccc60b0a584bae374e6c5c0f Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 15 Jan 2017 23:18:21 +0100 Subject: net: jme: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/jme.c | 34 +++++++++++++++++----------------- drivers/net/ethernet/jme.h | 6 +++--- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f9fcab54783c..f580b49e6b67 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1879,7 +1879,7 @@ jme_open(struct net_device *netdev) jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); @@ -2374,7 +2374,7 @@ jme_tx_timeout(struct net_device *netdev) jme->phylink = 0; jme_reset_phy_processor(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); /* * Force to Reset the link again @@ -2648,27 +2648,27 @@ jme_set_wol(struct net_device *netdev, } static int -jme_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); return rc; } static int -jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc, fdc = 0; - if (ethtool_cmd_speed(ecmd) == SPEED_1000 - && ecmd->autoneg != AUTONEG_ENABLE) + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; /* @@ -2676,18 +2676,18 @@ jme_set_settings(struct net_device *netdev, * Hardware would not generate link change interrupt. 
*/ if (jme->mii_if.force_media && - ecmd->autoneg != AUTONEG_ENABLE && - (jme->mii_if.full_duplex != ecmd->duplex)) + cmd->base.autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != cmd->base.duplex)) fdc = 1; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); if (!rc) { if (fdc) jme_reset_link(jme); - jme->old_ecmd = *ecmd; + jme->old_cmd = *cmd; set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2716,7 +2716,7 @@ jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) if (!rc && (cmd == SIOCSMIIREG)) { if (duplex_chg) jme_reset_link(jme); - jme_get_settings(netdev, &jme->old_ecmd); + jme_get_link_ksettings(netdev, &jme->old_cmd); set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2915,8 +2915,6 @@ static const struct ethtool_ops jme_ethtool_ops = { .set_pauseparam = jme_set_pauseparam, .get_wol = jme_get_wol, .set_wol = jme_set_wol, - .get_settings = jme_get_settings, - .set_settings = jme_set_settings, .get_link = jme_get_link, .get_msglevel = jme_get_msglevel, .set_msglevel = jme_set_msglevel, @@ -2924,6 +2922,8 @@ static const struct ethtool_ops jme_ethtool_ops = { .get_eeprom_len = jme_get_eeprom_len, .get_eeprom = jme_get_eeprom, .set_eeprom = jme_set_eeprom, + .get_link_ksettings = jme_get_link_ksettings, + .set_link_ksettings = jme_set_link_ksettings, }; static int @@ -3306,7 +3306,7 @@ jme_resume(struct device *dev) jme_clear_pm_disable_wol(jme); jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 58cd67c0c8e4..89535c019f04 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -447,7 +447,7 @@ struct jme_adapter { u8 chip_sub_rev; u8 pcirev; u32 msg_enable; - struct ethtool_cmd old_ecmd; + struct ethtool_link_ksettings old_cmd; unsigned int old_mtu; struct dynpcc_info dpi; atomic_t intr_sem; @@ -1270,8 +1270,8 @@ static inline int new_phy_power_ctrl(u8 chip_main_rev) /* * Function prototypes */ -static int jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd); +static int jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); static void jme_set_unicastaddr(struct net_device *netdev); static void jme_set_multi(struct net_device *netdev); -- cgit v1.2.3 From 0f826385a6fa699d31fc1f835fb9ef9a724b523e Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 16 Jan 2017 22:46:08 +0100 Subject: net: marvell: skge: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. The callback set_link_ksettings no longer update the value of advertising, as the struct ethtool_link_ksettings is defined as const. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/skge.c | 63 ++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9146a514fb33..81106b73e468 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw) return supported; } -static int skge_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int skge_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = skge_supported_modes(hw); + supported = skge_supported_modes(hw); if (hw->copper) { - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy_addr; + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; } else - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + + advertising = skge->advertising; + cmd->base.autoneg = skge->autoneg; + cmd->base.speed = skge->speed; + cmd->base.duplex = skge->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - ecmd->advertising = skge->advertising; - ecmd->autoneg = skge->autoneg; - ethtool_cmd_speed_set(ecmd, skge->speed); - ecmd->duplex = skge->duplex; return 0; } -static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int skge_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; u32 supported = skge_supported_modes(hw); int err = 0; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); - if (ecmd->autoneg == AUTONEG_ENABLE) { - ecmd->advertising = supported; + if (cmd->base.autoneg == AUTONEG_ENABLE) { + advertising = supported; skge->duplex = -1; skge->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; skge->speed = speed; - skge->duplex = ecmd->duplex; + skge->duplex = cmd->base.duplex; } - skge->autoneg = ecmd->autoneg; - skge->advertising = ecmd->advertising; + skge->autoneg = cmd->base.autoneg; + skge->advertising = advertising; if (netif_running(dev)) { skge_down(dev); @@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct 
ethtool_eeprom *eeprom } static const struct ethtool_ops skge_ethtool_ops = { - .get_settings = skge_get_settings, - .set_settings = skge_set_settings, .get_drvinfo = skge_get_drvinfo, .get_regs_len = skge_get_regs_len, .get_regs = skge_get_regs, @@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = { .set_phys_id = skge_set_phys_id, .get_sset_count = skge_get_sset_count, .get_ethtool_stats = skge_get_ethtool_stats, + .get_link_ksettings = skge_get_link_ksettings, + .set_link_ksettings = skge_set_link_ksettings, }; /* -- cgit v1.2.3 From 55f78fcdd88c6298933645b74c32ce3788bed7eb Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 14 Jan 2017 23:26:22 +0100 Subject: net: marvell: sky2: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/sky2.c | 68 ++++++++++++++++++++++--------------- 1 file changed, 40 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 18d6336fa162..be003c5a4f5f 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) | SUPPORTED_1000baseT_Full; } -static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = sky2_supported_modes(hw); - ecmd->phy_address = PHY_ADDR_MARV; + supported = sky2_supported_modes(hw); + cmd->base.phy_address = PHY_ADDR_MARV; if (sky2_is_copper(hw)) { - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, sky2->speed); - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; + cmd->base.port = PORT_TP; + cmd->base.speed = sky2->speed; + supported |= SUPPORTED_Autoneg | SUPPORTED_TP; } else { - ethtool_cmd_speed_set(ecmd, SPEED_1000); - ecmd->port = PORT_FIBRE; - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; } - ecmd->advertising = sky2->advertising; - ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) + advertising = sky2->advertising; + cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; - ecmd->duplex = sky2->duplex; + cmd->base.duplex = sky2->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; u32 supported = sky2_supported_modes(hw); + u32 new_advertising; + + ethtool_convert_link_mode_to_legacy_u32(&new_advertising, + cmd->link_modes.advertising); - if (ecmd->autoneg == AUTONEG_ENABLE) { - if (ecmd->advertising & ~supported) + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (new_advertising & ~supported) return -EINVAL; if (sky2_is_copper(hw)) - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_TP | ADVERTISED_Autoneg; else - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_FIBRE | ADVERTISED_Autoneg; @@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) sky2->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; sky2->speed = speed; - sky2->duplex = ecmd->duplex; + sky2->duplex = cmd->base.duplex; sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; } @@ -4405,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) } static const struct ethtool_ops sky2_ethtool_ops = { - .get_settings = sky2_get_settings, - .set_settings = sky2_set_settings, .get_drvinfo = sky2_get_drvinfo, .get_wol = sky2_get_wol, .set_wol = sky2_set_wol, @@ -4429,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = { .set_phys_id = sky2_set_phys_id, .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, + .get_link_ksettings = sky2_get_link_ksettings, + .set_link_ksettings = sky2_set_link_ksettings, }; #ifdef CONFIG_SKY2_DEBUG -- cgit v1.2.3 From aefb4d4ad83b608cb8e0cab8d3cd8e57d3f91feb Mon Sep 17 00:00:00 2001 From: Robert Shearman Date: Mon, 16 Jan 2017 14:16:36 +0000 Subject: net: AF-specific RTM_GETSTATS attributes Add the functionality for including address-family-specific per-link stats in RTM_GETSTATS messages. This is done through adding a new IFLA_STATS_AF_SPEC attribute under which address family attributes are nested and then the AF-specific attributes can be further nested. 
This follows the model of IFLA_AF_SPEC on RTM_*LINK messages and it has the advantage of presenting an easily extended hierarchy. The rtnl_af_ops structure is extended to provide AFs with the opportunity to fill and provide the size of their stats attributes. One alternative would have been to provide AFs with the ability to add attributes directly into the RTM_GETSTATS message without a nested hierarchy. I discounted this approach as it increases the rate at which the 32 attribute number space is used up and it makes implementation a little more tricky for stats dump resuming (at the moment the order in which attributes are added to the message has to match the numeric order of the attributes). Another alternative would have been to register per-AF RTM_GETSTATS handlers. I discounted this approach as I perceived a common use-case to be getting all the stats for an interface and this approach would necessitate multiple requests/dumps to retrieve them all. Signed-off-by: Robert Shearman Acked-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/net/rtnetlink.h | 4 ++++ include/uapi/linux/if_link.h | 1 + net/core/rtnetlink.c | 50 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 4113916cc1bb..106de5f7bf06 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -139,6 +139,10 @@ struct rtnl_af_ops { const struct nlattr *attr); int (*set_link_af)(struct net_device *dev, const struct nlattr *attr); + + int (*fill_stats_af)(struct sk_buff *skb, + const struct net_device *dev); + size_t (*get_stats_af_size)(const struct net_device *dev); }; void __rtnl_af_unregister(struct rtnl_af_ops *ops); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 6b13e591abc9..184b16ed2b84 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -847,6 +847,7 @@ enum { IFLA_STATS_LINK_XSTATS, IFLA_STATS_LINK_XSTATS_SLAVE, IFLA_STATS_LINK_OFFLOAD_XSTATS, + IFLA_STATS_AF_SPEC, __IFLA_STATS_MAX, }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 75e3ea7bda08..f538f764fca6 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3829,6 +3829,39 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, *idxattr = 0; } + if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { + struct rtnl_af_ops *af_ops; + + *idxattr = IFLA_STATS_AF_SPEC; + attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC); + if (!attr) + goto nla_put_failure; + + list_for_each_entry(af_ops, &rtnl_af_ops, list) { + if (af_ops->fill_stats_af) { + struct nlattr *af; + int err; + + af = nla_nest_start(skb, af_ops->family); + if (!af) + goto nla_put_failure; + + err = af_ops->fill_stats_af(skb, dev); + + if (err == -ENODATA) + nla_nest_cancel(skb, af); + else if (err < 0) + goto nla_put_failure; + + nla_nest_end(skb, af); + } + } + + nla_nest_end(skb, attr); + + *idxattr = 0; + } + nlmsg_end(skb, nlh); return 0; @@ -3885,6 +3918,23 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) size += rtnl_get_offload_stats_size(dev); + if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { + struct rtnl_af_ops *af_ops; + + /* for IFLA_STATS_AF_SPEC */ + size += nla_total_size(0); + + list_for_each_entry(af_ops, &rtnl_af_ops, list) { + if (af_ops->get_stats_af_size) { + size += nla_total_size( + af_ops->get_stats_af_size(dev)); + + /* for AF_* */ + size += 
nla_total_size(0); + } + } + } + return size; } -- cgit v1.2.3 From 27d691056bde4a6feca5e83fd92b787332c46302 Mon Sep 17 00:00:00 2001 From: Robert Shearman Date: Mon, 16 Jan 2017 14:16:37 +0000 Subject: mpls: Packet stats Having MPLS packet stats is useful for observing network operation and for diagnosing network problems. In the absence of anything better, RFC2863 and RFC3813 are used for guidance for which stats to expose and the semantics of them. In particular rx_noroutes maps to in unknown protos in RFC2863. The stats are exposed to userspace via AF_MPLS attributes embedded in the IFLA_STATS_AF_SPEC attribute of RTM_GETSTATS messages. All the introduced fields are 64-bit, even error ones, to ensure no overflow with long uptimes. Per-CPU counters are used to avoid cache-line contention on the commonly used fields. The other fields have also been made per-CPU for code to avoid performance problems in error conditions on the assumption that on some platforms the cost of atomic operations could be more expensive than sending the packet (which is what would be done in the success case). If that's not the case, we could instead not use per-CPU counters for these fields. Only unicast and non-fragment are exposed at the moment, but other counters can be exposed in the future either by adding to the end of struct mpls_link_stats or by additional netlink attributes in the AF_MPLS IFLA_STATS_AF_SPEC nested attribute. Signed-off-by: Robert Shearman Signed-off-by: David S. Miller --- include/uapi/linux/mpls.h | 30 ++++++++ net/mpls/af_mpls.c | 181 ++++++++++++++++++++++++++++++++++++++++------ net/mpls/internal.h | 58 ++++++++++++++- net/mpls/mpls_iptunnel.c | 11 ++- 4 files changed, 252 insertions(+), 28 deletions(-) diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h index 24a6cb1aec86..77a19dfe3990 100644 --- a/include/uapi/linux/mpls.h +++ b/include/uapi/linux/mpls.h @@ -43,4 +43,34 @@ struct mpls_label { #define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */ +/* These are embedded into IFLA_STATS_AF_SPEC: + * [IFLA_STATS_AF_SPEC] + * -> [AF_MPLS] + * -> [MPLS_STATS_xxx] + * + * Attributes: + * [MPLS_STATS_LINK] = { + * struct mpls_link_stats + * } + */ +enum { + MPLS_STATS_UNSPEC, /* also used as 64bit pad attribute */ + MPLS_STATS_LINK, + __MPLS_STATS_MAX, +}; + +#define MPLS_STATS_MAX (__MPLS_STATS_MAX - 1) + +struct mpls_link_stats { + __u64 rx_packets; /* total packets received */ + __u64 tx_packets; /* total packets transmitted */ + __u64 rx_bytes; /* total bytes received */ + __u64 tx_bytes; /* total bytes transmitted */ + __u64 rx_errors; /* bad packets received */ + __u64 tx_errors; /* packet transmit problems */ + __u64 rx_dropped; /* packet dropped on receive */ + __u64 tx_dropped; /* packet dropped on transmit */ + __u64 rx_noroute; /* no route for packet dest */ +}; + #endif /* _UAPI_MPLS_H */ diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 15fe97644ffe..4dc81963af8f 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -17,8 +18,8 @@ #include #if IS_ENABLED(CONFIG_IPV6) #include -#include #endif +#include #include #include "internal.h" @@ -48,11 +49,6 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index) return rt; } -static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev) -{ - return rcu_dereference_rtnl(dev->mpls_ptr); -} - bool mpls_output_possible(const struct net_device *dev) { return dev && (dev->flags & IFF_UP) && 
netif_carrier_ok(dev); @@ -98,6 +94,31 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) } EXPORT_SYMBOL_GPL(mpls_pkt_too_big); +void mpls_stats_inc_outucastpkts(struct net_device *dev, + const struct sk_buff *skb) +{ + struct mpls_dev *mdev; + + if (skb->protocol == htons(ETH_P_MPLS_UC)) { + mdev = mpls_dev_get(dev); + if (mdev) + MPLS_INC_STATS_LEN(mdev, skb->len, + tx_packets, + tx_bytes); + } else if (skb->protocol == htons(ETH_P_IP)) { + IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); +#if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct inet6_dev *in6dev = __in6_dev_get(dev); + + if (in6dev) + IP6_UPD_PO_STATS(dev_net(dev), in6dev, + IPSTATS_MIB_OUT, skb->len); +#endif + } +} +EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts); + static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb, bool bos) { @@ -253,6 +274,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, struct mpls_nh *nh; struct mpls_entry_decoded dec; struct net_device *out_dev; + struct mpls_dev *out_mdev; struct mpls_dev *mdev; unsigned int hh_len; unsigned int new_header_size; @@ -262,17 +284,25 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, /* Careful this entire function runs inside of an rcu critical section */ mdev = mpls_dev_get(dev); - if (!mdev || !mdev->input_enabled) + if (!mdev) goto drop; - if (skb->pkt_type != PACKET_HOST) + MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets, + rx_bytes); + + if (!mdev->input_enabled) { + MPLS_INC_STATS(mdev, rx_dropped); goto drop; + } + + if (skb->pkt_type != PACKET_HOST) + goto err; if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) - goto drop; + goto err; if (!pskb_may_pull(skb, sizeof(*hdr))) - goto drop; + goto err; /* Read and decode the label */ hdr = mpls_hdr(skb); @@ -285,33 +315,35 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, skb_orphan(skb); rt = mpls_route_input_rcu(net, dec.label); - if (!rt) + if (!rt) { + MPLS_INC_STATS(mdev, rx_noroute); goto drop; + } nh = mpls_select_multipath(rt, skb, dec.bos); if (!nh) - goto drop; - - /* Find the output device */ - out_dev = rcu_dereference(nh->nh_dev); - if (!mpls_output_possible(out_dev)) - goto drop; + goto err; if (skb_warn_if_lro(skb)) - goto drop; + goto err; skb_forward_csum(skb); /* Verify ttl is valid */ if (dec.ttl <= 1) - goto drop; + goto err; dec.ttl -= 1; + /* Find the output device */ + out_dev = rcu_dereference(nh->nh_dev); + if (!mpls_output_possible(out_dev)) + goto tx_err; + /* Verify the destination can hold the packet */ new_header_size = mpls_nh_header_size(nh); mtu = mpls_dev_mtu(out_dev); if (mpls_pkt_too_big(skb, mtu - new_header_size)) - goto drop; + goto tx_err; hh_len = LL_RESERVED_SPACE(out_dev); if (!out_dev->header_ops) @@ -319,7 +351,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, /* Ensure there is enough space for the headers in the skb */ if (skb_cow(skb, hh_len + new_header_size)) - goto drop; + goto tx_err; skb->dev = out_dev; skb->protocol = htons(ETH_P_MPLS_UC); @@ -327,7 +359,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, if (unlikely(!new_header_size && dec.bos)) { /* Penultimate hop popping */ if (!mpls_egress(rt, skb, dec)) - goto drop; + goto err; } else { bool bos; int i; @@ -343,6 +375,8 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, } } + mpls_stats_inc_outucastpkts(out_dev, skb); + /* If via wasn't specified then send out using device address */ 
if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC) err = neigh_xmit(NEIGH_LINK_TABLE, out_dev, @@ -355,6 +389,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, __func__, err); return 0; +tx_err: + out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL; + if (out_mdev) + MPLS_INC_STATS(out_mdev, tx_errors); + goto drop; +err: + MPLS_INC_STATS(mdev, rx_errors); drop: kfree_skb(skb); return NET_RX_DROP; @@ -853,6 +894,70 @@ errout: return err; } +static void mpls_get_stats(struct mpls_dev *mdev, + struct mpls_link_stats *stats) +{ + struct mpls_pcpu_stats *p; + int i; + + memset(stats, 0, sizeof(*stats)); + + for_each_possible_cpu(i) { + struct mpls_link_stats local; + unsigned int start; + + p = per_cpu_ptr(mdev->stats, i); + do { + start = u64_stats_fetch_begin(&p->syncp); + local = p->stats; + } while (u64_stats_fetch_retry(&p->syncp, start)); + + stats->rx_packets += local.rx_packets; + stats->rx_bytes += local.rx_bytes; + stats->tx_packets += local.tx_packets; + stats->tx_bytes += local.tx_bytes; + stats->rx_errors += local.rx_errors; + stats->tx_errors += local.tx_errors; + stats->rx_dropped += local.rx_dropped; + stats->tx_dropped += local.tx_dropped; + stats->rx_noroute += local.rx_noroute; + } +} + +static int mpls_fill_stats_af(struct sk_buff *skb, + const struct net_device *dev) +{ + struct mpls_link_stats *stats; + struct mpls_dev *mdev; + struct nlattr *nla; + + mdev = mpls_dev_get(dev); + if (!mdev) + return -ENODATA; + + nla = nla_reserve_64bit(skb, MPLS_STATS_LINK, + sizeof(struct mpls_link_stats), + MPLS_STATS_UNSPEC); + if (!nla) + return -EMSGSIZE; + + stats = nla_data(nla); + mpls_get_stats(mdev, stats); + + return 0; +} + +static size_t mpls_get_stats_af_size(const struct net_device *dev) +{ + struct mpls_dev *mdev; + + mdev = mpls_dev_get(dev); + if (!mdev) + return 0; + + return nla_total_size_64bit(sizeof(struct mpls_link_stats)); +} + #define MPLS_PERDEV_SYSCTL_OFFSET(field) \ (&((struct mpls_dev *)0)->field) @@ -911,6 +1016,7 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev) { struct mpls_dev *mdev; int err = -ENOMEM; + int i; ASSERT_RTNL(); @@ -918,6 +1024,17 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev) if (!mdev) return ERR_PTR(err); + mdev->stats = alloc_percpu(struct mpls_pcpu_stats); + if (!mdev->stats) + goto free; + + for_each_possible_cpu(i) { + struct mpls_pcpu_stats *mpls_stats; + + mpls_stats = per_cpu_ptr(mdev->stats, i); + u64_stats_init(&mpls_stats->syncp); + } + err = mpls_dev_sysctl_register(dev, mdev); if (err) goto free; @@ -927,10 +1044,19 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev) return mdev; free: + free_percpu(mdev->stats); kfree(mdev); return ERR_PTR(err); } +static void mpls_dev_destroy_rcu(struct rcu_head *head) +{ + struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu); + + free_percpu(mdev->stats); + kfree(mdev); +} + static void mpls_ifdown(struct net_device *dev, int event) { struct mpls_route __rcu **platform_label; @@ -1045,7 +1171,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, if (mdev) { mpls_dev_sysctl_unregister(mdev); RCU_INIT_POINTER(dev->mpls_ptr, NULL); - kfree_rcu(mdev, rcu); + call_rcu(&mdev->rcu, mpls_dev_destroy_rcu); } break; case NETDEV_CHANGENAME: @@ -1706,6 +1832,12 @@ static struct pernet_operations mpls_net_ops = { .exit = mpls_net_exit, }; +static struct rtnl_af_ops mpls_af_ops __read_mostly = { + .family = AF_MPLS, + .fill_stats_af = mpls_fill_stats_af, + .get_stats_af_size = mpls_get_stats_af_size, 
+}; + static int __init mpls_init(void) { int err; @@ -1722,6 +1854,8 @@ static int __init mpls_init(void) dev_add_pack(&mpls_packet_type); + rtnl_af_register(&mpls_af_ops); + rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL); rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL); rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL); @@ -1738,6 +1872,7 @@ module_init(mpls_init); static void __exit mpls_exit(void) { rtnl_unregister_all(PF_MPLS); + rtnl_af_unregister(&mpls_af_ops); dev_remove_pack(&mpls_packet_type); unregister_netdevice_notifier(&mpls_dev_notifier); unregister_pernet_subsys(&mpls_net_ops); diff --git a/net/mpls/internal.h b/net/mpls/internal.h index bdfef6c3271a..d97243034605 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -9,13 +9,58 @@ struct mpls_entry_decoded { u8 bos; }; +struct mpls_pcpu_stats { + struct mpls_link_stats stats; + struct u64_stats_sync syncp; +}; + struct mpls_dev { - int input_enabled; + int input_enabled; - struct ctl_table_header *sysctl; - struct rcu_head rcu; + struct mpls_pcpu_stats __percpu *stats; + + struct ctl_table_header *sysctl; + struct rcu_head rcu; }; +#if BITS_PER_LONG == 32 + +#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \ + do { \ + __typeof__(*(mdev)->stats) *ptr = \ + raw_cpu_ptr((mdev)->stats); \ + local_bh_disable(); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->stats.pkts_field++; \ + ptr->stats.bytes_field += (len); \ + u64_stats_update_end(&ptr->syncp); \ + local_bh_enable(); \ + } while (0) + +#define MPLS_INC_STATS(mdev, field) \ + do { \ + __typeof__(*(mdev)->stats) *ptr = \ + raw_cpu_ptr((mdev)->stats); \ + local_bh_disable(); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->stats.field++; \ + u64_stats_update_end(&ptr->syncp); \ + local_bh_enable(); \ + } while (0) + +#else + +#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \ + do { \ + this_cpu_inc((mdev)->stats->stats.pkts_field); \ + this_cpu_add((mdev)->stats->stats.bytes_field, (len)); \ + } while (0) + +#define MPLS_INC_STATS(mdev, field) \ + this_cpu_inc((mdev)->stats->stats.field) + +#endif + struct sk_buff; #define LABEL_NOT_SPECIFIED (1 << 20) @@ -114,6 +159,11 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr * return result; } +static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev) +{ + return rcu_dereference_rtnl(dev->mpls_ptr); +} + int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, const u32 label[]); int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels, @@ -123,5 +173,7 @@ int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table, bool mpls_output_possible(const struct net_device *dev); unsigned int mpls_dev_mtu(const struct net_device *dev); bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu); +void mpls_stats_inc_outucastpkts(struct net_device *dev, + const struct sk_buff *skb); #endif /* MPLS_INTERNAL_H */ diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 2f7ccd934416..02531284bc49 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -48,11 +48,15 @@ static int mpls_xmit(struct sk_buff *skb) struct dst_entry *dst = skb_dst(skb); struct rtable *rt = NULL; struct rt6_info *rt6 = NULL; + struct mpls_dev *out_mdev; int err = 0; bool bos; int i; unsigned int ttl; + /* Find the output device */ + out_dev = dst->dev; + /* Obtain the ttl */ if (dst->ops->family == AF_INET) { ttl = ip_hdr(skb)->ttl; @@ -66,8 +70,6 @@ 
static int mpls_xmit(struct sk_buff *skb) skb_orphan(skb); - /* Find the output device */ - out_dev = dst->dev; if (!mpls_output_possible(out_dev) || !dst->lwtstate || skb_warn_if_lro(skb)) goto drop; @@ -109,6 +111,8 @@ static int mpls_xmit(struct sk_buff *skb) bos = false; } + mpls_stats_inc_outucastpkts(out_dev, skb); + if (rt) err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway, skb); @@ -122,6 +126,9 @@ static int mpls_xmit(struct sk_buff *skb) return LWTUNNEL_XMIT_DONE; drop: + out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL; + if (out_mdev) + MPLS_INC_STATS(out_mdev, tx_errors); kfree_skb(skb); return -EINVAL; } -- cgit v1.2.3 From 53631a5f9c6669264adb7b4e92fd95d1d6ffa7d3 Mon Sep 17 00:00:00 2001 From: Lance Richardson Date: Mon, 16 Jan 2017 18:11:35 -0500 Subject: bridge: sparse fixes in br_ip6_multicast_alloc_query() Changed type of csum field in struct igmpv3_query from __be16 to __sum16 to eliminate type warning, made same change in struct igmpv3_report for consistency. Fixed up an ntohs() where htons() should have been used instead. Signed-off-by: Lance Richardson Acked-by: Stephen Hemminger Signed-off-by: David S. Miller --- include/uapi/linux/igmp.h | 4 ++-- net/bridge/br_multicast.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h index ccbb32aa6704..a97f9a7568cf 100644 --- a/include/uapi/linux/igmp.h +++ b/include/uapi/linux/igmp.h @@ -53,7 +53,7 @@ struct igmpv3_grec { struct igmpv3_report { __u8 type; __u8 resv1; - __be16 csum; + __sum16 csum; __be16 resv2; __be16 ngrec; struct igmpv3_grec grec[0]; @@ -62,7 +62,7 @@ struct igmpv3_report { struct igmpv3_query { __u8 type; __u8 code; - __be16 csum; + __sum16 csum; __be32 group; #if defined(__LITTLE_ENDIAN_BITFIELD) __u8 qrv:3, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index b30e77e8427c..f66346122dc4 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -540,7 +540,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, break; case 2: mld2q = (struct mld2_query *)icmp6_hdr(skb); - mld2q->mld2q_mrc = ntohs((u16)jiffies_to_msecs(interval)); + mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval)); mld2q->mld2q_type = ICMPV6_MGM_QUERY; mld2q->mld2q_code = 0; mld2q->mld2q_cksum = 0; -- cgit v1.2.3 From 0ee28e31553a6f2ed13223e565ae5a4d7a376942 Mon Sep 17 00:00:00 2001 From: Shyam Saini Date: Tue, 17 Jan 2017 07:35:04 +0530 Subject: qed: Replace memset with eth_zero_addr Use eth_zero_addr to assign zero address to the given address array instead of memset when the second argument in memset is address of zero. Also, it makes the code clearer Signed-off-by: Shyam Saini Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index c92a8506c1e1..7520eb34ad00 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1846,7 +1846,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, qed_fill_dev_info(cdev, &info->common); if (IS_VF(cdev)) - memset(info->common.hw_mac, 0, ETH_ALEN); + eth_zero_addr(info->common.hw_mac); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b1213643bbfd..3f4bf31f45e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1224,7 +1224,7 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) return; /* Clear the VF mac */ - memset(vf_info->mac, 0, ETH_ALEN); + eth_zero_addr(vf_info->mac); vf_info->rx_accept_mode = 0; vf_info->tx_accept_mode = 0; @@ -2626,8 +2626,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(p_vf->shadow_config.macs[i], p_params->mac)) { - memset(p_vf->shadow_config.macs[i], 0, - ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); break; } } @@ -2640,7 +2639,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, } else if (p_params->opcode == QED_FILTER_REPLACE || p_params->opcode == QED_FILTER_FLUSH) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) - memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); } /* List the new MAC address */ -- cgit v1.2.3 From a7ef6715c4b5905b663dddf3832dbdf77b3bf8ac Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Tue, 17 Jan 2017 10:21:19 +0800 Subject: net: ping: Use right format specifier to avoid type casting The inet_num is u16, so use %hu instead of casting it to int. And the sk_bound_dev_if is int actually, so it needn't cast to int. Signed-off-by: Gao Feng Signed-off-by: David S. Miller --- net/ipv4/ping.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 86cca610f4c2..592db6a3a0e9 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -433,9 +433,9 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) goto out; } - pr_debug("after bind(): num = %d, dif = %d\n", - (int)isk->inet_num, - (int)sk->sk_bound_dev_if); + pr_debug("after bind(): num = %hu, dif = %d\n", + isk->inet_num, + sk->sk_bound_dev_if); err = 0; if (sk->sk_family == AF_INET && isk->inet_rcv_saddr) -- cgit v1.2.3 From 96fe11f27b70f6b64f62a2d13ed209aa02e02a48 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 17 Jan 2017 14:09:38 +0530 Subject: cxgb4: Implement ndo_get_phys_port_id for mgmt dev Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index fb1624544073..4da6f900ff24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2583,6 +2583,19 @@ static int cxgb_get_vf_config(struct net_device *dev, ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); return 0; } + +static int cxgb_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct port_info *pi = netdev_priv(dev); + unsigned int phy_port_id; + + phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; + ppid->id_len = sizeof(phy_port_id); + memcpy(ppid->id, &phy_port_id, ppid->id_len); + return 0; +} + #endif static int cxgb_set_mac_addr(struct net_device *dev, void *p) @@ -2762,6 +2775,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = { .ndo_open = dummy_open, .ndo_set_vf_mac = cxgb_set_vf_mac, .ndo_get_vf_config = cxgb_get_vf_config, + .ndo_get_phys_port_id = cxgb_get_phys_port_id, }; #endif @@ -4539,6 +4553,7 @@ static int config_mgmt_dev(struct pci_dev *pdev) pi = netdev_priv(netdev); pi->adapter = adap; + pi->port_id = adap->pf % adap->params.nports; SET_NETDEV_DEV(netdev, &pdev->dev); adap->port[0] = netdev; @@ -4628,6 +4643,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 whoami, pl_rev; enum chip_type chip; static int adap_idx = 1; + u32 v, port_vec; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -5009,6 +5025,17 @@ sriov: } spin_lock_init(&adapter->mbox_lock); INIT_LIST_HEAD(&adapter->mlist.list); + + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, + &v, &port_vec); + if (err < 0) { + dev_err(adapter->pdev_dev, "Could not fetch port params\n"); + goto free_adapter; + } + + adapter->params.nports = hweight32(port_vec); pci_set_drvdata(pdev, adapter); return 0; -- cgit v1.2.3 From f74d1995192cd0834eea13a8287a3e26a7317b6d Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 17 Jan 2017 12:01:53 +0000 Subject: sfc: support setting RSS hash key through ethtool API Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 32 ++++++++++++++++++++++++-------- drivers/net/ethernet/sfc/ethtool.c | 24 +++++++++++++++++++----- drivers/net/ethernet/sfc/net_driver.h | 3 ++- drivers/net/ethernet/sfc/siena.c | 7 +++++-- 4 files changed, 50 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 2ce576919f97..f117e0b5b5ad 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1325,7 +1325,7 @@ static int efx_ef10_init_nic(struct efx_nic *efx) } /* don't fail init if RSS setup doesn't work */ - rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); + rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = (rc == 0); return 0; @@ -2535,7 +2535,7 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) } static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); @@ -2546,6 +2546,11 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); + /* This iterates over the length of efx->rx_indir_table, but copies + * bytes from rx_indir_table. That's because the latter is a pointer + * rather than an array, but should have the same length. + * The efx->rx_hash_key loop below is similar. + */ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) MCDI_PTR(tablebuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = @@ -2561,8 +2566,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) - MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = - efx->rx_hash_key[i]; + MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, sizeof(keybuf), NULL, 0, NULL); @@ -2595,7 +2599,8 @@ static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, } static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; @@ -2614,7 +2619,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, } rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, - rx_indir_table); + rx_indir_table, key); if (rc != 0) goto fail2; @@ -2625,6 +2630,9 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, if (rx_indir_table != efx->rx_indir_table) memcpy(efx->rx_indir_table, rx_indir_table, sizeof(efx->rx_indir_table)); + if (key != efx->rx_hash_key) + memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); + return 0; fail2: @@ -2636,14 +2644,18 @@ fail1: } static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { int rc; if (efx->rss_spread == 1) return 0; - rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); + if (!key) + key = efx->rx_hash_key; + + rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); if (rc == -ENOBUFS && !user) { unsigned context_size; @@ -2681,6 +2693,8 @@ static int 
efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table + __attribute__ ((unused)), + const u8 *key __attribute__ ((unused))) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -5686,6 +5700,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; const struct efx_nic_type efx_hunt_a0_nic_type = { @@ -5812,4 +5827,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 18ebaea44e82..becdba38a8e4 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1278,6 +1278,13 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table); } +static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, u8 *hfunc) { @@ -1287,6 +1294,8 @@ static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, *hfunc = ETH_RSS_HASH_TOP; if (indir) memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + if (key) + memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size); return 0; } @@ -1295,14 +1304,18 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, { struct efx_nic *efx = netdev_priv(net_dev); - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!indir) + if (!indir && !key) return 0; - return efx->type->rx_push_rss_config(efx, true, indir); + if (!key) + key = efx->rx_hash_key; + if (!indir) + indir = efx->rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, indir, key); } static int efx_ethtool_get_ts_info(struct net_device *net_dev, @@ -1377,6 +1390,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxnfc = efx_ethtool_get_rxnfc, .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, + .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, .get_ts_info = efx_ethtool_get_ts_info, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b20fe437265f..ad53551b2c54 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1311,7 +1311,7 @@ struct efx_nic_type { unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); int (*rx_push_rss_config)(struct efx_nic *efx, bool user, - const u32 *rx_indir_table); + const u32 *rx_indir_table, const u8 *key); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); @@ -1410,6 +1410,7 @@ struct efx_nic_type { int mcdi_max_ver; unsigned int max_rx_ip_filters; u32 
hwtstamp_filters; + unsigned int rx_hash_key_size; }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 118ff5601756..0606aa290879 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -333,11 +333,13 @@ fail1: } static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { efx_oword_t temp; /* Set hash key for IPv4 */ + if (key) + memcpy(efx->rx_hash_key, key, sizeof(temp)); memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); @@ -402,7 +404,7 @@ static int siena_init_nic(struct efx_nic *efx) EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); - siena_rx_push_rss_config(efx, false, efx->rx_indir_table); + siena_rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = true; /* Enable event logging */ @@ -1054,4 +1056,5 @@ const struct efx_nic_type siena_a0_nic_type = { .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), + .rx_hash_key_size = 16, }; -- cgit v1.2.3 From a707d18851a3bab517e21a6449806e468703bc3d Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 17 Jan 2017 12:02:12 +0000 Subject: sfc: read back RX hash config from the NIC when querying it with ethtool -x Ensures that we report the key and indirection table the NIC is using, rather than (if setting them failed earlier) what we wanted it to use. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 52 +++++++++++++++++++++++++++++++++++ drivers/net/ethernet/sfc/ethtool.c | 5 ++++ drivers/net/ethernet/sfc/farch.c | 16 +++++++++++ drivers/net/ethernet/sfc/net_driver.h | 2 ++ drivers/net/ethernet/sfc/nic.h | 1 + drivers/net/ethernet/sfc/siena.c | 20 ++++++++++++++ 6 files changed, 96 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index f117e0b5b5ad..dccbbd323616 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2643,6 +2643,56 @@ fail1: return rc; } +static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); + size_t outlen; + int rc, i; + + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != + MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); + + if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) + return -ENOENT; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), + tablebuf, sizeof(tablebuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = MCDI_PTR(tablebuf, + RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + rc = 
efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), + keybuf, sizeof(keybuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) + efx->rx_hash_key[i] = MCDI_PTR( + keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; + + return 0; +} + static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key) @@ -5643,6 +5693,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -5751,6 +5802,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index becdba38a8e4..adddf70780ad 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1289,6 +1289,11 @@ static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, u8 *hfunc) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index e4ca2161af70..ba45150f53c7 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1649,6 +1649,22 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx) } } +void efx_farch_rx_pull_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + efx_readd(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); + } +} + /* Looks at available SRAM resources and works out how many queues we * can support, and where things like descriptor caches should live. 
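/*
 * Aside (not from the patches): with both sfc changes applied, the RSS key
 * and indirection table reported to userspace are read back from the NIC
 * rather than echoed from the driver's cached copy, so a round trip such as
 * the following reflects what the hardware is really using. The device name
 * and key bytes are placeholders:
 *
 *	ethtool -X eth0 hkey 6d:5a:...:fa	program a 40-byte Toeplitz key
 *	ethtool -x eth0				read back the table and key
 */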
* diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index ad53551b2c54..5927c20ba43f 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1181,6 +1181,7 @@ struct efx_mtd_partition { * @tx_remove: Free resources for TX queue * @tx_write: Write TX descriptors and doorbell * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC * @rx_probe: Allocate resources for RX queue * @rx_init: Initialise RX queue on the NIC * @rx_remove: Free resources for RX queue @@ -1312,6 +1313,7 @@ struct efx_nic_type { dma_addr_t dma_addr, unsigned int len); int (*rx_push_rss_config)(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key); + int (*rx_pull_rss_config)(struct efx_nic *efx); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 383ff6e1e647..85cf131288b7 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -626,6 +626,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); void efx_farch_init_common(struct efx_nic *efx); void efx_ef10_handle_drain_event(struct efx_nic *efx); void efx_farch_rx_push_indir_table(struct efx_nic *efx); +void efx_farch_rx_pull_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 0606aa290879..af7cd8565a41 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -332,6 +332,25 @@ fail1: return rc; } +static int siena_rx_pull_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the + * first 128 bits of the same key, assuming it's been set by + * siena_rx_push_rss_config, below) + */ + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(efx->rx_hash_key, &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp, + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_farch_rx_pull_indir_table(efx); + return 0; +} + static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key) { @@ -981,6 +1000,7 @@ const struct efx_nic_type siena_a0_nic_type = { .tx_write = efx_farch_tx_write, .tx_limit_len = efx_farch_tx_limit_len, .rx_push_rss_config = siena_rx_push_rss_config, + .rx_pull_rss_config = siena_rx_pull_rss_config, .rx_probe = efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, -- cgit v1.2.3 From a870a97757dd4f165f4f7bb749350bee7df31716 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 17 Jan 2017 15:01:08 +0100 Subject: net: ethoc: Make needlessly global struct ethtool_ops static Make the needlessly global struct ethtool_ops ethoc_ethtool_ops static to fix a sparse warning. Signed-off-by: Tobias Klauser Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ethoc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 63e5e14174ee..c45757af9ade 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -995,7 +995,7 @@ static int ethoc_set_ringparam(struct net_device *dev, return 0; } -const struct ethtool_ops ethoc_ethtool_ops = { +static const struct ethtool_ops ethoc_ethtool_ops = { .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .nway_reset = phy_ethtool_nway_reset, -- cgit v1.2.3 From 0e40f4c9593ba2c7c30150ed669da97bd581c0cd Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Tue, 17 Jan 2017 13:37:19 -0500 Subject: tcp: accept RST for rcv_nxt - 1 after receiving a FIN Using a Mac OSX box as a client connecting to a Linux server, we have found that when certain applications (such as 'ab'), are abruptly terminated (via ^C), a FIN is sent followed by a RST packet on tcp connections. The FIN is accepted by the Linux stack but the RST is sent with the same sequence number as the FIN, and Linux responds with a challenge ACK per RFC 5961. The OSX client then sometimes (they are rate-limited) does not reply with any RST as would be expected on a closed socket. This results in sockets accumulating on the Linux server left mostly in the CLOSE_WAIT state, although LAST_ACK and CLOSING are also possible. This sequence of events can tie up a lot of resources on the Linux server since there may be a lot of data in write buffers at the time of the RST. Accepting a RST equal to rcv_nxt - 1, after we have already successfully processed a FIN, has made a significant difference for us in practice, by freeing up unneeded resources in a more expedient fashion. A packetdrill test demonstrating the behavior: // testing mac osx rst behavior // Establish a connection 0.000 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 0.000 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 0.000 bind(3, ..., ...) = 0 0.000 listen(3, 1) = 0 0.100 < S 0:0(0) win 32768 0.100 > S. 0:0(0) ack 1 0.200 < . 1:1(0) ack 1 win 32768 0.200 accept(3, ..., ...) = 4 // Client closes the connection 0.300 < F. 1:1(0) ack 1 win 32768 // now send rst with same sequence 0.300 < R. 1:1(0) ack 1 win 32768 // make sure we are in TCP_CLOSE 0.400 %{ assert tcpi_state == 7 }% Signed-off-by: Jason Baron Cc: Eric Dumazet Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 1a34e9278c07..bfa165cc455a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5199,6 +5199,23 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) return err; } +/* Accept RST for rcv_nxt - 1 after a FIN. + * When tcp connections are abruptly terminated from Mac OSX (via ^C), a + * FIN is sent followed by a RST packet. The RST is sent with the same + * sequence number as the FIN, and thus according to RFC 5961 a challenge + * ACK should be sent. However, Mac OSX rate limits replies to challenge + * ACKs on the closed socket. In addition middleboxes can drop either the + * challenge ACK or a subsequent RST. 
+ */ +static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + + return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) && + (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK | + TCPF_CLOSING)); +} + /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ @@ -5237,20 +5254,25 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, LINUX_MIB_TCPACKSKIPPEDSEQ, &tp->last_oow_ack_time)) tcp_send_dupack(sk, skb); + } else if (tcp_reset_check(sk, skb)) { + tcp_reset(sk); } goto discard; } /* Step 2: check RST bit */ if (th->rst) { - /* RFC 5961 3.2 (extend to match against SACK too if available): - * If seq num matches RCV.NXT or the right-most SACK block, + /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a + * FIN and SACK too if available): + * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or + * the right-most SACK block, * then * RESET the connection * else * Send a challenge ACK */ - if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { + if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || + tcp_reset_check(sk, skb)) { rst_seq_match = true; } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { struct tcp_sack_block *sp = &tp->selective_acks[0]; -- cgit v1.2.3 From d8bec2b29a82981acb229215e3970d4d50a7e6eb Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 17 Jan 2017 22:06:07 -0800 Subject: net/mlx5e: Support bpf_xdp_adjust_head() This patch adds bpf_xdp_adjust_head() support to mlx5e. 1. rx_headroom is added to struct mlx5e_rq. It uses an existing 4 byte hole in the struct. 2. The adjusted data length is checked against MLX5E_XDP_MIN_INLINE and MLX5E_SW2HW_MTU(rq->netdev->mtu). 3. The macro MLX5E_SW2HW_MTU is moved from en_main.c to en.h. MLX5E_HW2SW_MTU is also moved to en.h for symmetric reason but it is not a must. v2: - Keep the xdp specific logic in mlx5e_xdp_handle() - Update dma_len after the sanity checks in mlx5e_xmit_xdp_frame() Signed-off-by: Martin KaFai Lau Acked-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 ++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 18 ++++----- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 47 ++++++++++++++--------- 3 files changed, 40 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a473cea10c16..0d9dd860a295 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -51,6 +51,9 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) +#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) +#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) + #define MLX5E_MAX_NUM_TC 8 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 @@ -369,6 +372,7 @@ struct mlx5e_rq { unsigned long state; int ix; + u16 rx_headroom; struct mlx5e_rx_am am; /* Adaptive Moderation */ struct bpf_prog *xdp_prog; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f74ba73c55c7..aba3691e0919 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -343,9 +343,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); } -#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) -#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) - static inline int mlx5e_get_wqe_mtt_sz(void) { /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. @@ -534,9 +531,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->buff.map_dir = DMA_FROM_DEVICE; - if (rq->xdp_prog) + if (rq->xdp_prog) { rq->buff.map_dir = DMA_BIDIRECTIONAL; + rq->rx_headroom = XDP_PACKET_HEADROOM; + } else { + rq->buff.map_dir = DMA_FROM_DEVICE; + rq->rx_headroom = MLX5_RX_HEADROOM; + } switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -586,7 +587,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, byte_count = rq->buff.wqe_sz; /* calc the required page order */ - frag_sz = MLX5_RX_HEADROOM + + frag_sz = rq->rx_headroom + byte_count /* packet data */ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); frag_sz = SKB_DATA_ALIGN(frag_sz); @@ -3153,11 +3154,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) bool reset, was_opened; int i; - if (prog && prog->xdp_adjust_head) { - netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - mutex_lock(&priv->state_lock); if ((netdev->features & NETIF_F_LRO) && prog) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0e2fb3ed1790..20f116f8c457 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -264,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) if (unlikely(mlx5e_page_alloc_mapped(rq, di))) return -ENOMEM; - wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM); + wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom); return 0; } @@ -646,8 +646,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq) static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, - unsigned int data_offset, - int len) + const struct xdp_buff *xdp) { 
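/*
 * Aside (not driver code, and not part of the patch): a minimal XDP program
 * that exercises bpf_xdp_adjust_head(), the helper this patch allows mlx5e
 * to load. Sketch only; it assumes libbpf's <bpf/bpf_helpers.h> for SEC()
 * and the helper declaration, and would be built as a separate BPF object:
 *
 *	#include <linux/bpf.h>
 *	#include <linux/if_ether.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("xdp")
 *	int xdp_pop_4_bytes(struct xdp_md *ctx)
 *	{
 *		// Strip 4 bytes from the front of the frame.
 *		if (bpf_xdp_adjust_head(ctx, 4))
 *			return XDP_DROP;
 *		// data/data_end must be re-validated after the adjustment.
 *		if ((void *)(long)ctx->data + sizeof(struct ethhdr) >
 *		    (void *)(long)ctx->data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */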
struct mlx5e_sq *sq = &rq->channel->xdp_sq; struct mlx5_wq_cyc *wq = &sq->wq; @@ -659,9 +658,16 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; + ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE; - unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE; - void *data = page_address(di->page) + data_offset; + unsigned int dma_len = xdp->data_end - xdp->data; + + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || + MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) { + rq->stats.xdp_drop++; + mlx5e_page_release(rq, di, true); + return; + } if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) { if (sq->db.xdp.doorbell) { @@ -674,13 +680,14 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, return; } + dma_len -= MLX5E_XDP_MIN_INLINE; dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); memset(wqe, 0, sizeof(*wqe)); /* copy the inline part */ - memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE); eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1); @@ -703,25 +710,29 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, } /* returns true if packet was consumed by xdp */ -static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, - const struct bpf_prog *prog, - struct mlx5e_dma_info *di, - void *data, u16 len) +static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, + struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) { + const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); struct xdp_buff xdp; u32 act; if (!prog) return false; - xdp.data = data; - xdp.data_end = xdp.data + len; + xdp.data = va + *rx_headroom; + xdp.data_end = xdp.data + *len; + xdp.data_hard_start = va; + act = bpf_prog_run_xdp(prog, &xdp); switch (act) { case XDP_PASS: + *rx_headroom = xdp.data - xdp.data_hard_start; + *len = xdp.data_end - xdp.data; return false; case XDP_TX: - mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len); + mlx5e_xmit_xdp_frame(rq, di, &xdp); return true; default: bpf_warn_invalid_xdp_action(act); @@ -740,15 +751,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_dma_info *di; struct sk_buff *skb; void *va, *data; + u16 rx_headroom = rq->rx_headroom; bool consumed; di = &rq->dma_info[wqe_counter]; va = page_address(di->page); - data = va + MLX5_RX_HEADROOM; + data = va + rx_headroom; dma_sync_single_range_for_cpu(rq->pdev, di->addr, - MLX5_RX_HEADROOM, + rx_headroom, rq->buff.wqe_sz, DMA_FROM_DEVICE); prefetch(data); @@ -760,8 +772,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data, - cqe_bcnt); + consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt); rcu_read_unlock(); if (consumed) return NULL; /* page/packet was consumed by XDP */ @@ -777,7 +788,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, page_ref_inc(di->page); mlx5e_page_release(rq, di, true); - skb_reserve(skb, MLX5_RX_HEADROOM); + skb_reserve(skb, rx_headroom); skb_put(skb, cqe_bcnt); return skb; -- cgit v1.2.3 From ab70e5862670177d207ea0e0e16454e4083a5ad5 Mon Sep 17 00:00:00 2001 From: jpinto Date: Tue, 17 Jan 2017 14:53:07 +0000 Subject: stmicro: add more information to 
Kconfig This patch adds more info to stmicro' Kconfig files in order to be clearer that the driver can be used by ethernet cards based on 10/100/1000/EQOS Synopsys IP Cores. EQOS was also added stmmac/Kconfig Kconfig, since dwmac4 is in fact EQoS, one of Synopsys Ethernet IPs. More info at: https://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_qos Signed-off-by: Joao Pinto Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/Kconfig | 3 ++- drivers/net/ethernet/stmicro/stmmac/Kconfig | 15 +++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig index 1c1157d2bd40..ecd7a5edef5d 100644 --- a/drivers/net/ethernet/stmicro/Kconfig +++ b/drivers/net/ethernet/stmicro/Kconfig @@ -7,7 +7,8 @@ config NET_VENDOR_STMICRO default y depends on HAS_IOMEM ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. + If you have a network (Ethernet) card based on Synopsys Ethernet IP + Cores, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 99594e308db9..cfbe3634dfa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,5 +1,5 @@ config STMMAC_ETH - tristate "STMicroelectronics 10/100/1000 Ethernet driver" + tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver" depends on HAS_IOMEM && HAS_DMA select MII select PHYLIB @@ -7,9 +7,8 @@ config STMMAC_ETH imply PTP_1588_CLOCK select RESET_CONTROLLER ---help--- - This is the driver for the Ethernet IPs are built around a - Synopsys IP Core and only tested on the STMicroelectronics - platforms. + This is the driver for the Ethernet IPs built around a + Synopsys IP Core. if STMMAC_ETH @@ -152,11 +151,11 @@ config STMMAC_PCI tristate "STMMAC PCI bus support" depends on STMMAC_ETH && PCI ---help--- - This is to select the Synopsys DWMAC available on PCI devices, - if you have a controller with this interface, say Y or M here. + This selects the platform specific bus support for the stmmac driver. + This driver was tested on XLINX XC2V3000 FF1152AMT0221 + D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit. - This PCI support is tested on XLINX XC2V3000 FF1152AMT0221 - D1215994A VIRTEX FPGA board. + If you have a controller with this interface, say Y or M here. If unsure, say N. endif -- cgit v1.2.3 From fe38d2a1c8bee0b3a0be40de5b621a28200612e5 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 Jan 2017 07:51:01 -0800 Subject: inet: collapse ipv4/v6 rcv_saddr_equal functions into one We pass these per-protocol equal functions around in various places, but we can just have one function that checks the sk->sk_family and then do the right comparison function. I've also changed the ipv4 version to not cast to inet_sock since it is unneeded. Signed-off-by: Josef Bacik Signed-off-by: David S. 
Miller --- include/net/addrconf.h | 4 +-- include/net/inet_hashtables.h | 5 +-- include/net/udp.h | 1 - net/ipv4/inet_connection_sock.c | 72 ++++++++++++++++++++++++++++++++++++++++ net/ipv4/inet_hashtables.c | 16 +++------ net/ipv4/udp.c | 58 +++++++------------------------- net/ipv6/inet6_connection_sock.c | 4 +-- net/ipv6/inet6_hashtables.c | 46 +------------------------ net/ipv6/udp.c | 2 +- 9 files changed, 95 insertions(+), 113 deletions(-) diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 8f998afc1384..17c6fd84e287 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -88,9 +88,7 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, u32 banned_flags); int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, u32 banned_flags); -int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, - bool match_wildcard); -int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, +int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, bool match_wildcard); void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 0574493e3899..756ed1692906 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -203,10 +203,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h); bool inet_ehash_insert(struct sock *sk, struct sock *osk); bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); -int __inet_hash(struct sock *sk, struct sock *osk, - int (*saddr_same)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard)); +int __inet_hash(struct sock *sk, struct sock *osk); int inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); diff --git a/include/net/udp.h b/include/net/udp.h index 1661791e8ca1..c9d8b8e848e0 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -204,7 +204,6 @@ static inline void udp_lib_close(struct sock *sk, long timeout) } int udp_lib_get_port(struct sock *sk, unsigned short snum, - int (*)(const struct sock *, const struct sock *, bool), unsigned int hash2_nulladdr); u32 udp_flow_hashrnd(void); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 19ea045c50ed..ba597cb504ff 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -31,6 +31,78 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; EXPORT_SYMBOL(inet_csk_timer_bug_msg); #endif +#if IS_ENABLED(CONFIG_IPV6) +/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6 + * only, and any IPv4 addresses if not IPv6 only + * match_wildcard == false: addresses must be exactly the same, i.e. + * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, + * and 0.0.0.0 equals to 0.0.0.0 only + */ +static int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, + bool match_wildcard) +{ + const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); + int sk2_ipv6only = inet_v6_ipv6only(sk2); + int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); + int addr_type2 = sk2_rcv_saddr6 ? 
ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; + + /* if both are mapped, treat as IPv4 */ + if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) { + if (!sk2_ipv6only) { + if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr) + return 1; + if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr) + return match_wildcard; + } + return 0; + } + + if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY) + return 1; + + if (addr_type2 == IPV6_ADDR_ANY && match_wildcard && + !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) + return 1; + + if (addr_type == IPV6_ADDR_ANY && match_wildcard && + !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED)) + return 1; + + if (sk2_rcv_saddr6 && + ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6)) + return 1; + + return 0; +} +#endif + +/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses + * match_wildcard == false: addresses must be exactly the same, i.e. + * 0.0.0.0 only equals to 0.0.0.0 + */ +static int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, + bool match_wildcard) +{ + if (!ipv6_only_sock(sk2)) { + if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr) + return 1; + if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr) + return match_wildcard; + } + return 0; +} + +int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, + bool match_wildcard) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + return ipv6_rcv_saddr_equal(sk, sk2, match_wildcard); +#endif + return ipv4_rcv_saddr_equal(sk, sk2, match_wildcard); +} +EXPORT_SYMBOL(inet_rcv_saddr_equal); + void inet_get_local_port_range(struct net *net, int *low, int *high) { unsigned int seq; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index ca97835bfec4..2ef9b010bd34 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -435,10 +435,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk) EXPORT_SYMBOL_GPL(inet_ehash_nolisten); static int inet_reuseport_add_sock(struct sock *sk, - struct inet_listen_hashbucket *ilb, - int (*saddr_same)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard)) + struct inet_listen_hashbucket *ilb) { struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; struct sock *sk2; @@ -451,7 +448,7 @@ static int inet_reuseport_add_sock(struct sock *sk, sk2->sk_bound_dev_if == sk->sk_bound_dev_if && inet_csk(sk2)->icsk_bind_hash == tb && sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && - saddr_same(sk, sk2, false)) + inet_rcv_saddr_equal(sk, sk2, false)) return reuseport_add_sock(sk, sk2); } @@ -461,10 +458,7 @@ static int inet_reuseport_add_sock(struct sock *sk, return 0; } -int __inet_hash(struct sock *sk, struct sock *osk, - int (*saddr_same)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard)) +int __inet_hash(struct sock *sk, struct sock *osk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct inet_listen_hashbucket *ilb; @@ -479,7 +473,7 @@ int __inet_hash(struct sock *sk, struct sock *osk, spin_lock(&ilb->lock); if (sk->sk_reuseport) { - err = inet_reuseport_add_sock(sk, ilb, saddr_same); + err = inet_reuseport_add_sock(sk, ilb); if (err) goto unlock; } @@ -503,7 +497,7 @@ int inet_hash(struct sock *sk) if (sk->sk_state != TCP_CLOSE) { local_bh_disable(); - err = __inet_hash(sk, NULL, ipv4_rcv_saddr_equal); + err = __inet_hash(sk, NULL); local_bh_enable(); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4318d72e0248..d6dddcf59e79 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -137,11 +137,7 @@ 
EXPORT_SYMBOL(udp_memory_allocated); static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, - struct sock *sk, - int (*saddr_comp)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard), - unsigned int log) + struct sock *sk, unsigned int log) { struct sock *sk2; kuid_t uid = sock_i_uid(sk); @@ -153,7 +149,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && - saddr_comp(sk, sk2, true)) { + inet_rcv_saddr_equal(sk, sk2, true)) { if (sk2->sk_reuseport && sk->sk_reuseport && !rcu_access_pointer(sk->sk_reuseport_cb) && uid_eq(uid, sock_i_uid(sk2))) { @@ -176,10 +172,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, */ static int udp_lib_lport_inuse2(struct net *net, __u16 num, struct udp_hslot *hslot2, - struct sock *sk, - int (*saddr_comp)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard)) + struct sock *sk) { struct sock *sk2; kuid_t uid = sock_i_uid(sk); @@ -193,7 +186,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && - saddr_comp(sk, sk2, true)) { + inet_rcv_saddr_equal(sk, sk2, true)) { if (sk2->sk_reuseport && sk->sk_reuseport && !rcu_access_pointer(sk->sk_reuseport_cb) && uid_eq(uid, sock_i_uid(sk2))) { @@ -208,10 +201,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, return res; } -static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot, - int (*saddr_same)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard)) +static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot) { struct net *net = sock_net(sk); kuid_t uid = sock_i_uid(sk); @@ -225,7 +215,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot, (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) && (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && - (*saddr_same)(sk, sk2, false)) { + inet_rcv_saddr_equal(sk, sk2, false)) { return reuseport_add_sock(sk, sk2); } } @@ -241,14 +231,10 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot, * * @sk: socket struct in question * @snum: port number to look up - * @saddr_comp: AF-dependent comparison of bound local IP addresses * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, * with NULL address */ int udp_lib_get_port(struct sock *sk, unsigned short snum, - int (*saddr_comp)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard), unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; @@ -277,7 +263,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, - saddr_comp, udptable->log); + udptable->log); snum = first; /* @@ -310,12 +296,11 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, if (hslot->count < hslot2->count) goto scan_primary_hash; - exist = udp_lib_lport_inuse2(net, snum, hslot2, - sk, saddr_comp); + exist = udp_lib_lport_inuse2(net, snum, hslot2, sk); if (!exist && (hash2_nulladdr != slot2)) { hslot2 = udp_hashslot2(udptable, hash2_nulladdr); exist = udp_lib_lport_inuse2(net, snum, hslot2, - sk, saddr_comp); + sk); } if 
(exist) goto fail_unlock; @@ -323,8 +308,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, goto found; } scan_primary_hash: - if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, - saddr_comp, 0)) + if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0)) goto fail_unlock; } found: @@ -333,7 +317,7 @@ found: udp_sk(sk)->udp_portaddr_hash ^= snum; if (sk_unhashed(sk)) { if (sk->sk_reuseport && - udp_reuseport_add_sock(sk, hslot, saddr_comp)) { + udp_reuseport_add_sock(sk, hslot)) { inet_sk(sk)->inet_num = 0; udp_sk(sk)->udp_port_hash = 0; udp_sk(sk)->udp_portaddr_hash ^= snum; @@ -365,24 +349,6 @@ fail: } EXPORT_SYMBOL(udp_lib_get_port); -/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses - * match_wildcard == false: addresses must be exactly the same, i.e. - * 0.0.0.0 only equals to 0.0.0.0 - */ -int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2, - bool match_wildcard) -{ - struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); - - if (!ipv6_only_sock(sk2)) { - if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr) - return 1; - if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr) - return match_wildcard; - } - return 0; -} - static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr, unsigned int port) { @@ -398,7 +364,7 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum) /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; - return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); + return udp_lib_get_port(sk, snum, hash2_nulladdr); } static int compute_score(struct sock *sk, struct net *net, diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 7396e75e161b..55ee2ea2aee0 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -54,12 +54,12 @@ int inet6_csk_bind_conflict(const struct sock *sk, (sk2->sk_state != TCP_TIME_WAIT && !uid_eq(uid, sock_i_uid((struct sock *)sk2))))) { - if (ipv6_rcv_saddr_equal(sk, sk2, true)) + if (inet_rcv_saddr_equal(sk, sk2, true)) break; } if (!relax && reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN && - ipv6_rcv_saddr_equal(sk, sk2, true)) + inet_rcv_saddr_equal(sk, sk2, true)) break; } } diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 02761c9fe43e..d0900918a19e 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -268,54 +268,10 @@ int inet6_hash(struct sock *sk) if (sk->sk_state != TCP_CLOSE) { local_bh_disable(); - err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal); + err = __inet_hash(sk, NULL); local_bh_enable(); } return err; } EXPORT_SYMBOL_GPL(inet6_hash); - -/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6 - * only, and any IPv4 addresses if not IPv6 only - * match_wildcard == false: addresses must be exactly the same, i.e. - * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, - * and 0.0.0.0 equals to 0.0.0.0 only - */ -int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, - bool match_wildcard) -{ - const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); - int sk2_ipv6only = inet_v6_ipv6only(sk2); - int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); - int addr_type2 = sk2_rcv_saddr6 ? 
ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; - - /* if both are mapped, treat as IPv4 */ - if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) { - if (!sk2_ipv6only) { - if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr) - return 1; - if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr) - return match_wildcard; - } - return 0; - } - - if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY) - return 1; - - if (addr_type2 == IPV6_ADDR_ANY && match_wildcard && - !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) - return 1; - - if (addr_type == IPV6_ADDR_ANY && match_wildcard && - !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED)) - return 1; - - if (sk2_rcv_saddr6 && - ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6)) - return 1; - - return 0; -} -EXPORT_SYMBOL_GPL(ipv6_rcv_saddr_equal); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4d5c4eee4b3f..05d69324862e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -103,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; - return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); + return udp_lib_get_port(sk, snum, hash2_nulladdr); } static void udp_v6_rehash(struct sock *sk) -- cgit v1.2.3 From aa078842b702b4a45111f028a604a6c8f69cb27d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 Jan 2017 07:51:02 -0800 Subject: inet: drop ->bind_conflict The only difference between inet6_csk_bind_conflict and inet_csk_bind_conflict is how they check the rcv_saddr, so delete this call back and simply change inet_csk_bind_conflict to call inet_rcv_saddr_equal. Signed-off-by: Josef Bacik Signed-off-by: David S. Miller --- include/net/inet6_connection_sock.h | 5 ----- include/net/inet_connection_sock.h | 6 ------ net/dccp/ipv4.c | 1 - net/dccp/ipv6.c | 2 -- net/ipv4/inet_connection_sock.c | 22 +++++++------------- net/ipv4/tcp_ipv4.c | 1 - net/ipv6/inet6_connection_sock.c | 40 ------------------------------------- net/ipv6/tcp_ipv6.c | 2 -- 8 files changed, 7 insertions(+), 72 deletions(-) diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 3212b39b5bfc..8ec87b62257b 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -15,16 +15,11 @@ #include -struct inet_bind_bucket; struct request_sock; struct sk_buff; struct sock; struct sockaddr; -int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb, bool relax, - bool soreuseport_ok); - struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6, const struct request_sock *req, u8 proto); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 84b2edde09b1..826f198374f8 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -62,9 +62,6 @@ struct inet_connection_sock_af_ops { char __user *optval, int __user *optlen); #endif void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); - int (*bind_conflict)(const struct sock *sk, - const struct inet_bind_bucket *tb, - bool relax, bool soreuseport_ok); void (*mtu_reduced)(struct sock *sk); }; @@ -263,9 +260,6 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk, struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); -int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb, bool relax, - bool soreuseport_ok); int inet_csk_get_port(struct sock *sk, unsigned short 
snum); struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4, diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index d859a5c36e70..b043ec833785 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -904,7 +904,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), - .bind_conflict = inet_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index adfc790f7193..08bcdc3d1717 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -937,7 +937,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), - .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, @@ -958,7 +957,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), - .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index ba597cb504ff..a1c9055769fc 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -116,9 +116,9 @@ void inet_get_local_port_range(struct net *net, int *low, int *high) } EXPORT_SYMBOL(inet_get_local_port_range); -int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb, bool relax, - bool reuseport_ok) +static int inet_csk_bind_conflict(const struct sock *sk, + const struct inet_bind_bucket *tb, + bool relax, bool reuseport_ok) { struct sock *sk2; bool reuse = sk->sk_reuse; @@ -134,7 +134,6 @@ int inet_csk_bind_conflict(const struct sock *sk, sk_for_each_bound(sk2, &tb->owners) { if (sk != sk2 && - !inet_v6_ipv6only(sk2) && (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { @@ -144,23 +143,18 @@ int inet_csk_bind_conflict(const struct sock *sk, rcu_access_pointer(sk->sk_reuseport_cb) || (sk2->sk_state != TCP_TIME_WAIT && !uid_eq(uid, sock_i_uid(sk2))))) { - - if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || - sk2->sk_rcv_saddr == sk->sk_rcv_saddr) + if (inet_rcv_saddr_equal(sk, sk2, true)) break; } if (!relax && reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN) { - - if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || - sk2->sk_rcv_saddr == sk->sk_rcv_saddr) + if (inet_rcv_saddr_equal(sk, sk2, true)) break; } } } return sk2 != NULL; } -EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. 
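/*
 * Aside (not from the patch): the "match_wildcard == true" comparison that
 * inet_csk_bind_conflict() now delegates to inet_rcv_saddr_equal() is what
 * makes a wildcard listener conflict with a specific-address bind on the
 * same port. A minimal userspace sketch, assuming TCP port 5000 is free:
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in any = {
		.sin_family = AF_INET,
		.sin_port = htons(5000),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	struct sockaddr_in lo = any;
	int a = socket(AF_INET, SOCK_STREAM, 0);
	int b = socket(AF_INET, SOCK_STREAM, 0);

	lo.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	if (bind(a, (struct sockaddr *)&any, sizeof(any)) || listen(a, 1))
		perror("bind/listen 0.0.0.0:5000");

	/* 0.0.0.0:5000 vs 127.0.0.1:5000: the wildcard matches, so this fails. */
	if (bind(b, (struct sockaddr *)&lo, sizeof(lo)))
		perror("bind 127.0.0.1:5000");	/* expected: EADDRINUSE */

	close(a);
	close(b);
	return 0;
}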
@@ -239,8 +233,7 @@ other_parity_scan: smallest_size = tb->num_owners; smallest_port = port; } - if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false, - reuseport_ok)) + if (!inet_csk_bind_conflict(sk, tb, false, reuseport_ok)) goto tb_found; goto next_port; } @@ -281,8 +274,7 @@ tb_found: sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && smallest_size == -1) goto success; - if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true, - reuseport_ok)) { + if (inet_csk_bind_conflict(sk, tb, true, reuseport_ok)) { if ((reuse || (tb->fastreuseport > 0 && sk->sk_reuseport && diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 63214136cf1c..3644fc117691 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1817,7 +1817,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = { .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), - .bind_conflict = inet_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 55ee2ea2aee0..97074c459fe6 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -28,46 +28,6 @@ #include #include -int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb, bool relax, - bool reuseport_ok) -{ - const struct sock *sk2; - bool reuse = !!sk->sk_reuse; - bool reuseport = !!sk->sk_reuseport && reuseport_ok; - kuid_t uid = sock_i_uid((struct sock *)sk); - - /* We must walk the whole port owner list in this case. -DaveM */ - /* - * See comment in inet_csk_bind_conflict about sock lookup - * vs net namespaces issues. - */ - sk_for_each_bound(sk2, &tb->owners) { - if (sk != sk2 && - (!sk->sk_bound_dev_if || - !sk2->sk_bound_dev_if || - sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { - if ((!reuse || !sk2->sk_reuse || - sk2->sk_state == TCP_LISTEN) && - (!reuseport || !sk2->sk_reuseport || - rcu_access_pointer(sk->sk_reuseport_cb) || - (sk2->sk_state != TCP_TIME_WAIT && - !uid_eq(uid, - sock_i_uid((struct sock *)sk2))))) { - if (inet_rcv_saddr_equal(sk, sk2, true)) - break; - } - if (!relax && reuse && sk2->sk_reuse && - sk2->sk_state != TCP_LISTEN && - inet_rcv_saddr_equal(sk, sk2, true)) - break; - } - } - - return sk2 != NULL; -} -EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); - struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6, const struct request_sock *req, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index fc14e04028bf..f72100eedd5d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1621,7 +1621,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), - .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, @@ -1652,7 +1651,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), - .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, -- cgit v1.2.3 From b9470c27607bed1ad3450de789c154f225530112 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 Jan 2017 07:51:03 -0800 
Subject: inet: kill smallest_size and smallest_port In inet_csk_get_port we seem to be using smallest_port to figure out where the best place to look for a SO_REUSEPORT sk that matches with an existing set of SO_REUSEPORT's. However if we get to the logic if (smallest_size != -1) { port = smallest_port; goto have_port; } we will do a useless search, because we would have already done the inet_csk_bind_conflict for that port and it would have returned 1, otherwise we would have gone to found_tb and succeeded. Since this logic makes us do yet another trip through inet_csk_bind_conflict for a port we know won't work just delete this code and save us the time. Signed-off-by: Josef Bacik Signed-off-by: David S. Miller --- include/net/inet_hashtables.h | 1 - net/ipv4/inet_connection_sock.c | 26 ++++---------------------- net/ipv4/inet_hashtables.c | 3 --- 3 files changed, 4 insertions(+), 26 deletions(-) diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 756ed1692906..3fc0366743da 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -80,7 +80,6 @@ struct inet_bind_bucket { signed char fastreuse; signed char fastreuseport; kuid_t fastuid; - int num_owners; struct hlist_node node; struct hlist_head owners; }; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index a1c9055769fc..d3523661c905 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -165,7 +165,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; int ret = 1, attempts = 5, port = snum; - int smallest_size = -1, smallest_port; struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); int i, low, high, attempt_half; @@ -175,7 +174,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) bool reuseport_ok = !!snum; if (port) { -have_port: head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; spin_lock_bh(&head->lock); @@ -209,8 +207,6 @@ other_half_scan: * We do the opposite to not pollute connect() users. 
*/ offset |= 1U; - smallest_size = -1; - smallest_port = low; /* avoid compiler warning */ other_parity_scan: port = low + offset; @@ -224,15 +220,6 @@ other_parity_scan: spin_lock_bh(&head->lock); inet_bind_bucket_for_each(tb, &head->chain) if (net_eq(ib_net(tb), net) && tb->port == port) { - if (((tb->fastreuse > 0 && reuse) || - (tb->fastreuseport > 0 && - sk->sk_reuseport && - !rcu_access_pointer(sk->sk_reuseport_cb) && - uid_eq(tb->fastuid, uid))) && - (tb->num_owners < smallest_size || smallest_size == -1)) { - smallest_size = tb->num_owners; - smallest_port = port; - } if (!inet_csk_bind_conflict(sk, tb, false, reuseport_ok)) goto tb_found; goto next_port; @@ -243,10 +230,6 @@ next_port: cond_resched(); } - if (smallest_size != -1) { - port = smallest_port; - goto have_port; - } offset--; if (!(offset & 1)) goto other_parity_scan; @@ -268,19 +251,18 @@ tb_found: if (sk->sk_reuse == SK_FORCE_REUSE) goto success; - if (((tb->fastreuse > 0 && reuse) || + if ((tb->fastreuse > 0 && reuse) || (tb->fastreuseport > 0 && !rcu_access_pointer(sk->sk_reuseport_cb) && - sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && - smallest_size == -1) + sk->sk_reuseport && uid_eq(tb->fastuid, uid))) goto success; if (inet_csk_bind_conflict(sk, tb, true, reuseport_ok)) { if ((reuse || (tb->fastreuseport > 0 && sk->sk_reuseport && !rcu_access_pointer(sk->sk_reuseport_cb) && - uid_eq(tb->fastuid, uid))) && - !snum && smallest_size != -1 && --attempts >= 0) { + uid_eq(tb->fastuid, uid))) && !snum && + --attempts >= 0) { spin_unlock_bh(&head->lock); goto again; } diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2ef9b010bd34..8bea74298173 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -73,7 +73,6 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, tb->port = snum; tb->fastreuse = 0; tb->fastreuseport = 0; - tb->num_owners = 0; INIT_HLIST_HEAD(&tb->owners); hlist_add_head(&tb->node, &head->chain); } @@ -96,7 +95,6 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, { inet_sk(sk)->inet_num = snum; sk_add_bind_node(sk, &tb->owners); - tb->num_owners++; inet_csk(sk)->icsk_bind_hash = tb; } @@ -114,7 +112,6 @@ static void __inet_put_port(struct sock *sk) spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; __sk_del_bind_node(sk); - tb->num_owners--; inet_csk(sk)->icsk_bind_hash = NULL; inet_sk(sk)->inet_num = 0; inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); -- cgit v1.2.3 From 6cd66616834c89b8a6c8a182c4c99e5478cf6d6b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 Jan 2017 07:51:04 -0800 Subject: inet: don't check for bind conflicts twice when searching for a port This is just wasted time, we've already found a tb that doesn't have a bind conflict, and we don't drop the head lock so scanning again isn't going to give us a different answer. Instead move the tb->reuse setting logic outside of the found_tb path and put it in the success: path. Then make it so that we don't goto again if we find a bind conflict in the found_tb path as we won't reach this anymore when we are scanning for an ephemeral port. Signed-off-by: Josef Bacik Signed-off-by: David S. 
Miller --- net/ipv4/inet_connection_sock.c | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index d3523661c905..f7e844d84836 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -164,7 +164,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) { bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - int ret = 1, attempts = 5, port = snum; + int ret = 1, port = snum; struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); int i, low, high, attempt_half; @@ -183,7 +183,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) goto tb_not_found; } -again: attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0; other_half_scan: inet_get_local_port_range(net, &low, &high); @@ -221,7 +220,7 @@ other_parity_scan: inet_bind_bucket_for_each(tb, &head->chain) if (net_eq(ib_net(tb), net) && tb->port == port) { if (!inet_csk_bind_conflict(sk, tb, false, reuseport_ok)) - goto tb_found; + goto success; goto next_port; } goto tb_not_found; @@ -256,23 +255,11 @@ tb_found: !rcu_access_pointer(sk->sk_reuseport_cb) && sk->sk_reuseport && uid_eq(tb->fastuid, uid))) goto success; - if (inet_csk_bind_conflict(sk, tb, true, reuseport_ok)) { - if ((reuse || - (tb->fastreuseport > 0 && - sk->sk_reuseport && - !rcu_access_pointer(sk->sk_reuseport_cb) && - uid_eq(tb->fastuid, uid))) && !snum && - --attempts >= 0) { - spin_unlock_bh(&head->lock); - goto again; - } + if (inet_csk_bind_conflict(sk, tb, true, reuseport_ok)) goto fail_unlock; - } - if (!reuse) - tb->fastreuse = 0; - if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)) - tb->fastreuseport = 0; - } else { + } +success: + if (!hlist_empty(&tb->owners)) { tb->fastreuse = reuse; if (sk->sk_reuseport) { tb->fastreuseport = 1; @@ -280,8 +267,12 @@ tb_found: } else { tb->fastreuseport = 0; } + } else { + if (!reuse) + tb->fastreuse = 0; + if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)) + tb->fastreuseport = 0; } -success: if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); -- cgit v1.2.3 From 289141b7688b71dc69b8d7a54bf67a4d7bc79f96 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 Jan 2017 07:51:05 -0800 Subject: inet: split inet_csk_get_port into two functions inet_csk_get_port does two different things, it either scans for an open port, or it tries to see if the specified port is available for use. Since these two operations have different rules and are basically independent lets split them into two different functions to make them both more readable. Signed-off-by: Josef Bacik Signed-off-by: David S. Miller --- net/ipv4/inet_connection_sock.c | 66 +++++++++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 22 deletions(-) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f7e844d84836..bbe28920e2d8 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -156,33 +156,21 @@ static int inet_csk_bind_conflict(const struct sock *sk, return sk2 != NULL; } -/* Obtain a reference to a local port for the given sock, - * if snum is zero it means select any available local port. - * We try to allocate an odd port (and leave even ports for connect()) +/* + * Find an open port number for the socket. Returns with the + * inet_bind_hashbucket lock held. 
*/ -int inet_csk_get_port(struct sock *sk, unsigned short snum) +static struct inet_bind_hashbucket * +inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret) { - bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - int ret = 1, port = snum; + int port = 0; struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); int i, low, high, attempt_half; struct inet_bind_bucket *tb; - kuid_t uid = sock_i_uid(sk); u32 remaining, offset; - bool reuseport_ok = !!snum; - if (port) { - head = &hinfo->bhash[inet_bhashfn(net, port, - hinfo->bhash_size)]; - spin_lock_bh(&head->lock); - inet_bind_bucket_for_each(tb, &head->chain) - if (net_eq(ib_net(tb), net) && tb->port == port) - goto tb_found; - - goto tb_not_found; - } attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0; other_half_scan: inet_get_local_port_range(net, &low, &high); @@ -219,11 +207,12 @@ other_parity_scan: spin_lock_bh(&head->lock); inet_bind_bucket_for_each(tb, &head->chain) if (net_eq(ib_net(tb), net) && tb->port == port) { - if (!inet_csk_bind_conflict(sk, tb, false, reuseport_ok)) + if (!inet_csk_bind_conflict(sk, tb, false, false)) goto success; goto next_port; } - goto tb_not_found; + tb = NULL; + goto success; next_port: spin_unlock_bh(&head->lock); cond_resched(); @@ -238,8 +227,41 @@ next_port: attempt_half = 2; goto other_half_scan; } - return ret; + return NULL; +success: + *port_ret = port; + *tb_ret = tb; + return head; +} + +/* Obtain a reference to a local port for the given sock, + * if snum is zero it means select any available local port. + * We try to allocate an odd port (and leave even ports for connect()) + */ +int inet_csk_get_port(struct sock *sk, unsigned short snum) +{ + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; + struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; + int ret = 1, port = snum; + struct inet_bind_hashbucket *head; + struct net *net = sock_net(sk); + struct inet_bind_bucket *tb = NULL; + kuid_t uid = sock_i_uid(sk); + if (!port) { + head = inet_csk_find_open_port(sk, &tb, &port); + if (!head) + return ret; + if (!tb) + goto tb_not_found; + goto success; + } + head = &hinfo->bhash[inet_bhashfn(net, port, + hinfo->bhash_size)]; + spin_lock_bh(&head->lock); + inet_bind_bucket_for_each(tb, &head->chain) + if (net_eq(ib_net(tb), net) && tb->port == port) + goto tb_found; tb_not_found: tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net, head, port); @@ -255,7 +277,7 @@ tb_found: !rcu_access_pointer(sk->sk_reuseport_cb) && sk->sk_reuseport && uid_eq(tb->fastuid, uid))) goto success; - if (inet_csk_bind_conflict(sk, tb, true, reuseport_ok)) + if (inet_csk_bind_conflict(sk, tb, true, true)) goto fail_unlock; } success: -- cgit v1.2.3 From 637bc8bbe6c0a288a596edfdcdd5657c72a848db Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 Jan 2017 07:51:06 -0800 Subject: inet: reset tb->fastreuseport when adding a reuseport sk If we have non reuseport sockets on a tb we will set tb->fastreuseport to 0 and never set it again. Which means that in the future if we end up adding a bunch of reuseport sk's to that tb we'll have to do the expensive scan every time. Instead add the ipv4/ipv6 saddr fields to the bind bucket, as well as the family so we know what comparison to make, and the ipv6 only setting so we can make sure to compare with new sockets appropriately. 
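As a rough user-space sketch of the workload this change targets (illustrative only, error handling omitted, not part of the kernel patch): many SO_REUSEPORT listeners bound to the same address and port, where every bind() after the first previously had to rescan the whole owner list of the bind bucket.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Open n TCP listeners sharing one port via SO_REUSEPORT; with the cached
 * saddr/uid in the bind bucket, the 2nd..nth bind() can take the fast path. */
static void open_reuseport_listeners(unsigned short port, int n)
{
	struct sockaddr_in addr;
	int one = 1, i;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	for (i = 0; i < n; i++) {
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		bind(fd, (struct sockaddr *)&addr, sizeof(addr));
		listen(fd, 128);
	}
}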
Once one sk has made it onto the list we know that there are no potential bind conflicts on the owners list that match that sk's rcv_addr. So copy the sk's information into our bind bucket and set tb->fastruseport to FASTREUSESOCK_STRICT so we know we have to do an extra check for subsequent reuseport sockets and skip the expensive bind conflict check. Signed-off-by: Josef Bacik Signed-off-by: David S. Miller --- include/net/inet_hashtables.h | 9 ++++ net/ipv4/inet_connection_sock.c | 106 ++++++++++++++++++++++++++++++++-------- 2 files changed, 95 insertions(+), 20 deletions(-) diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 3fc0366743da..1178931288cb 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -74,12 +74,21 @@ struct inet_ehash_bucket { * users logged onto your box, isn't it nice to know that new data * ports are created in O(1) time? I thought so. ;-) -DaveM */ +#define FASTREUSEPORT_ANY 1 +#define FASTREUSEPORT_STRICT 2 + struct inet_bind_bucket { possible_net_t ib_net; unsigned short port; signed char fastreuse; signed char fastreuseport; kuid_t fastuid; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr fast_v6_rcv_saddr; +#endif + __be32 fast_rcv_saddr; + unsigned short fast_sk_family; + bool fast_ipv6_only; struct hlist_node node; struct hlist_head owners; }; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index bbe28920e2d8..096a085611ab 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -38,20 +38,21 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg); * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, * and 0.0.0.0 equals to 0.0.0.0 only */ -static int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, +static int ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, + const struct in6_addr *sk2_rcv_saddr6, + __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, + bool sk1_ipv6only, bool sk2_ipv6only, bool match_wildcard) { - const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); - int sk2_ipv6only = inet_v6_ipv6only(sk2); - int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); + int addr_type = ipv6_addr_type(sk1_rcv_saddr6); int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; /* if both are mapped, treat as IPv4 */ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) { if (!sk2_ipv6only) { - if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr) + if (sk1_rcv_saddr == sk2_rcv_saddr) return 1; - if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr) + if (!sk1_rcv_saddr || !sk2_rcv_saddr) return match_wildcard; } return 0; @@ -65,11 +66,11 @@ static int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, return 1; if (addr_type == IPV6_ADDR_ANY && match_wildcard && - !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED)) + !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) return 1; if (sk2_rcv_saddr6 && - ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6)) + ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6)) return 1; return 0; @@ -80,13 +81,13 @@ static int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, * match_wildcard == false: addresses must be exactly the same, i.e. 
* 0.0.0.0 only equals to 0.0.0.0 */ -static int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, - bool match_wildcard) +static int ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, + bool sk2_ipv6only, bool match_wildcard) { - if (!ipv6_only_sock(sk2)) { - if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr) + if (!sk2_ipv6only) { + if (sk1_rcv_saddr == sk2_rcv_saddr) return 1; - if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr) + if (!sk1_rcv_saddr || !sk2_rcv_saddr) return match_wildcard; } return 0; @@ -97,9 +98,16 @@ int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, { #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) - return ipv6_rcv_saddr_equal(sk, sk2, match_wildcard); + return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr, + &sk2->sk_v6_rcv_saddr, + sk->sk_rcv_saddr, + sk2->sk_rcv_saddr, + ipv6_only_sock(sk), + ipv6_only_sock(sk2), + match_wildcard); #endif - return ipv4_rcv_saddr_equal(sk, sk2, match_wildcard); + return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr, + ipv6_only_sock(sk2), match_wildcard); } EXPORT_SYMBOL(inet_rcv_saddr_equal); @@ -234,6 +242,39 @@ success: return head; } +static inline int sk_reuseport_match(struct inet_bind_bucket *tb, + struct sock *sk) +{ + kuid_t uid = sock_i_uid(sk); + + if (tb->fastreuseport <= 0) + return 0; + if (!sk->sk_reuseport) + return 0; + if (rcu_access_pointer(sk->sk_reuseport_cb)) + return 0; + if (!uid_eq(tb->fastuid, uid)) + return 0; + /* We only need to check the rcv_saddr if this tb was once marked + * without fastreuseport and then was reset, as we can only know that + * the fast_*rcv_saddr doesn't have any conflicts with the socks on the + * owners list. + */ + if (tb->fastreuseport == FASTREUSEPORT_ANY) + return 1; +#if IS_ENABLED(CONFIG_IPV6) + if (tb->fast_sk_family == AF_INET6) + return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr, + &sk->sk_v6_rcv_saddr, + tb->fast_rcv_saddr, + sk->sk_rcv_saddr, + tb->fast_ipv6_only, + ipv6_only_sock(sk), true); +#endif + return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr, + ipv6_only_sock(sk), true); +} + /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. * We try to allocate an odd port (and leave even ports for connect()) @@ -273,9 +314,7 @@ tb_found: goto success; if ((tb->fastreuse > 0 && reuse) || - (tb->fastreuseport > 0 && - !rcu_access_pointer(sk->sk_reuseport_cb) && - sk->sk_reuseport && uid_eq(tb->fastuid, uid))) + sk_reuseport_match(tb, sk)) goto success; if (inet_csk_bind_conflict(sk, tb, true, true)) goto fail_unlock; @@ -284,16 +323,43 @@ success: if (!hlist_empty(&tb->owners)) { tb->fastreuse = reuse; if (sk->sk_reuseport) { - tb->fastreuseport = 1; + tb->fastreuseport = FASTREUSEPORT_ANY; tb->fastuid = uid; + tb->fast_rcv_saddr = sk->sk_rcv_saddr; + tb->fast_ipv6_only = ipv6_only_sock(sk); +#if IS_ENABLED(CONFIG_IPV6) + tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; +#endif } else { tb->fastreuseport = 0; } } else { if (!reuse) tb->fastreuse = 0; - if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)) + if (sk->sk_reuseport) { + /* We didn't match or we don't have fastreuseport set on + * the tb, but we have sk_reuseport set on this socket + * and we know that there are no bind conflicts with + * this socket in this tb, so reset our tb's reuseport + * settings so that any subsequent sockets that match + * our current socket will be put on the fast path. 
+ * + * If we reset we need to set FASTREUSEPORT_STRICT so we + * do extra checking for all subsequent sk_reuseport + * socks. + */ + if (!sk_reuseport_match(tb, sk)) { + tb->fastreuseport = FASTREUSEPORT_STRICT; + tb->fastuid = uid; + tb->fast_rcv_saddr = sk->sk_rcv_saddr; + tb->fast_ipv6_only = ipv6_only_sock(sk); +#if IS_ENABLED(CONFIG_IPV6) + tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; +#endif + } + } else { tb->fastreuseport = 0; + } } if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); -- cgit v1.2.3 From 9a6d87626252496311cd122aaa9fbf21ae19e449 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sat, 7 Jan 2017 21:33:54 +0800 Subject: netfilter: pkttype: unnecessary to check ipv6 multicast address Since there's no broadcast address in IPV6, so in ipv6 family, the PACKET_LOOPBACK must be multicast packets, there's no need to check it again. Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_meta.c | 5 +---- net/netfilter/xt_pkttype.c | 3 +-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 66c7f4b4c49b..9a22b24346b8 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -154,10 +154,7 @@ void nft_meta_get_eval(const struct nft_expr *expr, *dest = PACKET_BROADCAST; break; case NFPROTO_IPV6: - if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) - *dest = PACKET_MULTICAST; - else - *dest = PACKET_BROADCAST; + *dest = PACKET_MULTICAST; break; default: WARN_ON(1); diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c index 57efb703ff18..1ef99151b3ba 100644 --- a/net/netfilter/xt_pkttype.c +++ b/net/netfilter/xt_pkttype.c @@ -33,8 +33,7 @@ pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par) else if (xt_family(par) == NFPROTO_IPV4 && ipv4_is_multicast(ip_hdr(skb)->daddr)) type = PACKET_MULTICAST; - else if (xt_family(par) == NFPROTO_IPV6 && - ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) + else if (xt_family(par) == NFPROTO_IPV6) type = PACKET_MULTICAST; else type = PACKET_BROADCAST; -- cgit v1.2.3 From f169fd695b192dd7b23aff8e69d25a1bc881bbfa Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sat, 7 Jan 2017 21:33:55 +0800 Subject: netfilter: nft_meta: deal with PACKET_LOOPBACK in netdev family After adding the following nft rule, then ping 224.0.0.1: # nft add rule netdev t c pkttype host counter The warning complain message will be printed out again and again: WARNING: CPU: 0 PID: 10182 at net/netfilter/nft_meta.c:163 \ nft_meta_get_eval+0x3fe/0x460 [nft_meta] [...] Call Trace: dump_stack+0x85/0xc2 __warn+0xcb/0xf0 warn_slowpath_null+0x1d/0x20 nft_meta_get_eval+0x3fe/0x460 [nft_meta] nft_do_chain+0xff/0x5e0 [nf_tables] So we should deal with PACKET_LOOPBACK in netdev family too. For ipv4, convert it to PACKET_BROADCAST/MULTICAST according to the destination address's type; For ipv6, convert it to PACKET_MULTICAST directly. 
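A compact sketch of the classification rule described above (hypothetical helper, not the code added by the patch; it assumes the skb was already identified as PACKET_LOOPBACK and that the IP header has been read):

/* IPv4 loopback traffic may be broadcast or multicast, so look at the
 * destination address; IPv6 has no broadcast, so it is always multicast. */
static u8 looped_back_pkttype(__be16 proto, __be32 ipv4_daddr)
{
	if (proto == htons(ETH_P_IP))
		return ipv4_is_multicast(ipv4_daddr) ? PACKET_MULTICAST
						     : PACKET_BROADCAST;
	return PACKET_MULTICAST;	/* ETH_P_IPV6 */
}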
Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_meta.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 9a22b24346b8..e1f5ca9b423b 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -156,8 +156,34 @@ void nft_meta_get_eval(const struct nft_expr *expr, case NFPROTO_IPV6: *dest = PACKET_MULTICAST; break; + case NFPROTO_NETDEV: + switch (skb->protocol) { + case htons(ETH_P_IP): { + int noff = skb_network_offset(skb); + struct iphdr *iph, _iph; + + iph = skb_header_pointer(skb, noff, + sizeof(_iph), &_iph); + if (!iph) + goto err; + + if (ipv4_is_multicast(iph->daddr)) + *dest = PACKET_MULTICAST; + else + *dest = PACKET_BROADCAST; + + break; + } + case htons(ETH_P_IPV6): + *dest = PACKET_MULTICAST; + break; + default: + WARN_ON_ONCE(1); + goto err; + } + break; default: - WARN_ON(1); + WARN_ON_ONCE(1); goto err; } break; -- cgit v1.2.3 From cc16f00f6529aa2378f2b949a6f68e9dc6dec363 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 18 Jan 2017 00:44:42 +0800 Subject: sctp: add support for generating stream reconf ssn reset request chunk This patch is to add asoc strreset_outseq and strreset_inseq for saving the reconf request sequence, initialize them when create assoc and process init, and also to define Incoming and Outgoing SSN Reset Request Parameter described in rfc6525 section 4.1 and 4.2, As they can be in one same chunk as section rfc6525 3.1-3 describes, it makes them in one function. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 27 ++++++++++ include/net/sctp/sm.h | 5 +- include/net/sctp/structs.h | 3 ++ net/sctp/associola.c | 1 + net/sctp/sm_make_chunk.c | 121 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 156 insertions(+), 1 deletion(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index fcb4c3646173..a9e790685af3 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -108,6 +108,7 @@ typedef enum { /* Use hex, as defined in ADDIP sec. 3.1 */ SCTP_CID_ASCONF = 0xC1, SCTP_CID_ASCONF_ACK = 0x80, + SCTP_CID_RECONF = 0x82, } sctp_cid_t; /* enum */ @@ -199,6 +200,13 @@ typedef enum { SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), + /* RE-CONFIG. 
Section 4 */ + SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d), + SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e), + SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f), + SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010), + SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011), + SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012), } sctp_param_t; /* enum */ @@ -710,4 +718,23 @@ struct sctp_infox { struct sctp_association *asoc; }; +struct sctp_reconf_chunk { + sctp_chunkhdr_t chunk_hdr; + __u8 params[0]; +} __packed; + +struct sctp_strreset_outreq { + sctp_paramhdr_t param_hdr; + __u32 request_seq; + __u32 response_seq; + __u32 send_reset_at_tsn; + __u16 list_of_streams[0]; +} __packed; + +struct sctp_strreset_inreq { + sctp_paramhdr_t param_hdr; + __u32 request_seq; + __u16 list_of_streams[0]; +} __packed; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index ca6c971dd74a..3462cb08f51a 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -259,7 +259,10 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist); struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); - +struct sctp_chunk *sctp_make_strreset_req( + const struct sctp_association *asoc, + __u16 stream_num, __u16 *stream_list, + bool out, bool in); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 4741ec240caf..3dc983e97564 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1865,6 +1865,9 @@ struct sctp_association { temp:1, /* Is it a temporary association? */ prsctp_enable:1; + __u32 strreset_outseq; /* Update after receiving response */ + __u32 strreset_inseq; /* Update after receiving request */ + struct sctp_priv_assoc_stats stats; int sent_cnt_removable; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 36294f7fb9a7..42ece6f35b98 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -207,6 +207,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a * association to the same value as the initial TSN. */ asoc->addip_serial = asoc->c.initial_tsn; + asoc->strreset_outseq = asoc->c.initial_tsn; INIT_LIST_HEAD(&asoc->addip_chunk_list); INIT_LIST_HEAD(&asoc->asconf_ack_list); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 80a9088084ac..df81fce08890 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1844,6 +1844,7 @@ no_hmac: retval->next_tsn = retval->c.initial_tsn; retval->ctsn_ack_point = retval->next_tsn - 1; retval->addip_serial = retval->c.initial_tsn; + retval->strreset_outseq = retval->c.initial_tsn; retval->adv_peer_ack_point = retval->ctsn_ack_point; retval->peer.prsctp_capable = retval->c.prsctp_capable; retval->peer.adaptation_ind = retval->c.adaptation_ind; @@ -2387,6 +2388,8 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, asoc->peer.i.initial_tsn = ntohl(peer_init->init_hdr.initial_tsn); + asoc->strreset_inseq = asoc->peer.i.initial_tsn; + /* Apply the upper bounds for output streams based on peer's * number of inbound streams. 
*/ @@ -3524,3 +3527,121 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, return retval; } + +/* RE-CONFIG 3.1 (RE-CONFIG chunk) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 130 | Chunk Flags | Chunk Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * \ \ + * / Re-configuration Parameter / + * \ \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * \ \ + * / Re-configuration Parameter (optional) / + * \ \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +static struct sctp_chunk *sctp_make_reconf( + const struct sctp_association *asoc, + int length) +{ + struct sctp_reconf_chunk *reconf; + struct sctp_chunk *retval; + + retval = sctp_make_control(asoc, SCTP_CID_RECONF, 0, length, + GFP_ATOMIC); + if (!retval) + return NULL; + + reconf = (struct sctp_reconf_chunk *)retval->chunk_hdr; + retval->param_hdr.v = reconf->params; + + return retval; +} + +/* RE-CONFIG 4.1 (STREAM OUT RESET) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 13 | Parameter Length = 16 + 2 * N | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Request Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Response Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Sender's Last Assigned TSN | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream Number 1 (optional) | Stream Number 2 (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * / ...... / + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream Number N-1 (optional) | Stream Number N (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * RE-CONFIG 4.2 (STREAM IN RESET) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 14 | Parameter Length = 8 + 2 * N | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Request Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream Number 1 (optional) | Stream Number 2 (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * / ...... 
/ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream Number N-1 (optional) | Stream Number N (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_strreset_req( + const struct sctp_association *asoc, + __u16 stream_num, __u16 *stream_list, + bool out, bool in) +{ + struct sctp_strreset_outreq outreq; + __u16 stream_len = stream_num * 2; + struct sctp_strreset_inreq inreq; + struct sctp_chunk *retval; + __u16 outlen, inlen, i; + + outlen = (sizeof(outreq) + stream_len) * out; + inlen = (sizeof(inreq) + stream_len) * in; + + retval = sctp_make_reconf(asoc, outlen + inlen); + if (!retval) + return NULL; + + for (i = 0; i < stream_num; i++) + stream_list[i] = htons(stream_list[i]); + + if (outlen) { + outreq.param_hdr.type = SCTP_PARAM_RESET_OUT_REQUEST; + outreq.param_hdr.length = htons(outlen); + outreq.request_seq = htonl(asoc->strreset_outseq); + outreq.response_seq = htonl(asoc->strreset_inseq - 1); + outreq.send_reset_at_tsn = htonl(asoc->next_tsn - 1); + + sctp_addto_chunk(retval, sizeof(outreq), &outreq); + + if (stream_len) + sctp_addto_chunk(retval, stream_len, stream_list); + } + + if (inlen) { + inreq.param_hdr.type = SCTP_PARAM_RESET_IN_REQUEST; + inreq.param_hdr.length = htons(inlen); + inreq.request_seq = htonl(asoc->strreset_outseq + out); + + sctp_addto_chunk(retval, sizeof(inreq), &inreq); + + if (stream_len) + sctp_addto_chunk(retval, stream_len, stream_list); + } + + for (i = 0; i < stream_num; i++) + stream_list[i] = ntohs(stream_list[i]); + + return retval; +} -- cgit v1.2.3 From 7b9438de0cd4b46a6914416bfede6cf839cd9e68 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 18 Jan 2017 00:44:43 +0800 Subject: sctp: add stream reconf timer This patch is to add a per transport timer based on sctp timer frame for stream reconf chunk retransmission. It would start after sending a reconf request chunk, and stop after receiving the response chunk. If the timer expires, besides retransmitting the reconf request chunk, it would also do the same thing with data RTO timer. like to increase the appropriate error counts, and perform threshold management, possibly destroying the asoc if sctp retransmission thresholds are exceeded, just as section 5.1.1 describes. This patch is also to add asoc strreset_chunk, it is used to save the reconf request chunk, so that it can be retransmitted, and to check if the response is really for this request by comparing the information inside with the response chunk as well. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/constants.h | 1 + include/net/sctp/sm.h | 2 ++ include/net/sctp/structs.h | 6 ++++++ net/sctp/associola.c | 9 +++++++++ net/sctp/sm_sideeffect.c | 32 ++++++++++++++++++++++++++++++++ net/sctp/sm_statefuns.c | 28 ++++++++++++++++++++++++++++ net/sctp/sm_statetable.c | 20 ++++++++++++++++++++ net/sctp/transport.c | 17 +++++++++++++++-- 8 files changed, 113 insertions(+), 2 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 5b847e49f7e9..8307c862b5c2 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -90,6 +90,7 @@ typedef enum { SCTP_EVENT_TIMEOUT_T4_RTO, SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, SCTP_EVENT_TIMEOUT_HEARTBEAT, + SCTP_EVENT_TIMEOUT_RECONF, SCTP_EVENT_TIMEOUT_SACK, SCTP_EVENT_TIMEOUT_AUTOCLOSE, } sctp_event_timeout_t; diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 3462cb08f51a..d2d9e28fe783 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -167,6 +167,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort; /* Prototypes for timeout event state functions. */ sctp_state_fn_t sctp_sf_do_6_3_3_rtx; +sctp_state_fn_t sctp_sf_send_reconf; sctp_state_fn_t sctp_sf_do_6_2_sack; sctp_state_fn_t sctp_sf_autoclose_timer_expire; @@ -278,6 +279,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, /* 2nd level prototypes */ void sctp_generate_t3_rtx_event(unsigned long peer); void sctp_generate_heartbeat_event(unsigned long peer); +void sctp_generate_reconf_event(unsigned long peer); void sctp_generate_proto_unreach_event(unsigned long peer); void sctp_ootb_pkt_free(struct sctp_packet *); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 3dc983e97564..463b4d642d68 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -877,6 +877,9 @@ struct sctp_transport { /* Timer to handle ICMP proto unreachable envets */ struct timer_list proto_unreach_timer; + /* Timer to handler reconf chunk rtx */ + struct timer_list reconf_timer; + /* Since we're using per-destination retransmission timers * (see above), we're also using per-destination "transmitted" * queues. This probably ought to be a private struct @@ -935,6 +938,7 @@ void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); void sctp_transport_free(struct sctp_transport *); void sctp_transport_reset_t3_rtx(struct sctp_transport *); void sctp_transport_reset_hb_timer(struct sctp_transport *); +void sctp_transport_reset_reconf_timer(struct sctp_transport *transport); int sctp_transport_hold(struct sctp_transport *); void sctp_transport_put(struct sctp_transport *); void sctp_transport_update_rto(struct sctp_transport *, __u32); @@ -1868,6 +1872,8 @@ struct sctp_association { __u32 strreset_outseq; /* Update after receiving response */ __u32 strreset_inseq; /* Update after receiving request */ + struct sctp_chunk *strreset_chunk; /* save request chunk */ + struct sctp_priv_assoc_stats stats; int sent_cnt_removable; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 42ece6f35b98..fc33540d2f11 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -362,6 +362,9 @@ void sctp_association_free(struct sctp_association *asoc) /* Free stream information. */ sctp_stream_free(asoc->stream); + if (asoc->strreset_chunk) + sctp_chunk_free(asoc->strreset_chunk); + /* Clean up the bound address list. 
*/ sctp_bind_addr_free(&asoc->base.bind_addr); @@ -520,6 +523,12 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, if (asoc->peer.last_data_from == peer) asoc->peer.last_data_from = transport; + if (asoc->strreset_chunk && + asoc->strreset_chunk->transport == peer) { + asoc->strreset_chunk->transport = transport; + sctp_transport_reset_reconf_timer(transport); + } + /* If we remove the transport an INIT was last sent to, set it to * NULL. Combined with the update of the retran path above, this * will cause the next INIT to be sent to the next available diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index c345bf153bed..a4552712b882 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -436,6 +436,37 @@ out_unlock: sctp_association_put(asoc); } + /* Handle the timeout of the RE-CONFIG timer. */ +void sctp_generate_reconf_event(unsigned long data) +{ + struct sctp_transport *transport = (struct sctp_transport *)data; + struct sctp_association *asoc = transport->asoc; + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); + int error = 0; + + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { + pr_debug("%s: sock is busy\n", __func__); + + /* Try again later. */ + if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20))) + sctp_transport_hold(transport); + goto out_unlock; + } + + error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, + SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF), + asoc->state, asoc->ep, asoc, + transport, GFP_ATOMIC); + + if (error) + sk->sk_err = -error; + +out_unlock: + bh_unlock_sock(sk); + sctp_transport_put(transport); +} /* Inject a SACK Timeout event into the state machine. */ static void sctp_generate_sack_event(unsigned long data) @@ -453,6 +484,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { sctp_generate_t4_rto_event, sctp_generate_t5_shutdown_guard_event, NULL, + NULL, sctp_generate_sack_event, sctp_generate_autoclose_event, }; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 0ceded37d20b..2ae186aba9a8 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1021,6 +1021,34 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net, return SCTP_DISPOSITION_CONSUME; } +/* resend asoc strreset_chunk. */ +sctp_disposition_t sctp_sf_send_reconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_transport *transport = arg; + + if (asoc->overall_error_count >= asoc->max_retrans) { + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ETIMEDOUT)); + /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_NO_ERROR)); + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + return SCTP_DISPOSITION_DELETE_TCB; + } + + sctp_chunk_hold(asoc->strreset_chunk); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(asoc->strreset_chunk)); + sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); + + return SCTP_DISPOSITION_CONSUME; +} + /* * Process an heartbeat request. 
* diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index a987d54b379c..3da521abfc57 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -888,6 +888,25 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } +#define TYPE_SCTP_EVENT_TIMEOUT_RECONF { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_send_reconf), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_EVENT_TIMEOUT_NONE, TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE, @@ -897,6 +916,7 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S TYPE_SCTP_EVENT_TIMEOUT_T4_RTO, TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT, + TYPE_SCTP_EVENT_TIMEOUT_RECONF, TYPE_SCTP_EVENT_TIMEOUT_SACK, TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, }; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index a1652ab63918..baa1ac00d7b5 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -88,9 +88,11 @@ static struct sctp_transport *sctp_transport_init(struct net *net, INIT_LIST_HEAD(&peer->transports); setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, - (unsigned long)peer); + (unsigned long)peer); setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, - (unsigned long)peer); + (unsigned long)peer); + setup_timer(&peer->reconf_timer, sctp_generate_reconf_event, + (unsigned long)peer); setup_timer(&peer->proto_unreach_timer, sctp_generate_proto_unreach_event, (unsigned long)peer); @@ -144,6 +146,9 @@ void sctp_transport_free(struct sctp_transport *transport) if (del_timer(&transport->T3_rtx_timer)) sctp_transport_put(transport); + if (del_timer(&transport->reconf_timer)) + sctp_transport_put(transport); + /* Delete the ICMP proto unreachable timer if it's active. */ if (del_timer(&transport->proto_unreach_timer)) sctp_association_put(transport->asoc); @@ -211,6 +216,14 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport) sctp_transport_hold(transport); } +void sctp_transport_reset_reconf_timer(struct sctp_transport *transport) +{ + if (!timer_pending(&transport->reconf_timer)) + if (!mod_timer(&transport->reconf_timer, + jiffies + transport->rto)) + sctp_transport_hold(transport); +} + /* This transport has been assigned to an association. * Initialize fields from the association or from the sock itself. * Register the reference count in the association. -- cgit v1.2.3 From 7a090b04522b46a219c271d4cd2abbf572623e03 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 18 Jan 2017 00:44:44 +0800 Subject: sctp: add stream reconf primitive This patch is to add a primitive based on sctp primitive frame for sending stream reconf request. It works as the other primitives, and create a SCTP_CMD_REPLY command to send the request chunk out. sctp_primitive_RECONF would be the api to send a reconf request chunk. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/constants.h | 3 ++- include/net/sctp/sctp.h | 2 ++ include/net/sctp/sm.h | 1 + net/sctp/primitive.c | 3 +++ net/sctp/sm_statefuns.c | 13 +++++++++++++ net/sctp/sm_statetable.c | 20 ++++++++++++++++++++ 6 files changed, 41 insertions(+), 1 deletion(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 8307c862b5c2..3567c971cf3b 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -114,9 +114,10 @@ typedef enum { SCTP_PRIMITIVE_SEND, SCTP_PRIMITIVE_REQUESTHEARTBEAT, SCTP_PRIMITIVE_ASCONF, + SCTP_PRIMITIVE_RECONF, } sctp_event_primitive_t; -#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_ASCONF +#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_RECONF #define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1) /* We define here a utility type for manipulating subtypes. diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 598d938b0d0a..bc0e049b1474 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -141,6 +141,8 @@ int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg); int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg); int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg); int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg); +int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc, + void *arg); /* * sctp/input.c diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index d2d9e28fe783..430ed139fbbb 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -157,6 +157,7 @@ sctp_state_fn_t sctp_sf_error_shutdown; sctp_state_fn_t sctp_sf_ignore_primitive; sctp_state_fn_t sctp_sf_do_prm_requestheartbeat; sctp_state_fn_t sctp_sf_do_prm_asconf; +sctp_state_fn_t sctp_sf_do_prm_reconf; /* Prototypes for other event state functions. 
*/ sctp_state_fn_t sctp_sf_do_no_pending_tsn; diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c index ab8d9f96a177..f0553a022859 100644 --- a/net/sctp/primitive.c +++ b/net/sctp/primitive.c @@ -211,3 +211,6 @@ DECLARE_PRIMITIVE(REQUESTHEARTBEAT); */ DECLARE_PRIMITIVE(ASCONF); + +/* RE-CONFIG 5.1 */ +DECLARE_PRIMITIVE(RECONF); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 2ae186aba9a8..782e579472c9 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -5185,6 +5185,19 @@ sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, return SCTP_DISPOSITION_CONSUME; } +/* RE-CONFIG Section 5.1 RECONF Chunk Procedures */ +sctp_disposition_t sctp_sf_do_prm_reconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); + return SCTP_DISPOSITION_CONSUME; +} + /* * Ignore the primitive event * diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 3da521abfc57..b5438b4f6c1e 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -643,6 +643,25 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ } /* TYPE_SCTP_PRIMITIVE_ASCONF */ +#define TYPE_SCTP_PRIMITIVE_RECONF { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ +} /* TYPE_SCTP_PRIMITIVE_RECONF */ + /* The primary index for this table is the primitive type. * The secondary index for this table is the state. */ @@ -653,6 +672,7 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE TYPE_SCTP_PRIMITIVE_SEND, TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT, TYPE_SCTP_PRIMITIVE_ASCONF, + TYPE_SCTP_PRIMITIVE_RECONF, }; #define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ -- cgit v1.2.3 From c28445c3cb07ba1da2c1dc7b5f3066c686a6acc6 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 18 Jan 2017 00:44:45 +0800 Subject: sctp: add reconf_enable in asoc ep and netns This patch is to add reconf_enable field in all of asoc ep and netns to indicate if they support stream reset. When initializing, asoc reconf_enable get the default value from ep reconf_enable which is from netns netns reconf_enable by default. It is also to add reconf_capable in asoc peer part to know if peer supports reconf_enable, the value is set if ext params have reconf chunk support when processing init chunk, just as rfc6525 section 5.1.1 demands. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/netns/sctp.h | 3 +++ include/net/sctp/structs.h | 7 +++++-- net/sctp/associola.c | 1 + net/sctp/endpointola.c | 1 + net/sctp/protocol.c | 3 +++ net/sctp/sm_make_chunk.c | 15 +++++++++++++++ 6 files changed, 28 insertions(+), 2 deletions(-) diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index c501d67172b1..b7871d018354 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -118,6 +118,9 @@ struct netns_sctp { /* Flag to indicate if PR-SCTP is enabled. */ int prsctp_enable; + /* Flag to indicate if PR-CONFIG is enabled. */ + int reconf_enable; + /* Flag to idicate if SCTP-AUTH is enabled */ int auth_enable; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 463b4d642d68..ee037ef15d65 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1255,7 +1255,8 @@ struct sctp_endpoint { struct list_head endpoint_shared_keys; __u16 active_key_id; __u8 auth_enable:1, - prsctp_enable:1; + prsctp_enable:1, + reconf_enable:1; }; /* Recover the outter endpoint structure. */ @@ -1508,6 +1509,7 @@ struct sctp_association { hostname_address:1, /* Peer understands DNS addresses? */ asconf_capable:1, /* Does peer support ADDIP? */ prsctp_capable:1, /* Can peer do PR-SCTP? */ + reconf_capable:1, /* Can peer do RE-CONFIG? */ auth_capable:1; /* Is peer doing SCTP-AUTH? */ /* sack_needed : This flag indicates if the next received @@ -1867,7 +1869,8 @@ struct sctp_association { __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ temp:1, /* Is it a temporary association? */ - prsctp_enable:1; + prsctp_enable:1, + reconf_enable:1; __u32 strreset_outseq; /* Update after receiving response */ __u32 strreset_inseq; /* Update after receiving request */ diff --git a/net/sctp/associola.c b/net/sctp/associola.c index fc33540d2f11..68b99adc21a3 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -270,6 +270,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->active_key_id = ep->active_key_id; asoc->prsctp_enable = ep->prsctp_enable; + asoc->reconf_enable = ep->reconf_enable; /* Save the hmacs and chunks list into this association */ if (ep->auth_hmacs_list) diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 410ddc1e3443..8c589230794f 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -164,6 +164,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, ep->auth_hmacs_list = auth_hmacs; ep->auth_chunk_list = auth_chunks; ep->prsctp_enable = net->sctp.prsctp_enable; + ep->reconf_enable = net->sctp.reconf_enable; return ep; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index f9c3c37c9ae0..8227bbbd077a 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1258,6 +1258,9 @@ static int __net_init sctp_defaults_init(struct net *net) /* Enable PR-SCTP by default. */ net->sctp.prsctp_enable = 1; + /* Disable RECONF by default. */ + net->sctp.reconf_enable = 0; + /* Disable AUTH by default. 
*/ net->sctp.auth_enable = 0; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index df81fce08890..ad3445b3408e 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -270,6 +270,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, num_ext += 2; } + if (asoc->reconf_enable) { + extensions[num_ext] = SCTP_CID_RECONF; + num_ext += 1; + } + if (sp->adaptation_ind) chunksize += sizeof(aiparam); @@ -434,6 +439,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, num_ext += 2; } + if (asoc->peer.reconf_capable) { + extensions[num_ext] = SCTP_CID_RECONF; + num_ext += 1; + } + if (sp->adaptation_ind) chunksize += sizeof(aiparam); @@ -2012,6 +2022,11 @@ static void sctp_process_ext_param(struct sctp_association *asoc, for (i = 0; i < num_ext; i++) { switch (param.ext->chunks[i]) { + case SCTP_CID_RECONF: + if (asoc->reconf_enable && + !asoc->peer.reconf_capable) + asoc->peer.reconf_capable = 1; + break; case SCTP_CID_FWD_TSN: if (asoc->prsctp_enable && !asoc->peer.prsctp_capable) asoc->peer.prsctp_capable = 1; -- cgit v1.2.3 From 9fb657aec0e20b4ed4401c44a4140f8d7b7a9ca0 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 18 Jan 2017 00:44:46 +0800 Subject: sctp: add sockopt SCTP_ENABLE_STREAM_RESET This patch is to add sockopt SCTP_ENABLE_STREAM_RESET to get/set strreset_enable to indicate which reconf request type it supports, which is described in rfc6525 section 6.3.1. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 4 +++ include/uapi/linux/sctp.h | 7 ++++ net/sctp/associola.c | 1 + net/sctp/socket.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 96 insertions(+) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ee037ef15d65..d99b76e33b2e 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1257,6 +1257,8 @@ struct sctp_endpoint { __u8 auth_enable:1, prsctp_enable:1, reconf_enable:1; + + __u8 strreset_enable; }; /* Recover the outter endpoint structure. */ @@ -1872,6 +1874,8 @@ struct sctp_association { prsctp_enable:1, reconf_enable:1; + __u8 strreset_enable; + __u32 strreset_outseq; /* Update after receiving response */ __u32 strreset_inseq; /* Update after receiving request */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index a406adcc0793..867be0f32fd7 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -115,6 +115,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_PR_SUPPORTED 113 #define SCTP_DEFAULT_PRINFO 114 #define SCTP_PR_ASSOC_STATUS 115 +#define SCTP_ENABLE_STREAM_RESET 118 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -138,6 +139,12 @@ typedef __s32 sctp_assoc_t; #define SCTP_PR_RTX_ENABLED(x) (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_RTX) #define SCTP_PR_PRIO_ENABLED(x) (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_PRIO) +/* For enable stream reset */ +#define SCTP_ENABLE_RESET_STREAM_REQ 0x01 +#define SCTP_ENABLE_RESET_ASSOC_REQ 0x02 +#define SCTP_ENABLE_CHANGE_ASSOC_REQ 0x04 +#define SCTP_ENABLE_STRRESET_MASK 0x07 + /* These are bit fields for msghdr->msg_flags. See section 5.1. */ /* On user space Linux, these live in as an enum. 
*/ enum sctp_msg_flags { diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 68b99adc21a3..e50dc6d7543f 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->active_key_id = ep->active_key_id; asoc->prsctp_enable = ep->prsctp_enable; asoc->reconf_enable = ep->reconf_enable; + asoc->strreset_enable = ep->strreset_enable; /* Save the hmacs and chunks list into this association */ if (ep->auth_hmacs_list) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 635e03412693..0a9bc984b6c8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3750,6 +3750,42 @@ out: return retval; } +static int sctp_setsockopt_enable_strreset(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(¶ms, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (asoc) { + asoc->strreset_enable = params.assoc_value; + } else if (!params.assoc_id) { + struct sctp_sock *sp = sctp_sk(sk); + + sp->ep->strreset_enable = params.assoc_value; + } else { + goto out; + } + + retval = 0; + +out: + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -3916,6 +3952,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_DEFAULT_PRINFO: retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); break; + case SCTP_ENABLE_STREAM_RESET: + retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -6400,6 +6439,47 @@ out: return retval; } +static int sctp_getsockopt_enable_strreset(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(¶ms, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (asoc) { + params.assoc_value = asoc->strreset_enable; + } else if (!params.assoc_id) { + struct sctp_sock *sp = sctp_sk(sk); + + params.assoc_value = sp->ep->strreset_enable; + } else { + retval = -EINVAL; + goto out; + } + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, ¶ms, len)) + goto out; + + retval = 0; + +out: + return retval; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -6567,6 +6647,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, optlen); break; + case SCTP_ENABLE_STREAM_RESET: + retval = sctp_getsockopt_enable_strreset(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; -- cgit v1.2.3 From 7f9d68ac944e24ee5f9ac8d059ca00b1c1d34137 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 18 Jan 2017 00:44:47 +0800 Subject: sctp: implement sender-side procedures for SSN Reset Request Parameter This patch is to implement sender-side procedures for the Outgoing and Incoming SSN Reset Request Parameter described in rfc6525 section 5.1.2 and 5.1.3. It is also add sockopt SCTP_RESET_STREAMS in rfc6525 section 6.3.2 for users. 
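A minimal userspace sketch of how an application might drive the two new options, assuming a connected one-to-one SCTP socket whose association negotiated RE-CONFIG support; the helper name reset_outgoing_streams() is illustrative only, and the new SCTP_* defines are assumed to be available from updated headers:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    /* Allow stream-reset requests on this socket/association, then ask the
     * kernel to reset the outgoing SSNs of the listed streams.
     */
    static int reset_outgoing_streams(int fd, sctp_assoc_t assoc_id,
                                      const uint16_t *streams, uint16_t nr)
    {
            struct sctp_assoc_value av = {
                    .assoc_id    = assoc_id,
                    .assoc_value = SCTP_ENABLE_RESET_STREAM_REQ,
            };
            struct sctp_reset_streams *srs;
            size_t len = sizeof(*srs) + nr * sizeof(uint16_t);
            int ret;

            if (setsockopt(fd, IPPROTO_SCTP, SCTP_ENABLE_STREAM_RESET,
                           &av, sizeof(av)) < 0)
                    return -1;

            srs = calloc(1, len);
            if (!srs)
                    return -1;
            srs->srs_assoc_id = assoc_id;
            srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
            srs->srs_number_streams = nr;   /* 0 means "reset all streams" */
            memcpy(srs->srs_stream_list, streams, nr * sizeof(uint16_t));

            ret = setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, len);
            free(srs);
            return ret;
    }

The request fails with ENOPROTOOPT if the peer did not advertise RECONF support or the stream-reset request type was not enabled, and with EINPROGRESS while a previous reconf request is still outstanding.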
Note that the new asoc member strreset_outstanding is to make sure only one reconf request chunk on the fly as rfc6525 section 5.1.1 demands. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 6 ++++ include/net/sctp/structs.h | 1 + include/uapi/linux/sctp.h | 11 +++++++ net/sctp/outqueue.c | 33 +++++++++++++------ net/sctp/socket.c | 29 +++++++++++++++++ net/sctp/stream.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 149 insertions(+), 10 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index bc0e049b1474..3cfd365bcfbc 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -193,6 +193,12 @@ void sctp_remaddr_proc_exit(struct net *net); */ int sctp_offload_init(void); +/* + * sctp/stream.c + */ +int sctp_send_reset_streams(struct sctp_association *asoc, + struct sctp_reset_streams *params); + /* * Module global variables */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index d99b76e33b2e..231fa9ac50bd 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1875,6 +1875,7 @@ struct sctp_association { reconf_enable:1; __u8 strreset_enable; + __u8 strreset_outstanding; /* request param count on the fly */ __u32 strreset_outseq; /* Update after receiving response */ __u32 strreset_inseq; /* Update after receiving request */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 867be0f32fd7..03c27cefffb1 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -116,6 +116,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_DEFAULT_PRINFO 114 #define SCTP_PR_ASSOC_STATUS 115 #define SCTP_ENABLE_STREAM_RESET 118 +#define SCTP_RESET_STREAMS 119 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -145,6 +146,9 @@ typedef __s32 sctp_assoc_t; #define SCTP_ENABLE_CHANGE_ASSOC_REQ 0x04 #define SCTP_ENABLE_STRRESET_MASK 0x07 +#define SCTP_STREAM_RESET_INCOMING 0x01 +#define SCTP_STREAM_RESET_OUTGOING 0x02 + /* These are bit fields for msghdr->msg_flags. See section 5.1. */ /* On user space Linux, these live in as an enum. */ enum sctp_msg_flags { @@ -1015,4 +1019,11 @@ struct sctp_info { __u32 __reserved3; }; +struct sctp_reset_streams { + sctp_assoc_t srs_assoc_id; + uint16_t srs_flags; + uint16_t srs_number_streams; /* 0 == ALL */ + uint16_t srs_stream_list[]; /* list if srs_num_streams is not 0 */ +}; + #endif /* _UAPI_SCTP_H */ diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 34efaa4ef2f6..65abe22d8691 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -915,22 +915,28 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) case SCTP_CID_ECN_ECNE: case SCTP_CID_ASCONF: case SCTP_CID_FWD_TSN: + case SCTP_CID_RECONF: status = sctp_packet_transmit_chunk(packet, chunk, one_packet, gfp); if (status != SCTP_XMIT_OK) { /* put the chunk back */ list_add(&chunk->list, &q->control_chunk_list); - } else { - asoc->stats.octrlchunks++; - /* PR-SCTP C5) If a FORWARD TSN is sent, the - * sender MUST assure that at least one T3-rtx - * timer is running. - */ - if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { - sctp_transport_reset_t3_rtx(transport); - transport->last_time_sent = jiffies; - } + break; + } + + asoc->stats.octrlchunks++; + /* PR-SCTP C5) If a FORWARD TSN is sent, the + * sender MUST assure that at least one T3-rtx + * timer is running. 
+ */ + if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { + sctp_transport_reset_t3_rtx(transport); + transport->last_time_sent = jiffies; } + + if (chunk == asoc->strreset_chunk) + sctp_transport_reset_reconf_timer(transport); + break; default: @@ -1016,6 +1022,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) /* Finally, transmit new packets. */ while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { + __u32 sid = ntohs(chunk->subh.data_hdr->stream); + /* RFC 2960 6.5 Every DATA chunk MUST carry a valid * stream identifier. */ @@ -1038,6 +1046,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) continue; } + if (asoc->stream->out[sid].state == SCTP_STREAM_CLOSED) { + sctp_outq_head_data(q, chunk); + goto sctp_flush_out; + } + /* If there is a specified transport, use it. * Otherwise, we want to use the active path. */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0a9bc984b6c8..bee4dd3feabb 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3786,6 +3786,32 @@ out: return retval; } +static int sctp_setsockopt_reset_streams(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_reset_streams *params; + struct sctp_association *asoc; + int retval = -EINVAL; + + if (optlen < sizeof(struct sctp_reset_streams)) + return -EINVAL; + + params = memdup_user(optval, optlen); + if (IS_ERR(params)) + return PTR_ERR(params); + + asoc = sctp_id2assoc(sk, params->srs_assoc_id); + if (!asoc) + goto out; + + retval = sctp_send_reset_streams(asoc, params); + +out: + kfree(params); + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -3955,6 +3981,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_ENABLE_STREAM_RESET: retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); break; + case SCTP_RESET_STREAMS: + retval = sctp_setsockopt_reset_streams(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index f86de43cbbe5..13d5e07dcd7d 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -33,6 +33,7 @@ */ #include +#include struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) { @@ -83,3 +84,81 @@ void sctp_stream_clear(struct sctp_stream *stream) for (i = 0; i < stream->incnt; i++) stream->in[i].ssn = 0; } + +static int sctp_send_reconf(struct sctp_association *asoc, + struct sctp_chunk *chunk) +{ + struct net *net = sock_net(asoc->base.sk); + int retval = 0; + + retval = sctp_primitive_RECONF(net, asoc, chunk); + if (retval) + sctp_chunk_free(chunk); + + return retval; +} + +int sctp_send_reset_streams(struct sctp_association *asoc, + struct sctp_reset_streams *params) +{ + struct sctp_stream *stream = asoc->stream; + __u16 i, str_nums, *str_list; + struct sctp_chunk *chunk; + int retval = -EINVAL; + bool out, in; + + if (!asoc->peer.reconf_capable || + !(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) { + retval = -ENOPROTOOPT; + goto out; + } + + if (asoc->strreset_outstanding) { + retval = -EINPROGRESS; + goto out; + } + + out = params->srs_flags & SCTP_STREAM_RESET_OUTGOING; + in = params->srs_flags & SCTP_STREAM_RESET_INCOMING; + if (!out && !in) + goto out; + + str_nums = params->srs_number_streams; + str_list = params->srs_stream_list; + if (out && str_nums) + for (i = 0; i < str_nums; i++) + if (str_list[i] >= stream->outcnt) + goto out; + + if (in && str_nums) + for (i = 0; i < str_nums; 
i++) + if (str_list[i] >= stream->incnt) + goto out; + + chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); + if (!chunk) + goto out; + + if (out) { + if (str_nums) + for (i = 0; i < str_nums; i++) + stream->out[str_list[i]].state = + SCTP_STREAM_CLOSED; + else + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_CLOSED; + } + + asoc->strreset_outstanding = out + in; + asoc->strreset_chunk = chunk; + sctp_chunk_hold(asoc->strreset_chunk); + + retval = sctp_send_reconf(asoc, chunk); + if (retval) { + sctp_chunk_put(asoc->strreset_chunk); + asoc->strreset_chunk = NULL; + } + +out: + return retval; +} -- cgit v1.2.3 From 1a28ad74ebd8f9d3c7eae0d781f72a6d30545e17 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Mon, 16 Jan 2017 22:02:57 +0800 Subject: netfilter: nf_tables: eliminate useless condition checks The return value of nf_tables_table_lookup() is valid pointer or one pointer error. There are two cases: 1) IS_ERR(table) is true, it would return the error or reset the table as NULL, it is unnecessary to perform the latter check "table != NULL". 2) IS_ERR(obj) is false, the table is one valid pointer. It is also unnecessary to perform that check. The nf_tables_newset() and nf_tables_newobj() have same logic codes. In summary, we could move the block of condition check "table != NULL" in the else block to eliminate the original condition checks. Signed-off-by: Gao Feng Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a019a87e58ee..6e07c214c208 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -696,10 +696,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk, if (IS_ERR(table)) { if (PTR_ERR(table) != -ENOENT) return PTR_ERR(table); - table = NULL; - } - - if (table != NULL) { + } else { if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) @@ -2963,10 +2960,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, if (IS_ERR(set)) { if (PTR_ERR(set) != -ENOENT) return PTR_ERR(set); - set = NULL; - } - - if (set != NULL) { + } else { if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) @@ -4153,10 +4147,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, if (err != -ENOENT) return err; - obj = NULL; - } - - if (obj != NULL) { + } else { if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; -- cgit v1.2.3 From fd61c6ba313de6758aeeab58fe03bd9fbbc8cea9 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 17 Jan 2017 15:51:07 -0800 Subject: net: ipv6: remove nowait arg to rt6_fill_node All callers of rt6_fill_node pass 0 for nowait arg. Remove the arg and simplify rt6_fill_node accordingly. rt6_fill_node passes the nowait of 0 to ip6mr_get_route. Remove the nowait arg from it as well. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/mroute6.h | 2 +- net/ipv6/ip6mr.c | 9 ++------- net/ipv6/route.c | 27 ++++++++++----------------- 3 files changed, 13 insertions(+), 25 deletions(-) diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 19a1c0c2993b..ce44e3e96d27 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -116,7 +116,7 @@ struct mfc6_cache { struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, - struct rtmsg *rtm, int nowait, u32 portid); + struct rtmsg *rtm, u32 portid); #ifdef CONFIG_IPV6_MROUTE extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index e275077e8af2..babaf3ec2742 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2288,7 +2288,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, } int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, - int nowait, u32 portid) + u32 portid) { int err; struct mr6_table *mrt; @@ -2315,11 +2315,6 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, struct net_device *dev; int vif; - if (nowait) { - read_unlock(&mrt_lock); - return -EAGAIN; - } - dev = skb->dev; if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) { read_unlock(&mrt_lock); @@ -2357,7 +2352,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, return err; } - if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) + if (rtm->rtm_flags & RTM_F_NOTIFY) cache->mfc_flags |= MFC_NOTIFY; err = __ip6mr_fill_mroute(mrt, skb, cache, rtm); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4f6b067c8753..b2044dd71724 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3169,7 +3169,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, int iif, int type, u32 portid, u32 seq, - int prefix, int nowait, unsigned int flags) + int prefix, unsigned int flags) { u32 metrics[RTAX_MAX]; struct rtmsg *rtm; @@ -3261,19 +3261,12 @@ static int rt6_fill_node(struct net *net, if (iif) { #ifdef CONFIG_IPV6_MROUTE if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { - int err = ip6mr_get_route(net, skb, rtm, nowait, - portid); - - if (err <= 0) { - if (!nowait) { - if (err == 0) - return 0; - goto nla_put_failure; - } else { - if (err == -EMSGSIZE) - goto nla_put_failure; - } - } + int err = ip6mr_get_route(net, skb, rtm, portid); + + if (err == 0) + return 0; + if (err < 0) + goto nla_put_failure; } else #endif if (nla_put_u32(skb, RTA_IIF, iif)) @@ -3342,7 +3335,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) return rt6_fill_node(arg->net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, - prefix, 0, NLM_F_MULTI); + prefix, NLM_F_MULTI); } static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) @@ -3433,7 +3426,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, RTM_NEWROUTE, NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, 0, 0); + nlh->nlmsg_seq, 0, 0); if (err < 0) { kfree_skb(skb); goto errout; @@ -3460,7 +3453,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, goto errout; err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, - event, info->portid, seq, 0, 0, nlm_flags); + event, info->portid, seq, 0, nlm_flags); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); 
-- cgit v1.2.3 From f8cfe2ceb1c2ebe1469040d037a733815d2255d5 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 17 Jan 2017 15:51:08 -0800 Subject: net: ipv6: remove prefix arg to rt6_fill_node The prefix arg to rt6_fill_node is non-0 in only 1 path - rt6_dump_route where a user is requesting a prefix only dump. Simplify rt6_fill_node by removing the prefix arg and moving the prefix check to rt6_dump_route. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/route.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b2044dd71724..5585c501a540 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3169,7 +3169,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, int iif, int type, u32 portid, u32 seq, - int prefix, unsigned int flags) + unsigned int flags) { u32 metrics[RTAX_MAX]; struct rtmsg *rtm; @@ -3177,13 +3177,6 @@ static int rt6_fill_node(struct net *net, long expires; u32 table; - if (prefix) { /* user wants prefix routes only */ - if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { - /* success since this is not a prefix route */ - return 1; - } - } - nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); if (!nlh) return -EMSGSIZE; @@ -3324,18 +3317,22 @@ nla_put_failure: int rt6_dump_route(struct rt6_info *rt, void *p_arg) { struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; - int prefix; if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) { struct rtmsg *rtm = nlmsg_data(arg->cb->nlh); - prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0; - } else - prefix = 0; + + /* user wants prefix routes only */ + if (rtm->rtm_flags & RTM_F_PREFIX && + !(rt->rt6i_flags & RTF_PREFIX_RT)) { + /* success since this is not a prefix route */ + return 1; + } + } return rt6_fill_node(arg->net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, - prefix, NLM_F_MULTI); + NLM_F_MULTI); } static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) @@ -3426,7 +3423,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, RTM_NEWROUTE, NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, 0); + nlh->nlmsg_seq, 0); if (err < 0) { kfree_skb(skb); goto errout; @@ -3453,7 +3450,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, goto errout; err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, - event, info->portid, seq, 0, nlm_flags); + event, info->portid, seq, nlm_flags); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); -- cgit v1.2.3 From 1a8b6d76dc5b489cd0123fa8447b6e20569f357b Mon Sep 17 00:00:00 2001 From: Mao Wenan Date: Wed, 18 Jan 2017 08:50:05 +0800 Subject: net:add one common config ARCH_WANT_RELAX_ORDER to support relax ordering Relax ordering(RO) is one feature of 82599 NIC, to enable this feature can enhance the performance for some cpu architecure, such as SPARC and so on. Currently it only supports one special cpu architecture(SPARC) in 82599 driver to enable RO feature, this is not very common for other cpu architecture which really needs RO feature. This patch add one common config CONFIG_ARCH_WANT_RELAX_ORDER to set RO feature, and should define CONFIG_ARCH_WANT_RELAX_ORDER in sparc Kconfig firstly. 
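The patch itself keeps the preprocessor check in ixgbe (see the diff below), but the same gate can be expressed without an #ifndef; a minimal sketch, assuming a hypothetical helper that turns the workaround off:

    /* Leave PCIe relaxed ordering enabled only on architectures that
     * opt in by selecting ARCH_WANT_RELAX_ORDER in their Kconfig.
     */
    if (!IS_ENABLED(CONFIG_ARCH_WANT_RELAX_ORDER))
            example_disable_relaxed_ordering(hw);   /* hypothetical helper */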
Signed-off-by: Mao Wenan Reviewed-by: Alexander Duyck Reviewed-by: Alexander Duyck Signed-off-by: David S. Miller --- arch/Kconfig | 3 +++ arch/sparc/Kconfig | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/Kconfig b/arch/Kconfig index 99839c23d453..bd04eace455c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -781,4 +781,7 @@ config VMAP_STACK the stack to map directly to the KASAN shadow map using a formula that is incorrect if the stack is in vmalloc space. +config ARCH_WANT_RELAX_ORDER + bool + source "kernel/gcov/Kconfig" diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index cf4034c66362..68ac5c7cd982 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -44,6 +44,7 @@ config SPARC select CPU_NO_EFFICIENT_FFS select HAVE_ARCH_HARDENED_USERCOPY select PROVE_LOCKING_SMALL if PROVE_LOCKING + select ARCH_WANT_RELAX_ORDER config SPARC32 def_bool !64BIT diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 094e1d63309a..c38d50c1fcf7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -350,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); -#ifndef CONFIG_SPARC +#ifndef CONFIG_ARCH_WANT_RELAX_ORDER /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { u32 regval; -- cgit v1.2.3 From 275bf960ac69744430a6725a4ed7f50d36cf1441 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 18 Jan 2017 15:02:01 +0800 Subject: vhost: better detection of available buffers This patch tries to do several tweaks on vhost_vq_avail_empty() for a better performance: - check cached avail index first which could avoid userspace memory access. - using unlikely() for the failure of userspace access - check vq->last_avail_idx instead of cached avail index as the last step. This patch is need for batching supports which needs to peek whether or not there's still available buffers in the ring. Reviewed-by: Stefan Hajnoczi Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/vhost/vhost.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d6432603880c..9f118388a5b7 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -2241,11 +2241,15 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) __virtio16 avail_idx; int r; + if (vq->avail_idx != vq->last_avail_idx) + return false; + r = vhost_get_user(vq, avail_idx, &vq->avail->idx); - if (r) + if (unlikely(r)) return false; + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx; + return vq->avail_idx == vq->last_avail_idx; } EXPORT_SYMBOL_GPL(vhost_vq_avail_empty); -- cgit v1.2.3 From 0ed005ce02fa0a88e5e6b7b5f7ff452171881610 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 18 Jan 2017 15:02:02 +0800 Subject: vhost_net: tx batching This patch tries to utilize tuntap rx batching by peeking the tx virtqueue during transmission, if there's more available buffers in the virtqueue, set MSG_MORE flag for a hint for backend (e.g tuntap) to batch the packets. Reviewed-by: Stefan Hajnoczi Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- drivers/vhost/net.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 5dc34653274a..c42e9c305134 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -351,6 +351,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net, return r; } +static bool vhost_exceeds_maxpend(struct vhost_net *net) +{ + struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *vq = &nvq->vq; + + return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV + == nvq->done_idx; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_tx(struct vhost_net *net) @@ -394,8 +403,7 @@ static void handle_tx(struct vhost_net *net) /* If more outstanding DMAs, queue the work. * Handle upend_idx wrap around */ - if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND) - % UIO_MAXIOV == nvq->done_idx)) + if (unlikely(vhost_exceeds_maxpend(net))) break; head = vhost_net_tx_get_vq_desc(net, vq, vq->iov, @@ -454,6 +462,16 @@ static void handle_tx(struct vhost_net *net) msg.msg_control = NULL; ubufs = NULL; } + + total_len += len; + if (total_len < VHOST_NET_WEIGHT && + !vhost_vq_avail_empty(&net->dev, vq) && + likely(!vhost_exceeds_maxpend(net))) { + msg.msg_flags |= MSG_MORE; + } else { + msg.msg_flags &= ~MSG_MORE; + } + /* TODO: Check specific error and bomb out unless ENOBUFS? */ err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { @@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net) vhost_add_used_and_signal(&net->dev, vq, head, 0); else vhost_zerocopy_signal_used(net, vq); - total_len += len; vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); -- cgit v1.2.3 From 5503fcecd49f599e52d10f82057fe0c9d53c8f03 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 18 Jan 2017 15:02:03 +0800 Subject: tun: rx batching We can only process 1 packet at one time during sendmsg(). This often lead bad cache utilization under heavy load. So this patch tries to do some batching during rx before submitting them to host network stack. This is done through accepting MSG_MORE as a hint from sendmsg() caller, if it was set, batch the packet temporarily in a linked list and submit them all once MSG_MORE were cleared. Tests were done by pktgen (burst=128) in guest over mlx4(noqueue) on host: Mpps -+% rx-frames = 0 0.91 +0% rx-frames = 4 1.00 +9.8% rx-frames = 8 1.00 +9.8% rx-frames = 16 1.01 +10.9% rx-frames = 32 1.07 +17.5% rx-frames = 48 1.07 +17.5% rx-frames = 64 1.08 +18.6% rx-frames = 64 (no MSG_MORE) 0.91 +0% User were allowed to change per device batched packets through ethtool -C rx-frames. NAPI_POLL_WEIGHT were used as upper limitation to prevent bh from being disabled too long. Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 70 insertions(+), 6 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8c1d3bd6b4d0..13890ac3cb37 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -218,6 +218,7 @@ struct tun_struct { struct list_head disabled; void *security; u32 flow_count; + u32 rx_batched; struct tun_pcpu_stats __percpu *pcpu_stats; }; @@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile) while ((skb = skb_array_consume(&tfile->tx_array)) != NULL) kfree_skb(skb); + skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); } @@ -1139,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, return skb; } +static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, + struct sk_buff *skb, int more) +{ + struct sk_buff_head *queue = &tfile->sk.sk_write_queue; + struct sk_buff_head process_queue; + u32 rx_batched = tun->rx_batched; + bool rcv = false; + + if (!rx_batched || (!more && skb_queue_empty(queue))) { + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); + return; + } + + spin_lock(&queue->lock); + if (!more || skb_queue_len(queue) == rx_batched) { + __skb_queue_head_init(&process_queue); + skb_queue_splice_tail_init(queue, &process_queue); + rcv = true; + } else { + __skb_queue_tail(queue, skb); + } + spin_unlock(&queue->lock); + + if (rcv) { + struct sk_buff *nskb; + + local_bh_disable(); + while ((nskb = __skb_dequeue(&process_queue))) + netif_receive_skb(nskb); + netif_receive_skb(skb); + local_bh_enable(); + } +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, - int noblock) + int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; @@ -1283,9 +1321,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, rxhash = skb_get_hash(skb); #ifndef CONFIG_4KSTACKS - local_bh_disable(); - netif_receive_skb(skb); - local_bh_enable(); + tun_rx_batched(tun, tfile, skb, more); #else netif_rx_ni(skb); #endif @@ -1311,7 +1347,8 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) if (!tun) return -EBADFD; - result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK); + result = tun_get_user(tun, tfile, NULL, from, + file->f_flags & O_NONBLOCK, false); tun_put(tun); return result; @@ -1569,7 +1606,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) return -EBADFD; ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, - m->msg_flags & MSG_DONTWAIT); + m->msg_flags & MSG_DONTWAIT, + m->msg_flags & MSG_MORE); tun_put(tun); return ret; } @@ -1770,6 +1808,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; + tun->rx_batched = 0; tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); if (!tun->pcpu_stats) { @@ -2438,6 +2477,29 @@ static void tun_set_msglevel(struct net_device *dev, u32 value) #endif } +static int tun_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct tun_struct *tun = netdev_priv(dev); + + ec->rx_max_coalesced_frames = tun->rx_batched; + + return 0; +} + +static int tun_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct tun_struct *tun = 
netdev_priv(dev); + + if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) + tun->rx_batched = NAPI_POLL_WEIGHT; + else + tun->rx_batched = ec->rx_max_coalesced_frames; + + return 0; +} + static const struct ethtool_ops tun_ethtool_ops = { .get_settings = tun_get_settings, .get_drvinfo = tun_get_drvinfo, @@ -2445,6 +2507,8 @@ static const struct ethtool_ops tun_ethtool_ops = { .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_coalesce = tun_get_coalesce, + .set_coalesce = tun_set_coalesce, }; static int tun_queue_resize(struct tun_struct *tun) -- cgit v1.2.3 From e33c2ef106d17ad8cdb16a0be0ef0daebb2d2c16 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Wed, 18 Jan 2017 02:28:06 +0200 Subject: net: ethernet: ti: davinci_cpdma: correct check on NULL in set rate Check "ch" on NULL first, then get ctlr. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index d80bff19d4ec..7ecc6b70e7e8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -835,8 +835,8 @@ EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate); */ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) { - struct cpdma_ctlr *ctlr = ch->ctlr; unsigned long flags, ch_flags; + struct cpdma_ctlr *ctlr; int ret, prio_mode; u32 rmask; @@ -846,6 +846,7 @@ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) if (ch->rate == rate) return rate; + ctlr = ch->ctlr; spin_lock_irqsave(&ctlr->lock, flags); spin_lock_irqsave(&ch->lock, ch_flags); -- cgit v1.2.3 From b22de490869da354116ea4cbbaa09dcbc260b2b4 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 17 Jan 2017 20:41:38 -0500 Subject: net: dsa: store CPU switch structure in the tree Store a dsa_switch pointer to the CPU switch in the tree instead of only its index. This avoids the need to initialize it to -1. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 8 ++++---- net/dsa/dsa.c | 7 +++---- net/dsa/dsa2.c | 5 ++--- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index b94d1f2ef912..c72ed7af2a2a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -124,7 +124,7 @@ struct dsa_switch_tree { /* * The switch and port to which the CPU is attached. */ - s8 cpu_switch; + struct dsa_switch *cpu_switch; s8 cpu_port; /* @@ -204,7 +204,7 @@ struct dsa_switch { static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) { - return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); + return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port); } static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) @@ -227,10 +227,10 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) * Else return the (DSA) port number that connects to the * switch that is one hop closer to the cpu. 
*/ - if (dst->cpu_switch == ds->index) + if (dst->cpu_switch == ds) return dst->cpu_port; else - return ds->rtable[dst->cpu_switch]; + return ds->rtable[dst->cpu_switch->index]; } struct switchdev_trans; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index fd532487dfdf..b220609cfe6f 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -225,12 +225,12 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) continue; if (!strcmp(name, "cpu")) { - if (dst->cpu_switch != -1) { + if (!dst->cpu_switch) { netdev_err(dst->master_netdev, "multiple cpu ports?!\n"); return -EINVAL; } - dst->cpu_switch = index; + dst->cpu_switch = ds; dst->cpu_port = i; ds->cpu_port_mask |= 1 << i; } else if (!strcmp(name, "dsa")) { @@ -254,7 +254,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) * tagging protocol to the preferred tagging format of this * switch. */ - if (dst->cpu_switch == index) { + if (dst->cpu_switch == ds) { enum dsa_tag_protocol tag_protocol; tag_protocol = ops->get_tag_protocol(ds); @@ -757,7 +757,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, dst->pd = pd; dst->master_netdev = dev; - dst->cpu_switch = -1; dst->cpu_port = -1; for (i = 0; i < pd->nr_chips; i++) { diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 42a41d84053c..020e072b4299 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -57,7 +57,6 @@ static struct dsa_switch_tree *dsa_add_dst(u32 tree) if (!dst) return NULL; dst->tree = tree; - dst->cpu_switch = -1; INIT_LIST_HEAD(&dst->list); list_add_tail(&dsa_switch_trees, &dst->list); kref_init(&dst->refcount); @@ -448,8 +447,8 @@ static int dsa_cpu_parse(struct device_node *port, u32 index, if (!dst->master_netdev) dst->master_netdev = ethernet_dev; - if (dst->cpu_switch == -1) { - dst->cpu_switch = ds->index; + if (!dst->cpu_switch) { + dst->cpu_switch = ds; dst->cpu_port = index; } -- cgit v1.2.3 From 9520ed8fb8410dcb6babf751561a08f73ca03812 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 17 Jan 2017 20:41:39 -0500 Subject: net: dsa: use cpu_switch instead of ds[0] Now that the DSA Ethernet switches are true Linux devices, the CPU switch is not necessarily the first one. If its address is higher than the second switch on the same MDIO bus, its index will be 1, not 0. Avoid any confusion by using dst->cpu_switch instead of dst->ds[0]. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa.c | 2 +- net/dsa/dsa2.c | 8 ++++---- net/dsa/slave.c | 6 +++--- net/dsa/tag_brcm.c | 2 +- net/dsa/tag_qca.c | 2 +- net/dsa/tag_trailer.c | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index b220609cfe6f..91f96e1bd2ec 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -868,7 +868,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst) dsa_switch_destroy(ds); } - dsa_cpu_port_ethtool_restore(dst->ds[0]); + dsa_cpu_port_ethtool_restore(dst->cpu_switch); dev_put(dst->master_netdev); } diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 020e072b4299..866222a8f9bf 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -377,8 +377,8 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst) return err; } - if (dst->ds[0]) { - err = dsa_cpu_port_ethtool_setup(dst->ds[0]); + if (dst->cpu_switch) { + err = dsa_cpu_port_ethtool_setup(dst->cpu_switch); if (err) return err; } @@ -418,8 +418,8 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst) dsa_ds_unapply(dst, ds); } - if (dst->ds[0]) - dsa_cpu_port_ethtool_restore(dst->ds[0]); + if (dst->cpu_switch) + dsa_cpu_port_ethtool_restore(dst->cpu_switch); pr_info("DSA: tree %d unapplied\n", dst->tree); dst->applied = false; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 0cdcaf526987..b8e58689a9a1 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -781,7 +781,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev, uint64_t *data) { struct dsa_switch_tree *dst = dev->dsa_ptr; - struct dsa_switch *ds = dst->ds[0]; + struct dsa_switch *ds = dst->cpu_switch; s8 cpu_port = dst->cpu_port; int count = 0; @@ -798,7 +798,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev, static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset) { struct dsa_switch_tree *dst = dev->dsa_ptr; - struct dsa_switch *ds = dst->ds[0]; + struct dsa_switch *ds = dst->cpu_switch; int count = 0; if (dst->master_ethtool_ops.get_sset_count) @@ -814,7 +814,7 @@ static void dsa_cpu_port_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct dsa_switch_tree *dst = dev->dsa_ptr; - struct dsa_switch *ds = dst->ds[0]; + struct dsa_switch *ds = dst->cpu_switch; s8 cpu_port = dst->cpu_port; int len = ETH_GSTRING_LEN; int mcount = 0, count; diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 21bffde6e4bf..af82927674e0 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -102,7 +102,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, if (unlikely(dst == NULL)) goto out_drop; - ds = dst->ds[0]; + ds = dst->cpu_switch; skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index 0c90cacee7aa..736ca8e8c31e 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -104,7 +104,7 @@ static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, /* This protocol doesn't support cascading multiple switches so it's * safe to assume the switch is first in the tree */ - ds = dst->ds[0]; + ds = dst->cpu_switch; if (!ds) goto out_drop; diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 5e3903eb1afa..271128a2dc64 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -67,7 +67,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, if (unlikely(dst == NULL)) goto out_drop; - ds = dst->ds[0]; + ds = dst->cpu_switch; skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) -- cgit v1.2.3 From 
4a7c972644c1151f6dd34ff4b5f7eacb239e22ee Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 18 Jan 2017 17:45:01 +0100 Subject: net: Remove usage of net_device last_rx member The network stack no longer uses the last_rx member of struct net_device since the bonding driver switched to use its own private last_rx in commit 9f242738376d ("bonding: use last_arp_rx in slave_last_rx()"). However, some drivers still (ab)use the field for their own purposes and some driver just update it without actually using it. Previously, there was an accompanying comment for the last_rx member added in commit 4dc89133f49b ("net: add a comment on netdev->last_rx") which asked drivers not to update is, unless really needed. However, this commend was removed in commit f8ff080dacec ("bonding: remove useless updating of slave->dev->last_rx"), so some drivers added later on still did update last_rx. Remove all usage of last_rx and switch three drivers (sky2, atp and smc91c92_cs) which actually read and write it to use their own private copy in netdev_priv. Compile-tested with allyesconfig and allmodconfig on x86 and arm. Cc: Eric Dumazet Cc: Jay Vosburgh Cc: Veaceslav Falico Cc: Andy Gospodarek Cc: Mirko Lindner Cc: Stephen Hemminger Signed-off-by: Tobias Klauser Acked-by: Eric Dumazet Reviewed-by: Jay Vosburgh Signed-off-by: David S. Miller --- arch/m68k/emu/nfeth.c | 1 - drivers/net/ethernet/cavium/liquidio/lio_main.c | 1 - drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 1 - drivers/net/ethernet/hisilicon/hns/hns_enet.c | 1 - drivers/net/ethernet/intel/e1000e/netdev.c | 6 +++--- drivers/net/ethernet/intel/igb/igb_main.c | 6 +++--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 +++---- drivers/net/ethernet/marvell/sky2.c | 6 +++--- drivers/net/ethernet/marvell/sky2.h | 1 + drivers/net/ethernet/qualcomm/emac/emac-mac.c | 1 - drivers/net/ethernet/realtek/atp.c | 7 +++---- drivers/net/ethernet/smsc/smc91c92_cs.c | 6 ++++-- drivers/net/irda/bfin_sir.c | 5 ++--- drivers/net/irda/sh_sir.c | 1 - drivers/staging/ks7010/ks_hostif.c | 2 -- drivers/staging/netlogic/xlr_net.c | 1 - drivers/staging/rtl8192e/rtllib_rx.c | 1 - drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 4 ---- drivers/staging/wlan-ng/hfa384x_usb.c | 1 - drivers/staging/wlan-ng/p80211netdev.c | 2 -- include/linux/netdevice.h | 3 --- net/batman-adv/bridge_loop_avoidance.c | 1 - net/batman-adv/distributed-arp-table.c | 1 - net/batman-adv/soft-interface.c | 2 -- 24 files changed, 22 insertions(+), 46 deletions(-) diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c index fc4be028c418..e45ce4243aaa 100644 --- a/arch/m68k/emu/nfeth.c +++ b/arch/m68k/emu/nfeth.c @@ -124,7 +124,6 @@ static inline void recv_packet(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += pktlen; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 2b89ec291b8b..5ee3f007c613 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2360,7 +2360,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 
19d88fb387ce..e96cf6cdecfd 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1571,7 +1571,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index b7cb61385ad8..f7b75e96c1c3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -797,7 +797,6 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, skb->protocol = eth_type_trans(skb, ndev); (void)napi_gro_receive(&ring_data->napi, skb); - ndev->last_rx = jiffies; } static int hns_desc_unused(struct hnae_ring *ring) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 79651eb608ff..2175cced402f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7fc95493b692..be456bae8169 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ffe7d940d9ff..3b3b52b62a5f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -611,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state " - "trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", + "trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, netdev->state, - dev_trans_start(netdev), - netdev->last_rx); + dev_trans_start(netdev)); } /* Print Registers */ diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index be003c5a4f5f..2b2cc3f3ca10 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, sky2->rx_stats.bytes += bytes; u64_stats_update_end(&sky2->rx_stats.syncp); - dev->last_rx = jiffies; + sky2->last_rx = jiffies; sky2_rx_update(netdev_priv(dev), 
rxqaddr[port]); } @@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev) u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); /* If idle and MAC or PCI is stuck */ - if (sky2->check.last == dev->last_rx && + if (sky2->check.last == sky2->last_rx && ((mac_rp == sky2->check.mac_rp && mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || /* Check if the PCI RX hang */ @@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev) fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); return 1; } else { - sky2->check.last = dev->last_rx; + sky2->check.last = sky2->last_rx; sky2->check.mac_rp = mac_rp; sky2->check.mac_lev = mac_lev; sky2->check.fifo_rp = fifo_rp; diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ec6dcd80152b..0fe160796842 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2247,6 +2247,7 @@ struct sky2_port { u16 rx_data_size; u16 rx_nfrags; + unsigned long last_rx; struct { unsigned long last; u32 mac_rp; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 0b4deb31e742..d297ed961da6 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1213,7 +1213,6 @@ void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd), (bool)RRD_CVTAG(&rrd)); - netdev->last_rx = jiffies; (*num_pkts)++; } while (*num_pkts < max_pkts); diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 570ed3bd3cbf..9bcd4aefc9c5 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -170,7 +170,7 @@ struct net_local { spinlock_t lock; struct net_device *next_module; struct timer_list timer; /* Media selection timer. */ - long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ + unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; unsigned int tx_unit_busy:1; unsigned char re_tx, /* Number of packet retransmissions. */ @@ -668,11 +668,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) } num_tx_since_rx++; } else if (num_tx_since_rx > 8 && - time_after(jiffies, dev->last_rx + HZ)) { + time_after(jiffies, lp->last_rx_time + HZ)) { if (net_debug > 2) printk(KERN_DEBUG "%s: Missed packet? 
No Rx after %d Tx and " "%ld jiffies status %02x CMR1 %02x.\n", dev->name, - num_tx_since_rx, jiffies - dev->last_rx, status, + num_tx_since_rx, jiffies - lp->last_rx_time, status, (read_nibble(ioaddr, CMR1) >> 3) & 15); dev->stats.rx_missed_errors++; hardware_init(dev); @@ -789,7 +789,6 @@ static void net_rx(struct net_device *dev) read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 67154621abcf..97280daba27f 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -113,6 +113,7 @@ struct smc_private { struct mii_if_info mii_if; int duplex; int rx_ovrn; + unsigned long last_rx; }; /* Special definitions for Megahertz multifunction cards */ @@ -1491,6 +1492,7 @@ static void smc_rx(struct net_device *dev) if (!(rx_status & RS_ERRORS)) { /* do stuff to make a new packet */ struct sk_buff *skb; + struct smc_private *smc = netdev_priv(dev); /* Note: packet_length adds 5 or 6 extra bytes here! */ skb = netdev_alloc_skb(dev, packet_length+2); @@ -1509,7 +1511,7 @@ static void smc_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; + smc->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += packet_length; if (rx_status & RS_MULTICAST) @@ -1790,7 +1792,7 @@ static void media_check(u_long arg) } /* Ignore collisions unless we've had no rx's recently */ - if (time_after(jiffies, dev->last_rx + HZ)) { + if (time_after(jiffies, smc->last_rx + HZ)) { if (smc->tx_err || (smc->media_status & EPH_16COL)) media |= EPH_16COL; } diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index be5bb0b7f29c..3151b580dbd6 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -22,7 +22,7 @@ static int max_rate = 57600; static int max_rate = 115200; #endif -static void turnaround_delay(unsigned long last_jif, int mtt) +static void turnaround_delay(int mtt) { long ticks; @@ -209,7 +209,6 @@ static void bfin_sir_rx_chars(struct net_device *dev) UART_CLEAR_LSR(port); ch = UART_GET_CHAR(port); async_unwrap_char(dev, &self->stats, &self->rx_buff, ch); - dev->last_rx = jiffies; } static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id) @@ -510,7 +509,7 @@ static void bfin_sir_send_work(struct work_struct *work) int tx_cnt = 10; while (bfin_sir_is_receiving(dev) && --tx_cnt) - turnaround_delay(dev->last_rx, self->mtt); + turnaround_delay(self->mtt); bfin_sir_stop_rx(port); diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index e3fe9a286136..fede6864c737 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -547,7 +547,6 @@ static void sh_sir_rx(struct sh_sir_self *self) async_unwrap_char(self->ndev, &self->ndev->stats, &self->rx_buff, (u8)data); - self->ndev->last_rx = jiffies; if (EOFD & sh_sir_read(self, IRIF_SIR_FRM)) continue; diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c index 1fbd495e5e63..c7652c35be19 100644 --- a/drivers/staging/ks7010/ks_hostif.c +++ b/drivers/staging/ks7010/ks_hostif.c @@ -461,7 +461,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->protocol = eth_type_trans(skb, skb->dev); priv->nstats.rx_packets++; priv->nstats.rx_bytes += rx_ind_size; - skb->dev->last_rx = jiffies; netif_rx(skb); } else { 
priv->nstats.rx_dropped++; @@ -494,7 +493,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->protocol = eth_type_trans(skb, skb->dev); priv->nstats.rx_packets++; priv->nstats.rx_bytes += rx_ind_size; - skb->dev->last_rx = jiffies; netif_rx(skb); } else { priv->nstats.rx_dropped++; diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index f84069ffa8c6..781ef623233e 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -155,7 +155,6 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code, skb_reserve(skb, BYTE_OFFSET); skb_put(skb, length); skb->protocol = eth_type_trans(skb, skb->dev); - skb->dev->last_rx = jiffies; netif_rx(skb); /* Fill rx ring */ skb_data = xlr_alloc_skb(); diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index e5ba7d1a809f..43a77745e6fb 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1375,7 +1375,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb, ieee->LinkDetectInfo.NumRecvDataInPeriod++; ieee->LinkDetectInfo.NumRxOkInPeriod++; } - dev->last_rx = jiffies; /* Data frame - extract src/dst addresses */ rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index 82f654305414..b1f2fdfcb718 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -1103,11 +1103,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, stats = hostap_get_stats(dev); from_assoc_ap = 1; } -#endif - - dev->last_rx = jiffies; -#ifdef NOT_YET if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 4fe037aeef12..6134eba5cad4 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3409,7 +3409,6 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) &usbin->rxfrm.desc.frame_control, hdrlen); skb->dev = wlandev->netdev; - skb->dev->last_rx = jiffies; /* And set the frame length properly */ skb_trim(skb, data_len + hdrlen); diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 73fcf07254fe..53dbbd69e552 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -252,7 +252,6 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev, } if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) { - skb->dev->last_rx = jiffies; wlandev->netdev->stats.rx_packets++; wlandev->netdev->stats.rx_bytes += skb->len; netif_rx_ni(skb); @@ -287,7 +286,6 @@ static void p80211netdev_rx_bh(unsigned long arg) skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_80211_RAW); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 97ae0ac513ee..3868c32d98af 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1551,7 +1551,6 @@ enum netdev_priv_flags { * @ax25_ptr: AX.25 specific data * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering * - * @last_rx: Time of last Rx * @dev_addr: Hw address (before bcast, * because most packets 
are unicast) * @@ -1777,8 +1776,6 @@ struct net_device { /* * Cache lines mostly used on receive path (including eth_type_trans()) */ - unsigned long last_rx; - /* Interface address info used in eth_type_trans() */ unsigned char *dev_addr; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index e7f690b571ea..36917a7b1b59 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -449,7 +449,6 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, batadv_inc_counter(bat_priv, BATADV_CNT_RX); batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); - soft_iface->last_rx = jiffies; netif_rx(skb); out: diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 49576c5a3fe3..6394206bfcae 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1050,7 +1050,6 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, bat_priv->soft_iface); bat_priv->stats.rx_packets++; bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; - bat_priv->soft_iface->last_rx = jiffies; netif_rx(skb_new); batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 7b3494ae6ad9..420e19b501f2 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -481,8 +481,6 @@ void batadv_interface_rx(struct net_device *soft_iface, batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); - soft_iface->last_rx = jiffies; - /* Let the bridge loop avoidance check the packet. If will * not handle it, we can safely push it up. */ -- cgit v1.2.3 From 259010c509b6f28b3b851ae45238cf526f52e185 Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Sat, 3 Dec 2016 18:27:37 +0800 Subject: libertas: fix improper return value Function lbs_cmd_802_11_sleep_params() always return 0, even if the call to lbs_cmd_with_response() fails. In this case, the parameter @sp will keep uninitialized. Because the return value is 0, its caller (say lbs_sleepparams_read()) will not detect the error, and will copy the uninitialized stack memory to user sapce, resulting in stack information leak. To avoid the bug, this patch returns variable ret (which takes the return value of lbs_cmd_with_response()) instead of 0. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188451 Signed-off-by: Pan Bian Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 301170cccfff..033ff881c751 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -305,7 +305,7 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, } lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); - return 0; + return ret; } static int lbs_wait_for_ds_awake(struct lbs_private *priv) -- cgit v1.2.3 From e457a8a01a19277e96830d3d95887e0e3c1e2f26 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 7 Jan 2017 23:43:45 +0100 Subject: brcmfmac: make brcmf_of_probe more generic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We may want to use Open Firmware for other devices than just SDIO ones. 
In future we may want to support more Broadcom properties so there is really no reason for such limitation. Call brcmf_of_probe for all kind of devices & move extra conditions to the body of that funcion. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 8 +++----- drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c | 7 +++++-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h | 6 ++++-- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 3e15d64c6481..b1f574f5ee6c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -299,11 +299,9 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, } } } - if ((bus_type == BRCMF_BUSTYPE_SDIO) && (!found)) { - /* No platform data for this device. In case of SDIO try OF - * (Open Firwmare) Device Tree. - */ - brcmf_of_probe(dev, &settings->bus.sdio); + if (!found) { + /* No platform data for this device, try OF (Open Firwmare) */ + brcmf_of_probe(dev, bus_type, settings); } return settings; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 425c41dc0a59..aee6e5937c41 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -23,14 +23,17 @@ #include "common.h" #include "of.h" -void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio) +void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings) { + struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio; struct device_node *np = dev->of_node; int irq; u32 irqf; u32 val; - if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac")) + if (!np || bus_type != BRCMF_BUSTYPE_SDIO || + !of_device_is_compatible(np, "brcm,bcm4329-fmac")) return; if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h index a9d94c15d0f5..95b7032d54b1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h @@ -14,9 +14,11 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef CONFIG_OF -void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio); +void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings); #else -static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio) +static void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings) { } #endif /* CONFIG_OF */ -- cgit v1.2.3 From 5f0a221f59ad6b72202ef9c6e232086de8c336f2 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 11 Jan 2017 21:41:24 +0530 Subject: mwifiex: remove redundant dma padding in AMSDU We already ensure 64 bytes alignment and add padding if required during skb_aggr allocation. Alignment and padding in mwifiex_11n_form_amsdu_txpd() is redundant. We may end up accessing more data than allocated size with this. This patch fixes following issue by removing redundant padding. 
[ 370.241338] skbuff: skb_over_panic: text:ffffffffc046946a len:3550 put:72 head:ffff880000110000 data:ffff8800001100e4 tail:0xec2 end:0xec0 dev: [ 370.241374] ------------[ cut here ]------------ [ 370.241382] kernel BUG at net/core/skbuff.c:104! 370.244032] Call Trace: [ 370.244041] [] skb_put+0x44/0x45 [ 370.244055] [] mwifiex_11n_aggregate_pkt+0x1e9/0xa50 [mwifiex] [ 370.244067] [] mwifiex_wmm_process_tx+0x44a/0x6b7 [mwifiex] [ 370.244074] [] ? 0xffffffffc0411eb8 [ 370.244084] [] mwifiex_main_process+0x476/0x5a5 [mwifiex] [ 370.244098] [] mwifiex_main_process+0x5a3/0x5a5 [mwifiex] [ 370.244113] [] process_one_work+0x1a4/0x309 [ 370.244123] [] worker_thread+0x20c/0x2ee [ 370.244130] [] ? rescuer_thread+0x383/0x383 [ 370.244136] [] ? rescuer_thread+0x383/0x383 [ 370.244143] [] kthread+0x11c/0x124 [ 370.244150] [] ? kthread_parkme+0x24/0x24 [ 370.244157] [] ret_from_fork+0x3f/0x70 [ 370.244168] [] ? kthread_parkme+0x24/0x24 Fixes: 84b313b35f8158d ("mwifiex: make tx packet 64 byte DMA aligned") Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c index c47d6366875d..a75013ac84d7 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c @@ -101,13 +101,6 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, { struct txpd *local_tx_pd; struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); - unsigned int pad; - int headroom = (priv->adapter->iface_type == - MWIFIEX_USB) ? 0 : INTF_HEADER_LEN; - - pad = ((void *)skb->data - sizeof(*local_tx_pd) - - headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1); - skb_push(skb, pad); skb_push(skb, sizeof(*local_tx_pd)); @@ -121,12 +114,10 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, local_tx_pd->bss_num = priv->bss_num; local_tx_pd->bss_type = priv->bss_type; /* Always zero as the data is followed by struct txpd */ - local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + - pad); + local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd)); local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU); local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - - sizeof(*local_tx_pd) - - pad); + sizeof(*local_tx_pd)); if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT) local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET; @@ -190,7 +181,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, ra_list_flags); return -1; } - skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN); + + /* skb_aggr->data already 64 byte align, just reserve bus interface + * header and txpd. + */ + skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); memset(tx_info_aggr, 0, sizeof(*tx_info_aggr)); -- cgit v1.2.3 From feecb0cb466ba458f59640b4d59ecef1cd956b1f Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 13 Jan 2017 15:55:07 +0100 Subject: rt2800: remove warning on bcn_num != rt2x00dev->intf_beaconing Since rt2800pci update beacon settings asynchronously from tbtt tasklet, without beacon_skb_mutex protection, number of currently active beacons entries can be different than number pointed by rt2x00dev->intf_beaconing. Remove warning about that inconsistency. 
Reported-by: evaxige@qq.com Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index ff047dc5cc2c..f36bc9bf55bd 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -967,8 +967,6 @@ static void rt2800_update_beacons_setup(struct rt2x00_dev *rt2x00dev) bcn_num++; } - WARN_ON_ONCE(bcn_num != rt2x00dev->intf_beaconing); - rt2800_register_write(rt2x00dev, BCN_OFFSET0, (u32) reg); rt2800_register_write(rt2x00dev, BCN_OFFSET1, (u32) (reg >> 32)); -- cgit v1.2.3 From e974f3acafe3c7cc84ceef35c8e6ab59dea2f91a Mon Sep 17 00:00:00 2001 From: Serge Vasilugin Date: Mon, 16 Jan 2017 04:08:38 +0100 Subject: rt2x00: rt2800lib: correctly set HT20/HT40 filter Simple patch to correct HT20/HT40 filter setting. Tested with Rt3290, Rt3352 and Rt5350 Signed-off-by: Serge Vasilugin Signed-off-by: Daniel Golle Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800.h | 2 ++ drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 13 +++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index ec622a08a486..52743d451e72 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -2286,6 +2286,8 @@ struct mac_iveiv_entry { #define RFCSR30_RX_H20M FIELD8(0x04) #define RFCSR30_RX_VCM FIELD8(0x18) #define RFCSR30_RF_CALIBRATION FIELD8(0x80) +#define RF3322_RFCSR30_TX_H20M FIELD8(0x01) +#define RF3322_RFCSR30_RX_H20M FIELD8(0x02) /* * RFCSR 31: diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index f36bc9bf55bd..7480b28a6873 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -3220,8 +3220,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2x00_rf(rt2x00dev, RF5390) || rt2x00_rf(rt2x00dev, RF5392)) { rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0); - rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0); + if (rt2x00_rf(rt2x00dev, RF3322)) { + rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_TX_H20M, + conf_is_ht40(conf)); + rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_RX_H20M, + conf_is_ht40(conf)); + } else { + rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, + conf_is_ht40(conf)); + rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, + conf_is_ht40(conf)); + } rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); -- cgit v1.2.3 From b8c2db58d5a1a5e216e84c0e1ea97f1b6d9a8c45 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 16 Jan 2017 04:13:01 +0100 Subject: rt2x00: rt2800lib: fix rf id for RT3352 Signed-off-by: Felix Fietkau Signed-off-by: Daniel Golle Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 7480b28a6873..41fbfd6c4eec 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -7112,6 +7112,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, 
RT5392)) rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf); + else if (rt2x00_rt(rt2x00dev, RT3352)) + rf = RF3322; else rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); -- cgit v1.2.3 From dab38e7d251d26917935352bee605974cc46eaec Mon Sep 17 00:00:00 2001 From: Daniel Golle Date: Mon, 16 Jan 2017 04:14:57 +0100 Subject: rt2x00: rt2800lib: support for for RT3352 with external PA This is needed for WiFi to work e.g. on DIR-615 rev.H1 which got external RF power amplifiers connected to the WiSoC. Signed-off-by: Daniel Golle Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800.h | 24 ++++++++ drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 76 +++++++++++++++++++++----- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 2 + 3 files changed, 89 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 52743d451e72..18096903e20f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -2302,6 +2302,12 @@ struct mac_iveiv_entry { /* RFCSR 36 bits for RF3053 */ #define RFCSR36_RF_BS FIELD8(0x80) +/* + * RFCSR 34: + */ +#define RFCSR34_TX0_EXT_PA FIELD8(0x04) +#define RFCSR34_TX1_EXT_PA FIELD8(0x08) + /* * RFCSR 38: */ @@ -2313,6 +2319,18 @@ struct mac_iveiv_entry { #define RFCSR39_RX_DIV FIELD8(0x40) #define RFCSR39_RX_LO2_EN FIELD8(0x80) +/* + * RFCSR 41: + */ +#define RFCSR41_BIT1 FIELD8(0x01) +#define RFCSR41_BIT4 FIELD8(0x08) + +/* + * RFCSR 42: + */ +#define RFCSR42_BIT1 FIELD8(0x01) +#define RFCSR42_BIT4 FIELD8(0x08) + /* * RFCSR 49: */ @@ -2326,6 +2344,8 @@ struct mac_iveiv_entry { * RFCSR 50: */ #define RFCSR50_TX FIELD8(0x3f) +#define RFCSR50_TX0_EXT_PA FIELD8(0x02) +#define RFCSR50_TX1_EXT_PA FIELD8(0x10) #define RFCSR50_EP FIELD8(0xc0) /* bits for RT3593 */ #define RFCSR50_TX_LO1_EN FIELD8(0x20) @@ -2473,6 +2493,8 @@ enum rt2800_eeprom_word { * INTERNAL_TX_ALC: 0: disable, 1: enable * BT_COEXIST: 0: disable, 1: enable * DAC_TEST: 0: disable, 1: enable + * EXTERNAL_TX0_PA: 0: disable, 1: enable (only on RT3352) + * EXTERNAL_TX1_PA: 0: disable, 1: enable (only on RT3352) */ #define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001) #define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002) @@ -2489,6 +2511,8 @@ enum rt2800_eeprom_word { #define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000) #define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000) #define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000) +#define EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352 FIELD16(0x4000) +#define EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352 FIELD16(0x8000) /* * EEPROM frequency diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 41fbfd6c4eec..8ea844d35ecb 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -3241,11 +3241,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, /* * Change BBP settings */ + if (rt2x00_rt(rt2x00dev, RT3352)) { + rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 27, 0x0); rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 27, 0x20); rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + 
rt2800_bbp_write(rt2x00dev, 83, 0x6a); } else if (rt2x00_rt(rt2x00dev, RT3593)) { if (rf->channel > 14) { /* Disable CCK Packet detection on 5GHz */ @@ -6187,6 +6194,12 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev) static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) { + int tx0_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX0, + &rt2x00dev->cap_flags); + int tx1_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX1, + &rt2x00dev->cap_flags); + u8 rfcsr; + rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); @@ -6222,15 +6235,30 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); - rt2800_rfcsr_write(rt2x00dev, 34, 0x01); + rfcsr = 0x01; + if (!tx0_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1); + if (!tx1_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1); + rt2800_rfcsr_write(rt2x00dev, 34, rfcsr); rt2800_rfcsr_write(rt2x00dev, 35, 0x03); rt2800_rfcsr_write(rt2x00dev, 36, 0xbd); rt2800_rfcsr_write(rt2x00dev, 37, 0x3c); rt2800_rfcsr_write(rt2x00dev, 38, 0x5f); rt2800_rfcsr_write(rt2x00dev, 39, 0xc5); rt2800_rfcsr_write(rt2x00dev, 40, 0x33); - rt2800_rfcsr_write(rt2x00dev, 41, 0x5b); - rt2800_rfcsr_write(rt2x00dev, 42, 0x5b); + rfcsr = 0x52; + if (tx0_int_pa) { + rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1); + rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1); + } + rt2800_rfcsr_write(rt2x00dev, 41, rfcsr); + rfcsr = 0x52; + if (tx1_int_pa) { + rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1); + rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1); + } + rt2800_rfcsr_write(rt2x00dev, 42, rfcsr); rt2800_rfcsr_write(rt2x00dev, 43, 0xdb); rt2800_rfcsr_write(rt2x00dev, 44, 0xdb); rt2800_rfcsr_write(rt2x00dev, 45, 0xdb); @@ -6238,15 +6266,20 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 47, 0x0d); rt2800_rfcsr_write(rt2x00dev, 48, 0x14); rt2800_rfcsr_write(rt2x00dev, 49, 0x00); - rt2800_rfcsr_write(rt2x00dev, 50, 0x2d); - rt2800_rfcsr_write(rt2x00dev, 51, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 52, 0x00); - rt2800_rfcsr_write(rt2x00dev, 53, 0x52); - rt2800_rfcsr_write(rt2x00dev, 54, 0x1b); - rt2800_rfcsr_write(rt2x00dev, 55, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 56, 0x00); - rt2800_rfcsr_write(rt2x00dev, 57, 0x52); - rt2800_rfcsr_write(rt2x00dev, 58, 0x1b); + rfcsr = 0x2d; + if (!tx0_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1); + if (!tx1_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1); + rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); + rt2800_rfcsr_write(rt2x00dev, 51, (tx0_int_pa ? 0x7f : 0x52)); + rt2800_rfcsr_write(rt2x00dev, 52, (tx0_int_pa ? 0x00 : 0xc0)); + rt2800_rfcsr_write(rt2x00dev, 53, (tx0_int_pa ? 0x52 : 0xd2)); + rt2800_rfcsr_write(rt2x00dev, 54, (tx0_int_pa ? 0x1b : 0xc0)); + rt2800_rfcsr_write(rt2x00dev, 55, (tx1_int_pa ? 0x7f : 0x52)); + rt2800_rfcsr_write(rt2x00dev, 56, (tx1_int_pa ? 0x00 : 0xc0)); + rt2800_rfcsr_write(rt2x00dev, 57, (tx0_int_pa ? 0x52 : 0x49)); + rt2800_rfcsr_write(rt2x00dev, 58, (tx1_int_pa ? 0x1b : 0xc0)); rt2800_rfcsr_write(rt2x00dev, 59, 0x00); rt2800_rfcsr_write(rt2x00dev, 60, 0x00); rt2800_rfcsr_write(rt2x00dev, 61, 0x00); @@ -7203,7 +7236,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Detect if this device has Bluetooth co-existence. 
*/ - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) + if (!rt2x00_rt(rt2x00dev, RT3352) && + rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) __set_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags); /* @@ -7232,6 +7266,22 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) EIRP_MAX_TX_POWER_LIMIT) __set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags); + /* + * Detect if device uses internal or external PA + */ + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + + if (rt2x00_rt(rt2x00dev, RT3352)) { + if (!rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352)) + __set_bit(CAPABILITY_INTERNAL_PA_TX0, + &rt2x00dev->cap_flags); + if (!rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352)) + __set_bit(CAPABILITY_INTERNAL_PA_TX1, + &rt2x00dev->cap_flags); + } + return 0; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 034a07273038..bea7ac30522f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -716,6 +716,8 @@ enum rt2x00_capability_flags { CAPABILITY_DOUBLE_ANTENNA, CAPABILITY_BT_COEXIST, CAPABILITY_VCO_RECALIBRATION, + CAPABILITY_INTERNAL_PA_TX0, + CAPABILITY_INTERNAL_PA_TX1, }; /* -- cgit v1.2.3 From 96d179b517a91008f1e335a8c12533a952925ab3 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 11 Jan 2017 16:31:30 +0000 Subject: ath6kl: fix warning for using 0 as NULL Fixes the following sparse warning: drivers/net/wireless/ath/ath6kl/sdio.c:716:55: warning: Using plain integer as NULL pointer Signed-off-by: Wei Yongjun Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 8ec66e74d06d..2195b1b7a8a6 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -713,7 +713,7 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) * that the packet is properly freed? */ if (s_req->busrequest) { - s_req->busrequest->scat_req = 0; + s_req->busrequest->scat_req = NULL; ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); } kfree(s_req->virt_dma_buf); -- cgit v1.2.3 From 0a744d927406389e00687560d9ce3c5ab0e58db9 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 12 Jan 2017 16:14:30 +0100 Subject: ath10k: prevent sta pointer rcu violation Station pointers are RCU protected so driver must be extra careful if it tries to store them internally for later use outside of the RCU section it obtained it in. It was possible for station teardown to race with some htt events. The possible outcome could be a use-after-free and a crash. Only peer-flow-control capable firmware was affected (so hardware-wise qca99x0 and qca4019). This could be done in sta_state() itself via explicit synchronize_net() call but there's already a convenient sta_pre_rcu_remove() op that can be hooked up to avoid extra rcu stall. The peer->sta pointer itself can't be set to NULL/ERR_PTR because it is later used in sta_state() for extra sanity checks. 
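[Editor's sketch, not part of this patch] A minimal illustration of how a lookup on the htt event path uses the new flag, mirroring the ath10k_mac_txq_lookup() hunk below but with the surrounding locking spelled out. The helper name example_safe_txq_lookup() is hypothetical; only ath10k_peer_find_by_id(), ar->data_lock, peer->sta and peer->removed come from the driver and this patch.

static struct ieee80211_txq *example_safe_txq_lookup(struct ath10k *ar,
						     u16 peer_id, u8 tid)
{
	struct ath10k_peer *peer;
	struct ieee80211_txq *txq = NULL;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	/* A peer whose station is mid-teardown is only flagged here, not
	 * freed yet, so skipping it is what avoids the use-after-free.
	 */
	if (peer && peer->sta && !peer->removed)
		txq = peer->sta->txq[tid];
	spin_unlock_bh(&ar->data_lock);

	return txq;
}

The idea is that a handler which still observes removed == false started before sta_pre_rcu_remove() ran, and therefore completes before mac80211's RCU grace period lets the station memory be freed.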
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/mac.c | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index c7664d6569fa..1ab589296dff 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -314,6 +314,7 @@ struct ath10k_peer { struct ieee80211_vif *vif; struct ieee80211_sta *sta; + bool removed; int vdev_id; u8 addr[ETH_ALEN]; DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 28bf19914fff..9977829a6ec4 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3805,6 +3805,9 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, if (!peer) return NULL; + if (peer->removed) + return NULL; + if (peer->sta) return peer->sta->txq[tid]; else if (peer->vif) @@ -7517,6 +7520,20 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, return 0; } +static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath10k *ar; + struct ath10k_peer *peer; + + ar = hw->priv; + + list_for_each_entry(peer, &ar->peers, list) + if (peer->sta == sta) + peer->removed = true; +} + static const struct ieee80211_ops ath10k_ops = { .tx = ath10k_mac_op_tx, .wake_tx_queue = ath10k_mac_op_wake_tx_queue, @@ -7557,6 +7574,7 @@ static const struct ieee80211_ops ath10k_ops = { .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, + .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, CFG80211_TESTMODE_CMD(ath10k_tm_cmd) -- cgit v1.2.3 From 0f8a2b7772f75d155c95ce3d023b81ad911234c9 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Fri, 13 Jan 2017 16:00:03 +0530 Subject: ath10k: fix per station tx bit rate reporting Not clearing the previous tx bit rate status results in a ambigous tx bit rate reporting to mac80211/cfg80211, for example the previous bit rate status would have been marked as legacy rate , while the current rate would have been an HT/VHT rate with the tx bit rate flags set and this results in exporting tx bitrate as legacy rate but with HT/VHT rate flags set, fix this by clearing the tx bitrate status for each event. 
This also fixes the below warning when we do: iw dev wlan#N station dump WARNING: net/wireless/util.c:1222 cfg80211 [] (warn_slowpath_null) from [] (cfg80211_calculate_bitrate+0x110/0x1f4 [cfg80211]) [] (cfg80211_calculate_bitrate [cfg80211]) from [] (nl80211_put_sta_rate+0x44/0x1dc [cfg80211]) [] (nl80211_put_sta_rate [cfg80211]) from [] (nl80211_set_interface+0x724/0xd70 [cfg80211]) [] (nl80211_set_interface [cfg80211]) from [] (nl80211_dump_station+0xdc/0x100 [cfg80211]) [] (nl80211_dump_station [cfg80211]) Fixes: cec17c382140 ("ath10k: add per peer htt tx stats support for 10.4") Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 4105005dcdfe..12e67c46ea0a 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2236,6 +2236,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, return; } + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + if (txrate.flags == WMI_RATE_PREAMBLE_CCK || txrate.flags == WMI_RATE_PREAMBLE_OFDM) { rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); -- cgit v1.2.3 From c75c398be6edd3c7e40b905ec4c9db40b0685bd0 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Mon, 16 Jan 2017 17:31:08 +0200 Subject: ath10k: dump Copy Engine registers during firmware crash Dump Copy Engine source and destination ring addresses. This is useful information to debug firmware crashes, assertes or hangs over long run assessing the Copy Engine Register status. This also enables dumping CE register status in debugfs Crash Dump file. Screenshot: ath10k_pci 0000:02:00.0: simulating hard firmware crash ath10k_pci 0000:02:00.0: firmware crashed! 
(uuid 84901ff5-d33c-456e-93ee-0165dea643cf) ath10k_pci 0000:02:00.0: qca988x hw2.0 target 0x4100016c chip_id 0x043202ff sub 0000:0000 ath10k_pci 0000:02:00.0: kconfig debug 1 debugfs 1 tracing 1 dfs 1 testmode 1 ath10k_pci 0000:02:00.0: firmware ver 10.2.4.70.59-2 api 5 features no-p2p,raw-mode,mfp,allows-mesh-bcast crc32 4159f498 ath10k_pci 0000:02:00.0: board_file api 1 bmi_id N/A crc32 bebc7c08 ath10k_pci 0000:02:00.0: htt-ver 2.1 wmi-op 5 htt-op 2 cal otp max-sta 128 raw 0 hwcrypto 1 ath10k_pci 0000:02:00.0: firmware register dump: ath10k_pci 0000:02:00.0: [00]: 0x4100016C 0x00000000 0x009A0F2A 0x00000000 ath10k_pci 0000:02:00.0: [04]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [08]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [12]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [16]: 0x00000000 0x00000000 0x00000000 0x009A0F2A ath10k_pci 0000:02:00.0: [20]: 0x00000000 0x00401930 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [24]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [28]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [32]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [36]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [40]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [44]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [48]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [52]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: [56]: 0x00000000 0x00000000 0x00000000 0x00000000 ath10k_pci 0000:02:00.0: Copy Engine register dump: ath10k_pci 0000:02:00.0: [00]: 0x00057400 7 7 3 3 ath10k_pci 0000:02:00.0: [01]: 0x00057800 18 18 85 86 ath10k_pci 0000:02:00.0: [02]: 0x00057c00 49 49 48 49 ath10k_pci 0000:02:00.0: [03]: 0x00058000 16 16 17 16 ath10k_pci 0000:02:00.0: [04]: 0x00058400 4 4 44 4 ath10k_pci 0000:02:00.0: [05]: 0x00058800 12 12 11 12 ath10k_pci 0000:02:00.0: [06]: 0x00058c00 3 3 3 3 ath10k_pci 0000:02:00.0: [07]: 0x00059000 0 0 0 0 ieee80211 phy0: Hardware restart was requested ath10k_pci 0000:02:00.0: device successfully recovered Signed-off-by: Mohammed Shafi Shajakhan [kvalo@qca.qualcomm.com: simplify the implementation] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 39 +++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ce.h | 4 ++-- drivers/net/wireless/ath/ath10k/core.h | 16 ++++++++++++++ drivers/net/wireless/ath/ath10k/debug.c | 16 ++++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 3 +++ drivers/net/wireless/ath/ath10k/pci.c | 1 + 6 files changed, 77 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 0b4d79659884..c04e68796a20 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1130,3 +1130,42 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) ce_state->src_ring = NULL; ce_state->dest_ring = NULL; } + +void ath10k_ce_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_crash_data ce; + u32 addr, id; + + lockdep_assert_held(&ar->data_lock); + + ath10k_err(ar, "Copy Engine register dump:\n"); + + spin_lock_bh(&ar_pci->ce_lock); + for (id = 0; id < CE_COUNT; id++) { + addr = ath10k_ce_base_address(ar, id); + ce.base_addr = cpu_to_le32(addr); + + ce.src_wr_idx = 
+ cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr)); + ce.src_r_idx = + cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr)); + ce.dst_wr_idx = + cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr)); + ce.dst_r_idx = + cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr)); + + if (crash_data) + crash_data->ce_crash_data[id] = ce; + + ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id, + le32_to_cpu(ce.base_addr), + le32_to_cpu(ce.src_wr_idx), + le32_to_cpu(ce.src_r_idx), + le32_to_cpu(ce.dst_wr_idx), + le32_to_cpu(ce.dst_r_idx)); + } + + spin_unlock_bh(&ar_pci->ce_lock); +} diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index dfc098606bee..e76a98242b98 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -20,8 +20,6 @@ #include "hif.h" -/* Maximum number of Copy Engine's supported */ -#define CE_COUNT_MAX 12 #define CE_HTT_H2T_MSG_SRC_NENTRIES 8192 /* Descriptor rings must be aligned to this boundary */ @@ -228,6 +226,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar); void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_disable_interrupts(struct ath10k *ar); void ath10k_ce_enable_interrupts(struct ath10k *ar); +void ath10k_ce_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? */ diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 1ab589296dff..757242ef52ac 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -420,6 +420,21 @@ struct ath10k_vif_iter { struct ath10k_vif *arvif; }; +/* Copy Engine register dump, protected by ce-lock */ +struct ath10k_ce_crash_data { + __le32 base_addr; + __le32 src_wr_idx; + __le32 src_r_idx; + __le32 dst_wr_idx; + __le32 dst_r_idx; +}; + +struct ath10k_ce_crash_hdr { + __le32 ce_count; + __le32 reserved[3]; /* for future use */ + struct ath10k_ce_crash_data entries[]; +}; + /* used for crash-dump storage, protected by data-lock */ struct ath10k_fw_crash_data { bool crashed_since_read; @@ -427,6 +442,7 @@ struct ath10k_fw_crash_data { uuid_le uuid; struct timespec timestamp; __le32 registers[REG_DUMP_COUNT_QCA988X]; + struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX]; }; struct ath10k_debug { diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index b882b0892dcf..d5ff0f4ef5ce 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -41,6 +41,7 @@ */ enum ath10k_fw_crash_dump_type { ATH10K_FW_CRASH_DUMP_REGISTERS = 0, + ATH10K_FW_CRASH_DUMP_CE_DATA = 1, ATH10K_FW_CRASH_DUMP_MAX, }; @@ -729,6 +730,7 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, bool mark_read) { struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; + struct ath10k_ce_crash_hdr *ce_hdr; struct ath10k_dump_file_data *dump_data; struct ath10k_tlv_dump_data *dump_tlv; int hdr_len = sizeof(*dump_data); @@ -737,6 +739,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, len = hdr_len; len += sizeof(*dump_tlv) + sizeof(crash_data->registers); + len += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); sofar += hdr_len; @@ -795,6 +799,18 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, sizeof(crash_data->registers)); sofar += 
sizeof(*dump_tlv) + sizeof(crash_data->registers); + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA); + dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0])); + ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data); + ce_hdr->ce_count = cpu_to_le32(CE_COUNT); + memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved)); + memcpy(ce_hdr->entries, crash_data->ce_crash_data, + CE_COUNT * sizeof(ce_hdr->entries[0])); + sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); + ar->debug.fw_crash_data->crashed_since_read = !mark_read; spin_unlock_bh(&ar->data_lock); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 7feffec531cc..38aa7c95732e 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -578,6 +578,9 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_10_4_IPHDR_PAD_CONFIG 1 #define TARGET_10_4_QWRAP_CONFIG 0 +/* Maximum number of Copy Engine's supported */ +#define CE_COUNT_MAX 12 + /* Number of Copy Engines supported */ #define CE_COUNT ar->hw_values->ce_count diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 93b9790cfe8d..95ccd6db7623 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1474,6 +1474,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); ath10k_print_driver_info(ar); ath10k_pci_dump_registers(ar, crash_data); + ath10k_ce_dump_registers(ar, crash_data); spin_unlock_bh(&ar->data_lock); -- cgit v1.2.3 From 39b7b6a6247568f99df059e8a4c9bd674f6b99ac Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 19 Jan 2017 10:45:31 +0100 Subject: net/sched: cls_flower: reduce fl_change stack size The new ARP support has pushed the stack size over the edge on ARM, as there are two large objects on the stack in this function (mask and tb) and both have now grown a bit more: net/sched/cls_flower.c: In function 'fl_change': net/sched/cls_flower.c:928:1: error: the frame size of 1072 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] We can solve this by dynamically allocating one or both of them. I first tried to do it just for the mask, but that only saved 152 bytes on ARM, while this version just does it for the 'tb' array, bringing the stack size back down to 664 bytes. Fixes: 99d31326cbe6 ("net/sched: cls_flower: Support matching on ARP") Signed-off-by: Arnd Bergmann Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_flower.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 27934456d984..9e74b0fa4b89 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -832,23 +832,31 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg; struct cls_fl_filter *fnew; - struct nlattr *tb[TCA_FLOWER_MAX + 1]; + struct nlattr **tb; struct fl_flow_mask mask = {}; int err; if (!tca[TCA_OPTIONS]) return -EINVAL; + tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); + if (!tb) + return -ENOBUFS; + err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy); if (err < 0) - return err; + goto errout_tb; - if (fold && handle && fold->handle != handle) - return -EINVAL; + if (fold && handle && fold->handle != handle) { + err = -EINVAL; + goto errout_tb; + } fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); - if (!fnew) - return -ENOBUFS; + if (!fnew) { + err = -ENOBUFS; + goto errout_tb; + } err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0); if (err < 0) @@ -919,11 +927,14 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, list_add_tail_rcu(&fnew->list, &head->filters); } + kfree(tb); return 0; errout: tcf_exts_destroy(&fnew->exts); kfree(fnew); +errout_tb: + kfree(tb); return err; } -- cgit v1.2.3 From 54c30f604dcc110dba21db7c2c09ddf25b65acbe Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 19 Jan 2017 16:04:25 +0100 Subject: net: caif: Remove unused stats member from struct chnl_net The stats member of struct chnl_net is used nowhere in the code, so it might as well be removed. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- net/caif/chnl_net.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 3408ed51b611..1816fc9f1ee7 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -44,7 +44,6 @@ enum caif_states { struct chnl_net { struct cflayer chnl; - struct net_device_stats stats; struct caif_connect_request conn_req; struct list_head list_field; struct net_device *netdev; -- cgit v1.2.3 From c2594d804d5c8033861d44840673d852d98508c1 Mon Sep 17 00:00:00 2001 From: "Andrei.Pistirica@microchip.com" Date: Thu, 19 Jan 2017 17:56:15 +0200 Subject: macb: Common code to enable ptp support for MACB/GEM This patch does the following: - MACB/GEM-PTP interface - registers and bitfields for TSU - capability flags to enable PTP per platform basis Signed-off-by: Andrei Pistirica Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb.c | 32 +++++++++++++++- drivers/net/ethernet/cadence/macb.h | 74 +++++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index c0fb80acc2da..ff1e648e74c9 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2085,6 +2085,9 @@ static int macb_open(struct net_device *dev) netif_tx_start_all_queues(dev); + if (bp->ptp_info) + bp->ptp_info->ptp_init(dev); + return 0; } @@ -2106,6 +2109,9 @@ static int macb_close(struct net_device *dev) macb_free_consistent(bp); + if (bp->ptp_info) + bp->ptp_info->ptp_remove(dev); + return 0; } @@ -2379,6 +2385,17 @@ static int macb_set_ringparam(struct net_device *netdev, return 0; } +static int macb_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct macb *bp = netdev_priv(netdev); + + if (bp->ptp_info) + return bp->ptp_info->get_ts_info(netdev, info); + + return ethtool_op_get_ts_info(netdev, info); +} + static const struct ethtool_ops macb_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, @@ -2396,7 +2413,7 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = macb_get_ts_info, .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, @@ -2409,6 +2426,7 @@ static const struct ethtool_ops gem_ethtool_ops = { static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct phy_device *phydev = dev->phydev; + struct macb *bp = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; @@ -2416,7 +2434,17 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, rq, cmd); + if (!bp->ptp_info) + return phy_mii_ioctl(phydev, rq, cmd); + + switch (cmd) { + case SIOCSHWTSTAMP: + return bp->ptp_info->set_hwtst(dev, rq, cmd); + case SIOCGHWTSTAMP: + return bp->ptp_info->get_hwtst(dev, rq); + default: + return phy_mii_ioctl(phydev, rq, cmd); + } } static int macb_set_features(struct net_device *netdev, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d67adad67be1..94ddedd2d83e 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -131,6 +131,20 @@ #define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */ #define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */ #define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */ +#define GEM_TISUBN 0x01bc /* 1588 Timer Increment Sub-ns */ +#define GEM_TSH 0x01c0 /* 1588 Timer Seconds High */ +#define GEM_TSL 0x01d0 /* 1588 Timer Seconds Low */ +#define GEM_TN 0x01d4 /* 1588 Timer Nanoseconds */ +#define GEM_TA 0x01d8 /* 1588 Timer Adjust */ +#define GEM_TI 0x01dc /* 1588 Timer Increment */ +#define GEM_EFTSL 0x01e0 /* PTP Event Frame Tx Seconds Low */ +#define GEM_EFTN 0x01e4 /* PTP Event Frame Tx Nanoseconds */ +#define GEM_EFRSL 0x01e8 /* PTP Event Frame Rx Seconds Low */ +#define GEM_EFRN 0x01ec /* PTP Event Frame Rx Nanoseconds */ +#define GEM_PEFTSL 0x01f0 /* PTP Peer Event Frame Tx Secs Low */ +#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */ +#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */ +#define GEM_PEFRN 0x01fc /* PTP Peer 
Event Frame Rx Ns */ #define GEM_DCFG1 0x0280 /* Design Config 1 */ #define GEM_DCFG2 0x0284 /* Design Config 2 */ #define GEM_DCFG3 0x0288 /* Design Config 3 */ @@ -174,6 +188,7 @@ #define MACB_NCR_TPF_SIZE 1 #define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ #define MACB_TZQ_SIZE 1 +#define MACB_SRTSM_OFFSET 15 /* Bitfields in NCFGR */ #define MACB_SPD_OFFSET 0 /* Speed */ @@ -319,6 +334,32 @@ #define MACB_PTZ_SIZE 1 #define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ #define MACB_WOL_SIZE 1 +#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */ +#define MACB_DRQFR_SIZE 1 +#define MACB_SFR_OFFSET 19 /* PTP Sync Frame Received */ +#define MACB_SFR_SIZE 1 +#define MACB_DRQFT_OFFSET 20 /* PTP Delay Request Frame Transmitted */ +#define MACB_DRQFT_SIZE 1 +#define MACB_SFT_OFFSET 21 /* PTP Sync Frame Transmitted */ +#define MACB_SFT_SIZE 1 +#define MACB_PDRQFR_OFFSET 22 /* PDelay Request Frame Received */ +#define MACB_PDRQFR_SIZE 1 +#define MACB_PDRSFR_OFFSET 23 /* PDelay Response Frame Received */ +#define MACB_PDRSFR_SIZE 1 +#define MACB_PDRQFT_OFFSET 24 /* PDelay Request Frame Transmitted */ +#define MACB_PDRQFT_SIZE 1 +#define MACB_PDRSFT_OFFSET 25 /* PDelay Response Frame Transmitted */ +#define MACB_PDRSFT_SIZE 1 +#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ +#define MACB_SRI_SIZE 1 + +/* Timer increment fields */ +#define MACB_TI_CNS_OFFSET 0 +#define MACB_TI_CNS_SIZE 8 +#define MACB_TI_ACNS_OFFSET 8 +#define MACB_TI_ACNS_SIZE 8 +#define MACB_TI_NIT_OFFSET 16 +#define MACB_TI_NIT_SIZE 8 /* Bitfields in MAN */ #define MACB_DATA_OFFSET 0 /* data */ @@ -386,6 +427,17 @@ #define GEM_PBUF_LSO_OFFSET 27 #define GEM_PBUF_LSO_SIZE 1 +/* Bitfields in TISUBN */ +#define GEM_SUBNSINCR_OFFSET 0 +#define GEM_SUBNSINCR_SIZE 16 + +/* Bitfields in TI */ +#define GEM_NSINCR_OFFSET 0 +#define GEM_NSINCR_SIZE 8 + +/* Bitfields in ADJ */ +#define GEM_ADDSUB_OFFSET 31 +#define GEM_ADDSUB_SIZE 1 /* Constants for CLK */ #define MACB_CLK_DIV8 0 #define MACB_CLK_DIV16 1 @@ -413,6 +465,7 @@ #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_JUMBO 0x00000020 +#define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -782,6 +835,20 @@ struct macb_or_gem_ops { int (*mog_rx)(struct macb *bp, int budget); }; +/* MACB-PTP interface: adapt to platform needs. 
*/ +struct macb_ptp_info { + void (*ptp_init)(struct net_device *ndev); + void (*ptp_remove)(struct net_device *ndev); + s32 (*get_ptp_max_adj)(void); + unsigned int (*get_tsu_rate)(struct macb *bp); + int (*get_ts_info)(struct net_device *dev, + struct ethtool_ts_info *info); + int (*get_hwtst)(struct net_device *netdev, + struct ifreq *ifr); + int (*set_hwtst)(struct net_device *netdev, + struct ifreq *ifr, int cmd); +}; + struct macb_config { u32 caps; unsigned int dma_burst_length; @@ -874,6 +941,8 @@ struct macb { unsigned int jumbo_max_len; u32 wol; + + struct macb_ptp_info *ptp_info; /* macb-ptp interface */ }; static inline bool macb_is_gem(struct macb *bp) @@ -881,4 +950,9 @@ static inline bool macb_is_gem(struct macb *bp) return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); } +static inline bool gem_has_ptp(struct macb *bp) +{ + return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP); +} + #endif /* _MACB_H */ -- cgit v1.2.3 From 4567d686f5c6d955e57a3afa1741944c1e7f4033 Mon Sep 17 00:00:00 2001 From: Volodymyr Bendiuga Date: Thu, 19 Jan 2017 17:05:04 +0100 Subject: phy: increase size of MII_BUS_ID_SIZE and bus_id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some bus names are pretty long and do not fit into 17 chars. Increase therefore MII_BUS_ID_SIZE and phy_fixup.bus_id to larger number. Now mii_bus.id can host larger name. Signed-off-by: Volodymyr Bendiuga Signed-off-by: Magnus Öberg Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/include/linux/phy.h b/include/linux/phy.h index f7d95f644eed..5c9d2529685f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -158,11 +158,7 @@ static inline const char *phy_modes(phy_interface_t interface) /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ #define PHY_ID_FMT "%s:%02x" -/* - * Need to be a little smaller than phydev->dev.bus_id to leave room - * for the ":%02x" - */ -#define MII_BUS_ID_SIZE (20 - 3) +#define MII_BUS_ID_SIZE 61 /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ @@ -632,7 +628,7 @@ struct phy_driver { /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; - char bus_id[20]; + char bus_id[MII_BUS_ID_SIZE + 3]; u32 phy_uid; u32 phy_uid_mask; int (*run)(struct phy_device *phydev); -- cgit v1.2.3 From f82eed45239988db125d1fc8c52994f3684324c6 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 1 Dec 2016 10:34:57 +0200 Subject: net/mlx5: Remove information print after attempt to load mlx5_ib module Infiniband part of mlx5 driver can be compiled as a module or as a part of bzImage (compiled in). In the second case, the call to request_module will return an error -ENOENT. It will cause to a misleading print "failed request module on mlx5_ib". This patch removes this print, In order to comply with mlx4. 
Fixes: f66f049fb738 ("net/mlx5_core: Request the mlx5 IB module on driver load") Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8701e85daf1c..5d160f33bc17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1340,9 +1340,7 @@ static int init_one(struct pci_dev *pdev, goto clean_health; } - err = request_module_nowait(MLX5_IB_MOD); - if (err) - pr_info("failed request module on %s\n", MLX5_IB_MOD); + request_module_nowait(MLX5_IB_MOD); err = devlink_register(devlink, &pdev->dev); if (err) -- cgit v1.2.3 From 712bfef60912d91033cb25739f7444d5b8d8c59f Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Fri, 16 Dec 2016 10:30:15 -0600 Subject: net/mlx5: Fix version printout in case of health issue Firmware representation of the firmware version on the health buffer has changed for newer device. The representation in the initialization segment does not and will not change. In addition, we print the health buffer firmware version as a raw hex number. Signed-off-by: Eli Cohen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 5bcf93422ee0..d0515391d33b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd) } } -static u16 get_maj(u32 fw) -{ - return fw >> 28; -} - -static u16 get_min(u32 fw) -{ - return fw >> 16 & 0xfff; -} - -static u16 get_sub(u32 fw) -{ - return fw & 0xffff; -} - static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev) dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); - fw = ioread32be(&h->fw_ver); - sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw)); + sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str); dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index)); dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); + fw = ioread32be(&h->fw_ver); + dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw); } static unsigned long get_next_poll_jiffies(void) -- cgit v1.2.3 From f9a1ef720e9e32bc6a4a382c15ac77d62749c79e Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Mon, 10 Oct 2016 16:05:53 +0300 Subject: net/mlx5: Add MTPPS and MTPPSE registers infrastructure Implement query and set functionality for MTPPS and MTPPSE registers. MTPPS (Management Pulse Per Second) provides the device PPS capabilities, configures the PPS in and out modules and holds the PPS in time stamp. Query MTPPS is supported only when HCA_CAP.pps is set and modify is supported when HCA_CAP.pps_modify is set. 
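[Editor's sketch, not part of this patch] For context, a rough usage example of the register helpers added below: configure one pin as PPS-in and arm repetitive event generation so MLX5_EVENT_TYPE_PPS_EVENT is delivered. example_enable_pps_in() and the raw 0x0/0x1 values are assumptions by the editor; they match the MLX5E_PIN_MODE_IN and MLX5E_EVENT_MODE_REPETETIVE encodings defined in the follow-up mlx5e 1PPS patch in this series.

/* Assumes it sits next to the other register helpers (port.c), where the
 * mlx5 core headers and the mtpps/mtppse layouts are already available.
 */
static int example_enable_pps_in(struct mlx5_core_dev *mdev, u8 pin)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	int err;

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, 0x0);	/* PPS in */
	MLX5_SET(mtpps_reg, in, enable, 1);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm the pin for repetitive event generation (mode 0x1). */
	return mlx5_set_mtppse(mdev, pin, 1, 0x1);
}

The query side is symmetric: mlx5_query_mtpps() fills the same mtpps_reg layout, from which the capability and time-stamp fields can then be read with MLX5_GET().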
MTPPSE (Management Pulse Per Second Event) configures the different event generation modes for PPS. Supported when HCA_CAP.pps is set. Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9 ++++ .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5 ++ drivers/net/ethernet/mellanox/mlx5/core/port.c | 48 +++++++++++++++++ include/linux/mlx5/device.h | 18 +++++++ include/linux/mlx5/driver.h | 3 ++ include/linux/mlx5/mlx5_ifc.h | 60 +++++++++++++++++++++- 6 files changed, 142 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 5130d65dd41a..ea5d8d37a75c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -154,6 +154,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_PAGE_REQUEST"; case MLX5_EVENT_TYPE_PAGE_FAULT: return "MLX5_EVENT_TYPE_PAGE_FAULT"; + case MLX5_EVENT_TYPE_PPS_EVENT: + return "MLX5_EVENT_TYPE_PPS_EVENT"; default: return "Unrecognized event"; } @@ -470,6 +472,10 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) mlx5_port_module_event(dev, eqe); break; + case MLX5_EVENT_TYPE_PPS_EVENT: + if (dev->event) + dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe); + break; default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -684,6 +690,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) else mlx5_core_dbg(dev, "port_module_event is not set\n"); + if (MLX5_CAP_GEN(dev, pps)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 74241e82de63..090e3a1dedc2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -138,6 +138,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); +int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); +int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); + void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index d2ec9d232a70..5ea85336b2e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -866,3 +866,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) module_num, mlx5_pme_status[module_status - 1], mlx5_pme_error[error_type]); } + +int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, + mtpps_size, MLX5_REG_MTPPS, 0, 0); +} + +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out, + sizeof(out), MLX5_REG_MTPPS, 0, 1); +} + +int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode) +{ + u32 
out[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + int err = 0; + + MLX5_SET(mtppse_reg, in, pin, pin); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 0); + if (err) + return err; + + *arm = MLX5_GET(mtppse_reg, in, event_arm); + *mode = MLX5_GET(mtppse_reg, in, event_generation_mode); + + return err; +} + +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode) +{ + u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + + MLX5_SET(mtppse_reg, in, pin, pin); + MLX5_SET(mtppse_reg, in, event_arm, arm); + MLX5_SET(mtppse_reg, in, event_generation_mode, mode); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 1); +} diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 7c5265db02a9..6ac8eb5a89ca 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -289,6 +289,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, + MLX5_EVENT_TYPE_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, @@ -569,6 +570,22 @@ struct mlx5_eqe_port_module { u8 error_type; } __packed; +struct mlx5_eqe_pps { + u8 rsvd0[3]; + u8 pin; + u8 rsvd1[4]; + union { + struct { + __be32 time_sec; + __be32 time_nsec; + }; + struct { + __be64 time_stamp; + }; + }; + u8 rsvd2[12]; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -583,6 +600,7 @@ union ev_data { struct mlx5_eqe_page_fault page_fault; struct mlx5_eqe_vport_change vport_change; struct mlx5_eqe_port_module port_module; + struct mlx5_eqe_pps pps; } __packed; struct mlx5_eqe { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3a309f6a4a15..ebbc8834063b 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -125,6 +125,8 @@ enum { MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MTPPS = 0x9053, + MLX5_REG_MTPPSE = 0x9054, }; enum mlx5_dcbx_oper_mode { @@ -172,6 +174,7 @@ enum mlx5_dev_event { MLX5_DEV_EVENT_PKEY_CHANGE, MLX5_DEV_EVENT_GUID_CHANGE, MLX5_DEV_EVENT_CLIENT_REREG, + MLX5_DEV_EVENT_PPS, }; enum mlx5_port_status { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 37327f6ba9cb..f5292f3b0301 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -835,7 +835,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_at_1c0[0x3]; + u8 reserved_at_1c0[0x1]; + u8 pps[0x1]; + u8 pps_modify[0x1]; u8 log_max_msg[0x5]; u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; @@ -7821,6 +7823,60 @@ struct mlx5_ifc_initial_seg_bits { u8 reserved_at_80a0[0x17fc0]; }; +struct mlx5_ifc_mtpps_reg_bits { + u8 reserved_at_0[0xc]; + u8 cap_number_of_pps_pins[0x4]; + u8 reserved_at_10[0x4]; + u8 cap_max_num_of_pps_in_pins[0x4]; + u8 reserved_at_18[0x4]; + u8 cap_max_num_of_pps_out_pins[0x4]; + + u8 reserved_at_20[0x24]; + u8 cap_pin_3_mode[0x4]; + u8 reserved_at_48[0x4]; + u8 cap_pin_2_mode[0x4]; + u8 reserved_at_50[0x4]; + u8 cap_pin_1_mode[0x4]; + u8 reserved_at_58[0x4]; + u8 cap_pin_0_mode[0x4]; + + u8 reserved_at_60[0x4]; + u8 cap_pin_7_mode[0x4]; + u8 reserved_at_68[0x4]; + u8 cap_pin_6_mode[0x4]; + u8 reserved_at_70[0x4]; + u8 cap_pin_5_mode[0x4]; + u8 reserved_at_78[0x4]; + u8 cap_pin_4_mode[0x4]; + + u8 reserved_at_80[0x80]; + + u8 enable[0x1]; 
+ u8 reserved_at_101[0xb]; + u8 pattern[0x4]; + u8 reserved_at_110[0x4]; + u8 pin_mode[0x4]; + u8 pin[0x8]; + + u8 reserved_at_120[0x20]; + + u8 time_stamp[0x40]; + + u8 out_pulse_duration[0x10]; + u8 out_periodic_adjustment[0x10]; + + u8 reserved_at_1a0[0x60]; +}; + +struct mlx5_ifc_mtppse_reg_bits { + u8 reserved_at_0[0x18]; + u8 pin[0x8]; + u8 event_arm[0x1]; + u8 reserved_at_21[0x1b]; + u8 event_generation_mode[0x4]; + u8 reserved_at_40[0x40]; +}; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -7865,6 +7921,8 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; struct mlx5_ifc_sltp_reg_bits sltp_reg; + struct mlx5_ifc_mtpps_reg_bits mtpps_reg; + struct mlx5_ifc_mtppse_reg_bits mtppse_reg; u8 reserved_at_0[0x60e0]; }; -- cgit v1.2.3 From ee7f12205abc21c2e83b266636e27b1cb2fd4e21 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Mon, 22 Aug 2016 14:57:41 +0300 Subject: net/mlx5e: Implement 1PPS support This patch enables the 1PPS IN and 1PPS OUT support according to the advertised HCA capability. Single pin may be configured to one of the above mutual exclusive functions via standard Linux tools and APIs. For example, testptp open source application. Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 + drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 218 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 +- 3 files changed, 232 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0d9dd860a295..22c3d26c6d3d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -262,6 +262,7 @@ struct mlx5e_tstamp { struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; + u8 *pps_pin_caps; }; enum { @@ -780,6 +781,8 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, struct skb_shared_hwtstamps *hwts); void mlx5e_timestamp_init(struct mlx5e_priv *priv); void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 746a92c13644..349dc72cd2b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -37,6 +37,22 @@ enum { MLX5E_CYCLES_SHIFT = 23 }; +enum { + MLX5E_PIN_MODE_IN = 0x0, + MLX5E_PIN_MODE_OUT = 0x1, +}; + +enum { + MLX5E_OUT_PATTERN_PULSE = 0x0, + MLX5E_OUT_PATTERN_PERIODIC = 0x1, +}; + +enum { + MLX5E_EVENT_MODE_DISABLE = 0x0, + MLX5E_EVENT_MODE_REPETETIVE = 0x1, + MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, +}; + void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, struct skb_shared_hwtstamps *hwts) { @@ -189,6 +205,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = 
+ container_of(tstamp, struct mlx5e_priv, tstamp); + + if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + /* For future use need to add a loop for finding all 1PPS out pins */ + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); + + mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + } if (delta < 0) { neg_adj = 1; @@ -208,6 +236,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) return 0; } +static int mlx5e_extts_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u8 pattern = 0; + int pin = -1; + int err = 0; + + if (!MLX5_CAP_GEN(priv->mdev, pps) || + !MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->extts.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + } + + if (rq->extts.flags & PTP_FALLING_EDGE) + pattern = 1; + + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); + MLX5_SET(mtpps_reg, in, pattern, pattern); + MLX5_SET(mtpps_reg, in, enable, on); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; + + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_perout_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u64 nsec_now, nsec_delta, time_stamp; + u64 cycles_now, cycles_delta; + struct timespec64 ts; + unsigned long flags; + int pin = -1; + s64 ns; + + if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->perout.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + if (on) + if ((ns >> 1) != 500000000LL) + return -EINVAL; + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + ns = timespec64_to_ns(&ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + write_unlock_irqrestore(&tstamp->lock, flags); + time_stamp = cycles_now + cycles_delta; + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); + MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); + + return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); +} + +static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + return mlx5e_extts_configure(ptp, rq, on); + case PTP_CLK_REQ_PEROUT: + return mlx5e_perout_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + 
} + return 0; +} + +static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; +} + static const struct ptp_clock_info mlx5e_ptp_clock_info = { .owner = THIS_MODULE, .max_adj = 100000000, @@ -221,6 +367,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = { .gettime64 = mlx5e_ptp_gettime, .settime64 = mlx5e_ptp_settime, .enable = NULL, + .verify = NULL, }; static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) @@ -229,6 +376,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; } +static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) +{ + int i; + + tstamp->ptp_info.pin_config = + kzalloc(sizeof(*tstamp->ptp_info.pin_config) * + tstamp->ptp_info.n_pins, GFP_KERNEL); + if (!tstamp->ptp_info.pin_config) + return -ENOMEM; + tstamp->ptp_info.enable = mlx5e_ptp_enable; + tstamp->ptp_info.verify = mlx5e_ptp_verify; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + snprintf(tstamp->ptp_info.pin_config[i].name, + sizeof(tstamp->ptp_info.pin_config[i].name), + "mlx5_pps%d", i); + tstamp->ptp_info.pin_config[i].index = i; + tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE; + tstamp->ptp_info.pin_config[i].chan = i; + } + + return 0; +} + +static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, + struct mlx5e_tstamp *tstamp) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + mlx5_query_mtpps(priv->mdev, out, sizeof(out)); + + tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, + cap_number_of_pps_pins); + tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_in_pins); + tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_out_pins); + + tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); + tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); +} + +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + + ptp_clock_event(tstamp->ptp, event); +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { struct mlx5e_tstamp *tstamp = &priv->tstamp; @@ -272,6 +475,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) tstamp->ptp_info = mlx5e_ptp_clock_info; snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + /* Initialize 1PPS data structures */ +#define MAX_PIN_NUM 8 + tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); + if (tstamp->pps_pin_caps) { + if (MLX5_CAP_GEN(priv->mdev, pps)) + mlx5e_get_pps_caps(priv, tstamp); + if (tstamp->ptp_info.n_pins) + mlx5e_init_pin_config(tstamp); + } else { + mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); + } + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, &priv->mdev->pdev->dev); if (IS_ERR(tstamp->ptp)) { @@ -293,5 +508,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) priv->tstamp.ptp = NULL; } + kfree(tstamp->pps_pin_caps); + kfree(tstamp->ptp_info.pin_config); + cancel_delayed_work_sync(&tstamp->overflow_work); } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index aba3691e0919..2129fbd1d6ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -317,6 +317,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { struct mlx5e_priv *priv = vpriv; + struct ptp_clock_event ptp_event; + struct mlx5_eqe *eqe = NULL; if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) return; @@ -326,7 +328,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, case MLX5_DEV_EVENT_PORT_DOWN: queue_work(priv->wq, &priv->update_carrier_work); break; - + case MLX5_DEV_EVENT_PPS: + eqe = (struct mlx5_eqe *)param; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_event.index = eqe->data.pps.pin; + ptp_event.timestamp = + timecounter_cyc2time(&priv->tstamp.clock, + be64_to_cpu(eqe->data.pps.time_stamp)); + mlx5e_pps_event_handler(vpriv, &ptp_event); + break; default: break; } -- cgit v1.2.3 From 105433659d394b70dc3b6ceb65d0c1673e14cee1 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Sun, 9 Oct 2016 16:25:43 +0300 Subject: net/mlx5: Add support to s-tag in mlx5 firmware interface Add svlan_tag and rename vlan_tag to cvlan_tag in flow table entry match param. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- drivers/infiniband/hw/mlx5/main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 10 +++++----- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 12 ++++++------ include/linux/mlx5/mlx5_ifc.h | 12 +++++++----- 6 files changed, 24 insertions(+), 22 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a191b9327b0c..8727116a4cab 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1704,9 +1704,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v, if (ib_spec->eth.mask.vlan_tag) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 1fe80de5d68f..7ae0744ea50f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -172,7 +172,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, dest.ft = priv->fs.l2.ft.t; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: @@ -180,11 +180,11 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: rule_p = &priv->fs.vlan.any_vlan_rule; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ rule_p = &priv->fs.vlan.active_vlans_rule[vid]; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + 
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, @@ -991,7 +991,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP0_SIZE; @@ -1003,7 +1003,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index d088effd7160..4b4323f3c158 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, first_vid, 0xfff); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 46bef6a26a8c..d4af5507679f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -460,8 +460,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, FLOW_DISSECTOR_KEY_VLAN, f->mask); if (mask->vlan_id || mask->vlan_priority) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f14d9c9ba773..4b3b60be319d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); @@ -1098,7 +1098,7 
@@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); @@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); @@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } if (vport->info.vlan || vport->info.qos) - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); if (vport->info.spoofchk) { MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); @@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Allowed vlan rule */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f5292f3b0301..6f19e4b8574f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 ip_protocol[0x8]; u8 ip_dscp[0x6]; u8 ip_ecn[0x2]; - u8 vlan_tag[0x1]; - u8 reserved_at_91[0x1]; + u8 cvlan_tag[0x1]; + u8 svlan_tag[0x1]; u8 frag[0x1]; u8 reserved_at_93[0x4]; u8 tcp_flags[0x9]; @@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 inner_second_cfi[0x1]; u8 inner_second_vid[0xc]; - u8 outer_second_vlan_tag[0x1]; - u8 inner_second_vlan_tag[0x1]; - u8 reserved_at_62[0xe]; + u8 outer_second_cvlan_tag[0x1]; + u8 inner_second_cvlan_tag[0x1]; + u8 outer_second_svlan_tag[0x1]; + u8 inner_second_svlan_tag[0x1]; + u8 reserved_at_64[0xc]; u8 gre_protocol[0x10]; u8 gre_key_h[0x18]; -- cgit v1.2.3 From 8a271746a264c1bd05fd98da1d96d97b79799970 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Sun, 9 Oct 2016 17:05:31 +0300 Subject: net/mlx5e: Receive s-tagged packets in promiscuous mode Today when the driver enter to promiscuous mode or vlan filter is disabled, we add flow rule to receive any c-taggd packets, therefore s-tagged packets are dropped. In order to receive s-tagged packets as well we need to add flow rule to receive any s-tagged packet. 
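[Illustrative sketch, not part of this commit: the core of the change below is a second catch-all rule that matches svlan_tag alongside the existing cvlan_tag rule, so 802.1ad (s-tagged) frames also reach the L2 table when the VLAN filter is disabled or the device is promiscuous. The helper name mlx5e_build_any_stag_spec() is hypothetical; the real diff wires the same match into __mlx5e_add_vlan_rule() as MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID.]

static void mlx5e_build_any_stag_spec(struct mlx5_flow_spec *spec)
{
	/* criteria: look at the outer s-tag bit; value: it must be set */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.svlan_tag);
	MLX5_SET(fte_match_param, spec->match_value,
		 outer_headers.svlan_tag, 1);
}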
Fixes: 7cb21b794baa ('net/mlx5e: Rename en_flow_table.c to en_fs.c') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 +- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 83 +++++++++++++++++++------ 2 files changed, 68 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 22c3d26c6d3d..d603a30ad639 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -572,8 +572,9 @@ struct mlx5e_vlan_table { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; - struct mlx5_flow_handle *any_vlan_rule; - bool filter_disabled; + struct mlx5_flow_handle *any_cvlan_rule; + struct mlx5_flow_handle *any_svlan_rule; + bool filter_disabled; }; struct mlx5e_l2_table { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 7ae0744ea50f..92d8364e98f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_UNTAGGED, - MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, + MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, MLX5E_VLAN_RULE_TYPE_MATCH_VID, }; @@ -172,18 +173,30 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, dest.ft = priv->fs.l2.ft.t; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: rule_p = &priv->fs.vlan.untagged_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - rule_p = &priv->fs.vlan.any_vlan_rule; + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + rule_p = &priv->fs.vlan.any_cvlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + rule_p = &priv->fs.vlan.any_svlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.svlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); + break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ rule_p = &priv->fs.vlan.active_vlans_rule[vid]; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); @@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, priv->fs.vlan.untagged_rule = NULL; } break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - if (priv->fs.vlan.any_vlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule); - priv->fs.vlan.any_vlan_rule = NULL; + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + if (priv->fs.vlan.any_cvlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule); + priv->fs.vlan.any_cvlan_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + if (priv->fs.vlan.any_svlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule); + priv->fs.vlan.any_svlan_rule = NULL; } break; case 
MLX5E_VLAN_RULE_TYPE_MATCH_VID: @@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, } } +static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) +{ + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + +static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + if (err) + return err; + + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { if (!priv->fs.vlan.filter_disabled) @@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) @@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) @@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ @@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) if (enable_promisc) { mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); if (!priv->fs.vlan.filter_disabled) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_add_any_vid_rules(priv); } if (enable_allmulti) mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); @@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { if (!priv->fs.vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_del_any_vid_rules(priv); mlx5e_del_l2_flow_rule(priv, &ea->promisc); } @@ -976,11 +1010,13 @@ err_destroy_flow_table: return err; } -#define MLX5E_NUM_VLAN_GROUPS 2 +#define MLX5E_NUM_VLAN_GROUPS 3 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) #define MLX5E_VLAN_GROUP1_SIZE BIT(1) +#define MLX5E_VLAN_GROUP2_SIZE BIT(0) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ - MLX5E_VLAN_GROUP1_SIZE) + MLX5E_VLAN_GROUP1_SIZE +\ + MLX5E_VLAN_GROUP2_SIZE) static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen) @@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in goto err_destroy_groups; ft->num_groups++; + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = 
mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + return 0; err_destroy_groups: -- cgit v1.2.3 From cfdcbceaeffc669b70d904d80a2df9c86c232566 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 8 Dec 2016 15:52:00 +0200 Subject: net/mlx5: Expose PCAM, MCAM registers infrastructure PCAM: Ports capabilities mask register. MCAM: Management capabilities mask register. PCAM and MCAM registers will provide information regarding firmware support for different features, in order to avoid cases where new driver combined with old firmware results in syndromes (for ex. PCIe counters before this patchset). Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- include/linux/mlx5/device.h | 16 ++++++++++++ include/linux/mlx5/driver.h | 2 ++ include/linux/mlx5/mlx5_ifc.h | 60 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 6ac8eb5a89ca..79f38e67fe2f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -970,6 +970,22 @@ enum mlx5_cap_type { MLX5_CAP_NUM }; +enum mlx5_pcam_reg_groups { + MLX5_PCAM_REGS_5000_TO_507F = 0x0, +}; + +enum mlx5_pcam_feature_groups { + MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + +enum mlx5_mcam_reg_groups { + MLX5_MCAM_REGS_FIRST_128 = 0x0, +}; + +enum mlx5_mcam_feature_groups { + MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ebbc8834063b..60c2b156da8c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -121,12 +121,14 @@ enum { MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, MLX5_REG_PMLP = 0x5002, + MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MLCR = 0x902b, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MCAM = 0x907f, }; enum mlx5_dcbx_oper_mode { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 6f19e4b8574f..e8061a95326a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -826,7 +826,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; u8 early_vf_enable[0x1]; - u8 reserved_at_1a9[0x2]; + u8 mcam_reg[0x1]; + u8 pcam_reg[0x1]; u8 local_ca_ack_delay[0x5]; u8 port_module_event[0x1]; u8 reserved_at_1b1[0x1]; @@ -7481,6 +7482,63 @@ struct mlx5_ifc_peir_reg_bits { u8 error_type[0x8]; }; +struct mlx5_ifc_pcam_enhanced_features_bits { + u8 reserved_at_0[0x7e]; + + u8 ppcnt_discard_group[0x1]; + u8 ppcnt_statistical_group[0x1]; +}; + +struct mlx5_ifc_pcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + union { + u8 reserved_at_0[0x80]; + } port_access_reg_cap_mask; + + u8 reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features; + u8 reserved_at_0[0x80]; + } feature_cap_mask; + + u8 reserved_at_1c0[0xc0]; +}; + +struct mlx5_ifc_mcam_enhanced_features_bits { + u8 reserved_at_0[0x7f]; + + u8 pcie_performance_group[0x1]; +}; + +struct mlx5_ifc_mcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + union { + u8 
reserved_at_0[0x80]; + } mng_access_reg_cap_mask; + + u8 reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features; + u8 reserved_at_0[0x80]; + } mng_feature_cap_mask; + + u8 reserved_at_1c0[0x80]; +}; + struct mlx5_ifc_pcap_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; -- cgit v1.2.3 From c835ad64683bd3e2d1b31ed2cb1ff4366932edb1 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 8 Dec 2016 15:56:00 +0200 Subject: net/mlx5: Implement PCAM, MCAM access register commands Introduced registers will expose capabilities of new registers and features related to port/management. Driver will query MCAM and PCAM in order to avoid failing on old firmwares with lack of support. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5 +++++ drivers/net/ethernet/mellanox/mlx5/core/port.c | 24 ++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 090e3a1dedc2..b3dabe6e8836 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -113,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); void mlx5_cq_tasklet_cb(unsigned long data); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group); +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group, + u8 access_reg_group); + void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 5ea85336b2e4..969e352435ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -74,6 +74,30 @@ out: } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(pcam_reg); + + MLX5_SET(pcam_reg, in, feature_group, feature_group); + MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0); +} + +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group, + u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(mcam_reg); + + MLX5_SET(mcam_reg, in, feature_group, feature_group); + MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0); +} + struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; -- cgit v1.2.3 From 71862561f3a62015a11de16d1c306481e8415c08 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 8 Dec 2016 16:03:31 +0200 Subject: net/mlx5: Query and cache PCAM, MCAM registers on initialization On load_one, we now cache our capabilities registers internally, similar to QUERY_HCA_CAP. Capabilities can later be queried using macros introduced in this patch. 
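[Illustrative sketch, not part of this commit: how a driver path is expected to consume the cached masks. The MLX5_CAP_PCAM_FEATURE()/MLX5_CAP_MCAM_FEATURE() macros are introduced by the diff below; the two wrapper names here are hypothetical and only show that the check costs no extra firmware command once load_one() has cached the registers.]

static bool mlx5_ppcnt_statistical_supported(struct mlx5_core_dev *mdev)
{
	/* reads the copy cached in mdev->caps.pcam at init time */
	return MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group);
}

static bool mlx5_pcie_counters_supported(struct mlx5_core_dev *mdev)
{
	/* reads the copy cached in mdev->caps.mcam at init time */
	return MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group);
}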
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 20 ++++++++++++++++++++ include/linux/mlx5/device.h | 6 ++++++ include/linux/mlx5/driver.h | 4 ++++ 3 files changed, 30 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 5718aada6605..d0bbefa08af7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -91,6 +91,20 @@ out: } EXPORT_SYMBOL(mlx5_core_query_vendor_id); +static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_pcam_reg(dev, dev->caps.pcam, + MLX5_PCAM_FEATURE_ENHANCED_FEATURES, + MLX5_PCAM_REGS_5000_TO_507F); +} + +static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_mcam_reg(dev, dev->caps.mcam, + MLX5_MCAM_FEATURE_ENHANCED_FEATURES, + MLX5_MCAM_REGS_FIRST_128); +} + int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { int err; @@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, pcam_reg)) + mlx5_get_pcam_reg(dev); + + if (MLX5_CAP_GEN(dev, mcam_reg)) + mlx5_get_mcam_reg(dev); + return 0; } diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 79f38e67fe2f..f21528abfb74 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1081,6 +1081,12 @@ enum mlx5_mcam_feature_groups { #define MLX5_CAP_QOS(mdev, cap)\ MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) +#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ + MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) + +#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ + MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 60c2b156da8c..69c4661d391e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -739,6 +739,10 @@ struct mlx5_core_dev { struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + struct { + u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; + u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; + } caps; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; enum mlx5_device_state state; -- cgit v1.2.3 From d8dc0508c50f838f8abcf023cd74ca9a0f86b5a8 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 27 Sep 2016 17:04:51 +0300 Subject: net/mlx5: Add PPCNT physical layer statistical group infrastructure Add the needed infrastructure for future use of PPCNT physical layer statistical group. 
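[Illustrative sketch, not part of this commit: how the new PPCNT group would be read. The group value 0x16 and the phys_layer_statistical_cntrs layout come from the diff that follows; the function name is hypothetical, and the ethtool patch later in this series uses the same access pattern.]

static int mlx5_read_phy_statistical_cntrs(struct mlx5_core_dev *mdev,
					   void *out)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);

	return mlx5_core_access_reg(mdev, in, sz, out, sz,
				    MLX5_REG_PPCNT, 0, 0);
}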
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- include/linux/mlx5/device.h | 1 + include/linux/mlx5/mlx5_ifc.h | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index f21528abfb74..5ad1d3ca47dc 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1115,6 +1115,7 @@ enum { MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, + MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index e8061a95326a..f4860fa719d9 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1384,6 +1384,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits { u8 reserved_at_640[0x180]; }; +struct mlx5_ifc_phys_layer_statistical_cntrs_bits { + u8 time_since_last_clear_high[0x20]; + + u8 time_since_last_clear_low[0x20]; + + u8 phy_received_bits_high[0x20]; + + u8 phy_received_bits_low[0x20]; + + u8 phy_symbol_errors_high[0x20]; + + u8 phy_symbol_errors_low[0x20]; + + u8 phy_corrected_bits_high[0x20]; + + u8 phy_corrected_bits_low[0x20]; + + u8 phy_corrected_bits_lane0_high[0x20]; + + u8 phy_corrected_bits_lane0_low[0x20]; + + u8 phy_corrected_bits_lane1_high[0x20]; + + u8 phy_corrected_bits_lane1_low[0x20]; + + u8 phy_corrected_bits_lane2_high[0x20]; + + u8 phy_corrected_bits_lane2_low[0x20]; + + u8 phy_corrected_bits_lane3_high[0x20]; + + u8 phy_corrected_bits_lane3_low[0x20]; + + u8 reserved_at_200[0x5c0]; +}; + struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 symbol_error_counter[0x10]; @@ -2928,6 +2964,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; u8 reserved_at_0[0x7c0]; }; -- cgit v1.2.3 From 5db0a4f64c042f6f64bd645488278f7a5df33c07 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 23 Aug 2016 12:23:29 +0300 Subject: net/mlx5e: Expose physical layer statistical counters to ethtool Use ethtool -S to query physical layer statistical counters including: - rx_symbol_errors_phy: Number of symbol errors that were not corrected by FEC correction algorithm or that FEC was not active on this interface. - rx_corrected_bits_phy: Number of corrected bits according to active FEC (RS/FC). 
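[Illustrative sketch, not part of this commit: decoding one of the new counters from the cached PPCNT dump. PPORT_PHY_STATISTICAL_GET() and the phy_statistical_counters field are added by the diff below; the wrapper name here is hypothetical.]

static u64 mlx5e_rx_symbol_errors_phy(struct mlx5e_pport_stats *pstats)
{
	/* each counter is a 64-bit value stored as a high/low dword pair */
	return PPORT_PHY_STATISTICAL_GET(pstats, phy_symbol_errors);
}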
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 10 +++++++++- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 ++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 18 +++++++++++++++++- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 33a399a8b5d5..dcc3e4dc1484 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -170,7 +170,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return NUM_SW_COUNTERS + MLX5E_NUM_Q_CNTRS(priv) + - NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv) + @@ -218,6 +218,10 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -330,6 +334,10 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, pport_2819_stats_desc, i); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2129fbd1d6ca..3be87a65d750 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -268,6 +268,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) { + out = pstats->phy_statistical_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { out = pstats->per_prio_counters[prio]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index ba5db1dd23a9..21c43d4e4f2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = { #define PPORT_2819_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_PHY_STATISTICAL_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.phys_layer_statistical_cntrs.c##_high) +#define PPORT_PHY_STATISTICAL_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, 
(pstats)->phy_statistical_counters, \ + counter_set.phys_layer_statistical_cntrs.c##_high) #define PPORT_PER_PRIO_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -215,6 +221,7 @@ struct mlx5e_pport_stats { __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { @@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = { { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, }; +static const struct counter_desc pport_phy_statistical_stats_desc[] = { + { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, + { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, +}; + static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, @@ -360,13 +372,17 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ + (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) -#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \ NUM_PPORT_2863_COUNTERS + \ NUM_PPORT_2819_COUNTERS + \ + NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) -- cgit v1.2.3 From 8ed1a6306dc7892b63be7cdb1e3b1123265f42ff Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 17 Nov 2016 13:46:01 +0200 Subject: net/mlx5: Add MPCNT register infrastructure Add the needed infrastructure for future use of MPCNT register. 
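[Illustrative sketch, not part of this commit: reading the PCIe performance group through the MPCNT layout added below. MLX5_REG_MPCNT and the group value come from this patch; the function name is hypothetical, and the ethtool patch that follows performs essentially the same query from mlx5e_update_pcie_counters().]

static int mlx5_read_pcie_perf_cntrs(struct mlx5_core_dev *mdev, void *out)
{
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	u32 *in;
	int err;

	in = mlx5_vzalloc(sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz,
				   MLX5_REG_MPCNT, 0, 0);

	kvfree(in);
	return err;
}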
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- include/linux/mlx5/device.h | 4 ++++ include/linux/mlx5/driver.h | 1 + include/linux/mlx5/mlx5_ifc.h | 42 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 5ad1d3ca47dc..1cf97dce8215 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1119,6 +1119,10 @@ enum { MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; +enum { + MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, +}; + static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) { if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 69c4661d391e..f4d6d390a9cf 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -126,6 +126,7 @@ enum { MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MPCNT = 0x9051, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, MLX5_REG_MCAM = 0x907f, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f4860fa719d9..d96ebc319d63 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1802,6 +1802,30 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 reserved_at_4c0[0x300]; }; +struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { + u8 life_time_counter_high[0x20]; + + u8 life_time_counter_low[0x20]; + + u8 rx_errors[0x20]; + + u8 tx_errors[0x20]; + + u8 l0_to_recovery_eieos[0x20]; + + u8 l0_to_recovery_ts[0x20]; + + u8 l0_to_recovery_framing[0x20]; + + u8 l0_to_recovery_retrain[0x20]; + + u8 crc_error_dllp[0x20]; + + u8 crc_error_tlp[0x20]; + + u8 reserved_at_140[0x680]; +}; + struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; @@ -2968,6 +2992,11 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { u8 reserved_at_0[0x7c0]; }; +union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits { + struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout; + u8 reserved_at_0[0x7c0]; +}; + union mlx5_ifc_event_auto_bits { struct mlx5_ifc_comp_event_bits comp_event; struct mlx5_ifc_dct_events_bits dct_events; @@ -7290,6 +7319,18 @@ struct mlx5_ifc_ppcnt_reg_bits { union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; +struct mlx5_ifc_mpcnt_reg_bits { + u8 reserved_at_0[0x8]; + u8 pcie_index[0x8]; + u8 reserved_at_10[0xa]; + u8 grp[0x6]; + + u8 clr[0x1]; + u8 reserved_at_21[0x1f]; + + union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; +}; + struct mlx5_ifc_ppad_reg_bits { u8 reserved_at_0[0x3]; u8 single_mac[0x1]; @@ -8006,6 +8047,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pmtu_reg_bits pmtu_reg; struct mlx5_ifc_ppad_reg_bits ppad_reg; struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; + struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; struct mlx5_ifc_pplm_reg_bits pplm_reg; struct mlx5_ifc_pplr_reg_bits pplr_reg; struct mlx5_ifc_ppsc_reg_bits ppsc_reg; -- cgit v1.2.3 From 0f7f348192a9ba445463261ecc15a63d0ba48722 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 17 Nov 2016 13:46:02 +0200 Subject: net/mlx5e: Expose PCIe statistics to ethtool This patch exposes PCIe performance counters, queried with ethtool -S . 
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 9 +++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 23 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 22 ++++++++++++++++++++- 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index dcc3e4dc1484..4021863e3840 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -171,6 +171,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) return NUM_SW_COUNTERS + MLX5E_NUM_Q_CNTRS(priv) + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + + NUM_PCIE_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv) + @@ -222,6 +223,10 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_phy_statistical_stats_desc[i].format); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -338,6 +343,10 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, pport_phy_statistical_stats_desc, i); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3be87a65d750..9aed892cd350 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -297,12 +297,35 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv) &qcnt->rx_out_of_buffer); } +static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); + void *out; + u32 *in; + + if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) + return; + + in = mlx5_vzalloc(sz); + if (!in) + return; + + out = pcie_stats->pcie_perf_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + kvfree(in); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { mlx5e_update_q_counter(priv); mlx5e_update_vport_counters(priv); mlx5e_update_pport_counters(priv); mlx5e_update_sw_counters(priv); + mlx5e_update_pcie_counters(priv); } void mlx5e_update_stats_work(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 21c43d4e4f2e..53e4992d6511 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -39,7 +39,7 @@ #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ (*(u32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ - 
be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) + be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) @@ -288,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +#define PCIE_PERF_OFF(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_PERF_GET(pcie_stats, c) \ + MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ + counter_set.pcie_perf_cntrs_grp_data_layout.c) + +struct mlx5e_pcie_stats { + __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; +}; + +static const struct counter_desc pcie_perf_stats_desc[] = { + { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, + { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -375,6 +390,9 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) +#define NUM_PCIE_PERF_COUNTERS(priv) \ + (ARRAY_SIZE(pcie_perf_stats_desc) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ @@ -385,6 +403,7 @@ static const struct counter_desc sq_stats_desc[] = { NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) +#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) @@ -394,6 +413,7 @@ struct mlx5e_stats { struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; struct rtnl_link_stats64 vf_vport; + struct mlx5e_pcie_stats pcie; }; static const struct counter_desc mlx5e_pme_status_desc[] = { -- cgit v1.2.3 From 701052c578195e6e02a22647fa6fd1c90c31dafd Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 14 Dec 2016 17:40:41 +0200 Subject: net/mlx5: Move cached hca caps to designated caps struct The caps structure consists of hca caps and port/management caps, all under one roof. 
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 ++-- include/linux/mlx5/device.h | 34 +++++++++++------------ include/linux/mlx5/driver.h | 4 +-- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0ac7a2fc916c..85ff4b843b4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1665,7 +1665,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, #define FLOW_TABLE_BIT_SZ 1 #define GET_FLOW_TABLE_CAP(dev, offset) \ - ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ + ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \ offset / 32)) >> \ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 5d160f33bc17..84f7970c5080 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -418,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: - memcpy(dev->hca_caps_max[cap_type], hca_caps, + memcpy(dev->caps.hca_max[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: - memcpy(dev->hca_caps_cur[cap_type], hca_caps, + memcpy(dev->caps.hca_cur[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: @@ -513,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], + memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL], MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1cf97dce8215..7b6cd67a263f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -988,36 +988,36 @@ enum mlx5_mcam_feature_groups { /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ROCE(mdev, cap) \ - MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ - MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, 
mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) @@ -1039,11 +1039,11 @@ enum mlx5_mcam_feature_groups { #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) @@ -1065,21 +1065,21 @@ enum mlx5_mcam_feature_groups { #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ODP(mdev, cap)\ - MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ - mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) + mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) #define MLX5_CAP_QOS(mdev, cap)\ - MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) + MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f4d6d390a9cf..1bc4641734da 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -738,9 +738,9 @@ struct mlx5_core_dev { char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; - u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; - u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; struct { + u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; } caps; -- cgit v1.2.3 From 3dd69e3dd21969a34406862ae299538e2e6387e7 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 28 Dec 2016 17:07:17 +0200 Subject: net/mlx5e: Reorder update stats Reorder update stats flow to update most important counters last, to get more accurate results. 
New update order: - PCIe counters - Port counters - Vport counters - Queue counters - Software counters Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9aed892cd350..3a06c81ef85e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -321,11 +321,11 @@ static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) void mlx5e_update_stats(struct mlx5e_priv *priv) { - mlx5e_update_q_counter(priv); - mlx5e_update_vport_counters(priv); + mlx5e_update_pcie_counters(priv); mlx5e_update_pport_counters(priv); + mlx5e_update_vport_counters(priv); + mlx5e_update_q_counter(priv); mlx5e_update_sw_counters(priv); - mlx5e_update_pcie_counters(priv); } void mlx5e_update_stats_work(struct work_struct *work) -- cgit v1.2.3 From 8a43c052c7c1a7fd6cde46591deeb39be63a1223 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Tue, 17 Jan 2017 16:31:19 -0600 Subject: Revert "net: qcom/emac: configure the external phy to allow pause frames" This reverts commit 3e884493448131179a5b7cae1ddca1028ffaecc8. With commit 529ed1275263 ("net: phy: phy drivers should not set SUPPORTED_[Asym_]Pause"), phylib now handles automatically enabling pause frame support in the PHY, and the MAC driver should follow suit. Since the EMAC driver driver does this, we no longer need to force pause frames support. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index d297ed961da6..98570eb6ef1a 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1004,12 +1004,6 @@ int emac_mac_up(struct emac_adapter *adpt) writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); - /* Enable pause frames. Without this feature, the EMAC has been shown - * to receive (and drop) frames with FCS errors at gigabit connections. - */ - adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - adpt->phydev->irq = PHY_IGNORE_INTERRUPT; phy_start(adpt->phydev); -- cgit v1.2.3 From dceeab0e5258fccad5dbf95b54821ae4e919e0b6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 17 Jan 2017 20:14:10 -0800 Subject: mlx4: support __GFP_MEMALLOC for rx Commit 04aeb56a1732 ("net/mlx4_en: allocate non 0-order pages for RX ring with __GFP_NOMEMALLOC") added code that appears to be not needed at that time, since mlx4 never used __GFP_MEMALLOC allocations anyway. As using memory reserves is a must in some situations (swap over NFS or iSCSI), this patch adds this flag. Note that this driver does not reuse pages (yet) so we do not have to add anything else. Signed-off-by: Eric Dumazet Cc: Konstantin Khlebnikov Cc: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index eac527e25ec9..e362f99334d0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -706,7 +706,8 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, do { if (mlx4_en_prepare_rx_desc(priv, ring, ring->prod & ring->size_mask, - GFP_ATOMIC | __GFP_COLD)) + GFP_ATOMIC | __GFP_COLD | + __GFP_MEMALLOC)) break; ring->prod++; } while (--missing); -- cgit v1.2.3 From a1a22c12060e4b9c52f45d4b3460f614e00162a2 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Jan 2017 07:40:36 -0800 Subject: net: ipv6: Keep nexthop of multipath route on admin down IPv6 deletes route entries associated with multipath routes on an admin down where IPv4 does not. For example: $ ip ro ls vrf red unreachable default metric 8192 1.1.1.0/24 metric 64 nexthop via 10.100.1.254 dev eth1 weight 1 nexthop via 10.100.2.254 dev eth2 weight 1 10.100.1.0/24 dev eth1 proto kernel scope link src 10.100.1.4 10.100.2.0/24 dev eth2 proto kernel scope link src 10.100.2.4 $ ip -6 ro ls vrf red 2001:db8:1::/120 dev eth1 proto kernel metric 256 pref medium 2001:db8:2:: dev red proto none metric 0 pref medium 2001:db8:2::/120 dev eth2 proto kernel metric 256 pref medium 2001:db8:11::/120 via 2001:db8:1::16 dev eth1 metric 1024 pref medium 2001:db8:11::/120 via 2001:db8:2::17 dev eth2 metric 1024 pref medium ... Set link down: $ ip li set eth1 down IPv4 retains the multihop route but flags eth1 route as dead: $ ip ro ls vrf red unreachable default metric 8192 1.1.1.0/24 nexthop via 10.100.1.16 dev eth1 weight 1 dead linkdown nexthop via 10.100.2.16 dev eth2 weight 1 10.100.2.0/24 dev eth2 proto kernel scope link src 10.100.2.4 and IPv6 deletes the route as part of flushing all routes for the device: $ ip -6 ro ls vrf red 2001:db8:2:: dev red proto none metric 0 pref medium 2001:db8:2::/120 dev eth2 proto kernel metric 256 pref medium 2001:db8:11::/120 via 2001:db8:2::17 dev eth2 metric 1024 pref medium ... Worse, on admin up of the device the multipath route has to be deleted to get this leg of the route re-added. This patch keeps routes that are part of a multipath route if ignore_routes_with_linkdown is set with the dead and linkdown flags enabling consistency between IPv4 and IPv6: $ ip -6 ro ls vrf red 2001:db8:2:: dev red proto none metric 0 pref medium 2001:db8:2::/120 dev eth2 proto kernel metric 256 pref medium 2001:db8:11::/120 via 2001:db8:1::16 dev eth1 metric 1024 dead linkdown pref medium 2001:db8:11::/120 via 2001:db8:2::17 dev eth2 metric 1024 pref medium ... Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5585c501a540..4b1f0f98a0e9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2711,13 +2711,16 @@ struct arg_dev_net { struct net *net; }; +/* called with write lock held for table with rt */ static int fib6_ifdown(struct rt6_info *rt, void *arg) { const struct arg_dev_net *adn = arg; const struct net_device *dev = adn->dev; if ((rt->dst.dev == dev || !dev) && - rt != adn->net->ipv6.ip6_null_entry) + rt != adn->net->ipv6.ip6_null_entry && + (rt->rt6i_nsiblings == 0 || + !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) return -1; return 0; @@ -3216,7 +3219,7 @@ static int rt6_fill_node(struct net *net, else rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; - if (!netif_carrier_ok(rt->dst.dev)) { + if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { rtm->rtm_flags |= RTNH_F_LINKDOWN; if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) rtm->rtm_flags |= RTNH_F_DEAD; -- cgit v1.2.3 From 062e008a6e83e7c4da7df0a9c6aefdbc849e2bb3 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 13 Jan 2017 15:35:36 -0800 Subject: mwifiex: pcie: use posted write to wake up firmware Depending on system factors (e.g., the PCIe link PM state), the first read to wake up the Wifi firmware can take a long time. There is no reason to use a (blocking, non-posted) read at this point, so let's just use a write instead. Write vs. read doesn't matter functionality-wise -- it's just a dummy operation. But let's make sure to re-write with the correct "ready" signature, since we check for that in other parts of the driver. This has been shown to decrease the time spent blocking in this function on RK3399. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 4d78087ea13a..396cd4a9423a 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -460,7 +460,6 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, /* This function wakes up the card by reading fw_status register. 
*/ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { - u32 fw_status; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -470,10 +469,10 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) if (reg->sleep_cookie) mwifiex_pcie_dev_wakeup_delay(adapter); - /* Reading fw_status register will wakeup device */ - if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) { + /* Accessing fw_status register will wakeup device */ + if (mwifiex_write_reg(adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { mwifiex_dbg(adapter, ERROR, - "Reading fw_status register failed\n"); + "Writing fw_status register failed\n"); return -1; } -- cgit v1.2.3 From 5d5ddb5e0d9bc3ff3f377c6e03a074d13606a6e5 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 13 Jan 2017 15:35:37 -0800 Subject: mwifiex: pcie: don't loop/retry interrupt status checks The following sequence occurs when using IEEE power-save on 8997: (a) driver sees SLEEP event (b) driver issues SLEEP CONFIRM (c) driver recevies CMD interrupt; within the interrupt processing loop, we do (d) and (e): (d) wait for FW sleep cookie (and often time out; it takes a while), FW is putting card into low power mode (e) re-check PCIE_HOST_INT_STATUS register; quit loop with 0 value But at (e), no one actually signaled an interrupt (i.e., we didn't check adapter->int_status). And what's more, because the card is going to sleep, this register read appears to take a very long time in some cases -- 3 milliseconds in my case! Now, I propose that (e) is completely unnecessary. If there were any additional interrupts signaled after the start of this loop, then the interrupt handler would have set adapter->int_status to non-zero and queued more work for the main loop -- and we'd catch it on the next iteration of the main loop. So this patch drops all the looping/re-reading of PCIE_HOST_INT_STATUS, which avoids the problematic (and slow) register read in step (e). Incidentally, this is a very similar issue to the one fixed in commit ec815dd2a5f1 ("mwifiex: prevent register accesses after host is sleeping"), except that the register read is just very slow instead of fatal in this case. Tested on 8997 in both MSI and (though not technically supported at the moment) MSI-X mode. 
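To summarize the structure the interrupt path ends up with: the hard interrupt handler latches the cause bits into adapter->int_status and queues the work, and the worker consumes that latch exactly once per pass; anything that fires while the worker is running sets the latch again and requeues the work, which is why re-polling PCIE_HOST_INT_STATUS inside the loop buys nothing. A minimal single-threaded sketch of that latch-and-consume handshake, with hypothetical names standing in for the real ISR/workqueue pair:

/* Sketch only: "IRQ latches cause bits, worker consumes the latch once"
 * pattern assumed above. Hypothetical names; a single-threaded
 * simulation instead of a real ISR/workqueue pair.
 */
#include <stdio.h>

#define INTR_TX_DONE  0x1
#define INTR_RX_READY 0x2

static unsigned int latched_status;     /* set by the "ISR", cleared by the worker */

static void fake_isr(unsigned int hw_status)
{
        latched_status |= hw_status;    /* latch, then schedule the worker */
}

static void worker(void)
{
        unsigned int status = latched_status;   /* take one snapshot ... */

        latched_status = 0;                     /* ... and clear the latch */

        if (status & INTR_TX_DONE)
                printf("handle TX completion\n");
        if (status & INTR_RX_READY)
                printf("handle RX data\n");
        /* No re-read of the hardware status register here: anything that
         * arrived meanwhile re-ran fake_isr() and is seen on the next pass.
         */
}

int main(void)
{
        fake_isr(INTR_TX_DONE | INTR_RX_READY);
        worker();
        return 0;
}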
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 102 +++++++++------------------- 1 file changed, 32 insertions(+), 70 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 396cd4a9423a..b8a07d35973a 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2334,79 +2334,41 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) } } } - while (pcie_ireg & HOST_INTR_MASK) { - if (pcie_ireg & HOST_INTR_DNLD_DONE) { - pcie_ireg &= ~HOST_INTR_DNLD_DONE; - mwifiex_dbg(adapter, INTR, - "info: TX DNLD Done\n"); - ret = mwifiex_pcie_send_data_complete(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_UPLD_RDY) { - pcie_ireg &= ~HOST_INTR_UPLD_RDY; - mwifiex_dbg(adapter, INTR, - "info: Rx DATA\n"); - ret = mwifiex_pcie_process_recv_data(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_EVENT_RDY) { - pcie_ireg &= ~HOST_INTR_EVENT_RDY; - mwifiex_dbg(adapter, INTR, - "info: Rx EVENT\n"); - ret = mwifiex_pcie_process_event_ready(adapter); - if (ret) - return ret; - } - - if (pcie_ireg & HOST_INTR_CMD_DONE) { - pcie_ireg &= ~HOST_INTR_CMD_DONE; - if (adapter->cmd_sent) { - mwifiex_dbg(adapter, INTR, - "info: CMD sent Interrupt\n"); - adapter->cmd_sent = false; - } - /* Handle command response */ - ret = mwifiex_pcie_process_cmd_complete(adapter); - if (ret) - return ret; - if (adapter->hs_activated) - return ret; - } - - if (card->msi_enable) { - spin_lock_irqsave(&adapter->int_lock, flags); - adapter->int_status = 0; - spin_unlock_irqrestore(&adapter->int_lock, flags); - } - - if (mwifiex_pcie_ok_to_access_hw(adapter)) { - if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, - &pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Read register failed\n"); - return -1; - } - if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { - if (mwifiex_write_reg(adapter, - PCIE_HOST_INT_STATUS, - ~pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } - } - - } - if (!card->msi_enable) { - spin_lock_irqsave(&adapter->int_lock, flags); - pcie_ireg |= adapter->int_status; - adapter->int_status = 0; - spin_unlock_irqrestore(&adapter->int_lock, flags); + if (pcie_ireg & HOST_INTR_DNLD_DONE) { + pcie_ireg &= ~HOST_INTR_DNLD_DONE; + mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n"); + ret = mwifiex_pcie_send_data_complete(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_UPLD_RDY) { + pcie_ireg &= ~HOST_INTR_UPLD_RDY; + mwifiex_dbg(adapter, INTR, "info: Rx DATA\n"); + ret = mwifiex_pcie_process_recv_data(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_EVENT_RDY) { + pcie_ireg &= ~HOST_INTR_EVENT_RDY; + mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n"); + ret = mwifiex_pcie_process_event_ready(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_CMD_DONE) { + pcie_ireg &= ~HOST_INTR_CMD_DONE; + if (adapter->cmd_sent) { + mwifiex_dbg(adapter, INTR, + "info: CMD sent Interrupt\n"); + adapter->cmd_sent = false; } + /* Handle command response */ + ret = mwifiex_pcie_process_cmd_complete(adapter); + if (ret) + return ret; } + mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); -- cgit v1.2.3 From fe116788393942eef11d5c0f1cb16b1ef423970d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 13 Jan 2017 15:35:38 -0800 Subject: mwifiex: pcie: read FROMDEVICE DMA-able memory with 
READ_ONCE() In mwifiex_delay_for_sleep_cookie(), we're looping and waiting for the PCIe endpoint to write a magic value back to memory, to signal that it has finished going to sleep. We're not letting the compiler know that this might change underneath our feet though. Let's do that, for good hygiene. I'm not aware of this fixing any concrete problems. I also give no guarantee that this loop is actually correct in any other way, but at least this looks like an improvement to me. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index b8a07d35973a..ca51411e4a4c 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -442,7 +442,7 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, for (count = 0; count < max_delay_loop_cnt; count++) { buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN; - sleep_cookie = *(u32 *)buffer; + sleep_cookie = READ_ONCE(*(u32 *)buffer); if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { mwifiex_dbg(adapter, INFO, -- cgit v1.2.3 From 0ed917d09d517646b4dffc96427e5a4cc14056de Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 13 Jan 2017 18:16:57 -0800 Subject: mwifiex: don't complain about 'unknown event id: 0x63' Marvell folks tell me this is a debugging event that the driver doesn't need to handle, but on 8997 w/ firmware 16.68.1.p97, I see several of these sorts of messages at (for instance) boot time: [ 13.825848] mwifiex_pcie 0000:01:00.0: event: unknown event id: 0x63 [ 14.838561] mwifiex_pcie 0000:01:00.0: event: unknown event id: 0x63 [ 14.850397] mwifiex_pcie 0000:01:00.0: event: unknown event id: 0x63 [ 32.529923] mwifiex_pcie 0000:01:00.0: event: unknown event id: 0x63 Let's handle this "event" with a much lower verbosity. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/fw.h | 1 + drivers/net/wireless/marvell/mwifiex/sta_event.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 55db158fd156..cb6a1a81d44e 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -550,6 +550,7 @@ enum mwifiex_channel_flags { #define EVENT_TX_DATA_PAUSE 0x00000055 #define EVENT_EXT_SCAN_REPORT 0x00000058 #define EVENT_RXBA_SYNC 0x00000059 +#define EVENT_UNKNOWN_DEBUG 0x00000063 #define EVENT_BG_SCAN_STOPPED 0x00000065 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f #define EVENT_MULTI_CHAN_INFO 0x0000006a diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 9df0c4dc06ed..96503d3d053f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -1009,6 +1009,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->event_skb->len - sizeof(eventcause)); break; + /* Debugging event; not used, but let's not print an ERROR for it. 
*/ + case EVENT_UNKNOWN_DEBUG: + mwifiex_dbg(adapter, EVENT, "event: debug\n"); + break; default: mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n", eventcause); -- cgit v1.2.3 From a62a77881b1b6708ffeddd9bf0529494f7b199e3 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 16 Jan 2017 11:17:57 +0100 Subject: brcmfmac: add support for BCM43455 with modalias sdio:c00v02D0dA9BF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BCM43455 is a more recent revision of the BCM4345. Some of the BCM43455 got a dedicated SDIO device ID which is currently not supported by brcmfmac. Adding the new sdio_device_id to brcmfmac is enough to get the BCM43455 supported because the chip itself is already supported (due to BCM4345 support in the driver). Signed-off-by: Martin Blumenstingl Acked-by: Arend van Spriel Reviewed-by: Andreas Färber Tested-by: Andreas Färber Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1 + include/linux/mmc/sdio_ids.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 72139b579b18..5bc2ba214735 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1104,6 +1104,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356), { /* end: all zeroes */ } diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index d43ef96bf075..71b113e1223f 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -36,6 +36,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 +#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 -- cgit v1.2.3 From d607e396566a10ad1dc64affe82c019098deb162 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Tue, 17 Jan 2017 18:18:52 -0500 Subject: rtl8xxxu: Mark 8192eu device 0x0bda:0x818b as tested Device reported as working fine, so tell the driver not to warn about it being untested. 
Reported-by: Aex Aey Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 3a86675020a2..99cfd2b0d76e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6000,6 +6000,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, case 0x8176: case 0x8178: case 0x817f: + case 0x818b: untested = 0; break; } -- cgit v1.2.3 From c14239f23adb80aa2532909e312986317ba3cd09 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Tue, 17 Jan 2017 18:18:53 -0500 Subject: rtl8xxxu: Add another 8192eu device to the USB list TP-Link TL-WN822N v4 (2357:0108) Reported-by: Gregory Auzanneau Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 99cfd2b0d76e..016ea24a8cd5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6197,6 +6197,9 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8723au_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, +/* TP-Link TL-WN822N v4 */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0108, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, /* Tested by Myckel Habets */ {USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, -- cgit v1.2.3 From 66dfa26ebca8de9764190fed6ef509cd2b514c74 Mon Sep 17 00:00:00 2001 From: Axel Köllhofer Date: Tue, 17 Jan 2017 18:18:54 -0500 Subject: rtl8xxxu: Add USB ID for D-Link DWA-131 rev E1 (rtl8192eu) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was tested by David Patiño. 
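For context, supporting these devices is purely a matter of adding rows to the driver's USB id table; the driver_info field in each row carries the per-chip ops that probe later picks up, so no other code changes are needed. A minimal standalone sketch of that id-table lookup pattern, with a simplified table type and illustrative values in place of the real usb_device_id machinery:

/* Sketch only: the id-table / driver_info lookup pattern used by the
 * driver. Table type and values below are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_id {
        unsigned short vendor, product;
        const char *driver_info;        /* stands in for the per-chip fops pointer */
};

static const struct demo_id dev_table[] = {
        { 0x2357, 0x0108, "rtl8192eu ops" },    /* e.g. TP-Link TL-WN822N v4 */
        { 0x2001, 0x3319, "rtl8192eu ops" },    /* e.g. D-Link DWA-131 rev E1 */
        { 0, 0, NULL }                          /* terminator */
};

static const char *match(unsigned short vendor, unsigned short product)
{
        const struct demo_id *id;

        for (id = dev_table; id->driver_info; id++)
                if (id->vendor == vendor && id->product == product)
                        return id->driver_info;
        return NULL;
}

int main(void)
{
        const char *info = match(0x2001, 0x3319);

        printf("%s\n", info ? info : "no match");
        return 0;
}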
Reported-by: David Patiño Signed-off-by: Axel Köllhofer Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 016ea24a8cd5..a8386f4195c4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6200,6 +6200,9 @@ static struct usb_device_id dev_table[] = { /* TP-Link TL-WN822N v4 */ {USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0108, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, +/* D-Link DWA-131 rev E1, tested by David Patiño */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3319, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, /* Tested by Myckel Habets */ {USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, -- cgit v1.2.3 From 5407fd7de69f3352aed659244d4bef18e3cabf5c Mon Sep 17 00:00:00 2001 From: Axel Köllhofer Date: Tue, 17 Jan 2017 18:18:55 -0500 Subject: rtl8xxxu: Add additional USB IDs for rtl8192eu devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These IDs originate from the vendor driver Signed-off-by: Axel Köllhofer Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index a8386f4195c4..3585a049101b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6354,6 +6354,13 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, +/* found in rtl8192eu vendor driver */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0107, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab33, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, #endif { } }; -- cgit v1.2.3 From 1ee83789fc1fb40f4d2f3dddd838c2c3860dadb9 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Tue, 17 Jan 2017 18:18:56 -0500 Subject: rtl8xxxu: Update author/maintainer contact info Update copyright year and email address. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- MAINTAINERS | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 4 ++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index c8df0e1ef833..0b5c80e13911 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10584,7 +10584,7 @@ F: drivers/net/wireless/realtek/rtlwifi/ F: drivers/net/wireless/realtek/rtlwifi/rtl8192ce/ RTL8XXXU WIRELESS DRIVER (rtl8xxxu) -M: Jes Sorensen +M: Jes Sorensen L: linux-wireless@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel S: Maintained diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index df551b2b56eb..95e3993d8a33 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 - 2016 Jes Sorensen + * Copyright (c) 2014 - 2017 Jes Sorensen * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c index f9e2050812ab..a41a29612582 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8188c/8188r/8192c specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen + * Copyright (c) 2014 - 2017 Jes Sorensen * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index a1178c5d6ad8..80fee699f58a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8192e specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen + * Copyright (c) 2014 - 2017 Jes Sorensen * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c index aef373028155..174631132b96 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8723a specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen + * Copyright (c) 2014 - 2017 Jes Sorensen * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. 
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 02b8ddd98a95..c4b86a84a721 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8723b specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen + * Copyright (c) 2014 - 2017 Jes Sorensen * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 3585a049101b..e544dd1d618c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver * - * Copyright (c) 2014 - 2016 Jes Sorensen + * Copyright (c) 2014 - 2017 Jes Sorensen * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. @@ -48,7 +48,7 @@ static bool rtl8xxxu_dma_aggregation; static int rtl8xxxu_dma_agg_timeout = -1; static int rtl8xxxu_dma_agg_pages = -1; -MODULE_AUTHOR("Jes Sorensen "); +MODULE_AUTHOR("Jes Sorensen "); MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin"); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 315ccfb2dff5..3d3e2e1ada6f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 - 2016 Jes Sorensen + * Copyright (c) 2014 - 2017 Jes Sorensen * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as -- cgit v1.2.3 From 8e290cecdd0178f3d4cf7d463c51dc7e462843b4 Mon Sep 17 00:00:00 2001 From: Gavin Li Date: Tue, 17 Jan 2017 15:24:05 -0800 Subject: brcmfmac: fix incorrect event channel deduction brcmf_sdio_fromevntchan() was being called on the the data frame rather than the software header, causing some frames to be mischaracterized as on the event channel rather than the data channel. This fixes a major performance regression (due to dropped packets). With this patch the download speed jumped from 1Mbit/s back up to 40MBit/s due to the sheer amount of packets being incorrectly processed. 
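For reference, the underlying layout issue: each glommed frame starts with a hardware header, followed by a software header that carries the channel number, so classifying the frame means skipping the hardware header first rather than peeking at the start of the buffer. A minimal standalone sketch of that offset handling, with made-up header sizes and field positions rather than the real SDPCM layout:

/* Sketch only: why the channel must be read from the software header,
 * not from the start of the frame. Header lengths and field offsets
 * below are illustrative, not the real SDPCM layout.
 */
#include <stdio.h>

#define HWHDR_LEN 4                     /* hypothetical hardware header size */
#define SWHDR_CHANNEL_OFFSET 1          /* hypothetical channel byte in sw header */

#define CHANNEL_EVENT 1
#define CHANNEL_DATA  2

static int frame_channel(const unsigned char *frame)
{
        /* Skip the hardware header first, then read the channel field. */
        return frame[HWHDR_LEN + SWHDR_CHANNEL_OFFSET];
}

int main(void)
{
        unsigned char frame[16] = { 0 };

        frame[HWHDR_LEN + SWHDR_CHANNEL_OFFSET] = CHANNEL_DATA;

        /* Reading from offset 0 (the old bug) would look at hardware header
         * bytes and could misclassify the frame; skipping HWHDR_LEN does not.
         */
        printf("channel=%d (event=%d, data=%d)\n",
               frame_channel(frame), CHANNEL_EVENT, CHANNEL_DATA);
        return 0;
}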
Fixes: c56caa9db8ab ("brcmfmac: screening firmware event packet") Signed-off-by: Gavin Li Cc: # 4.7+ Acked-by: Arend van Spriel [kvalo@codeaurora.org: improve commit logs based on email discussion] Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index dfb0658713d9..d2219885071f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -1661,7 +1661,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) pfirst->len, pfirst->next, pfirst->prev); skb_unlink(pfirst, &bus->glom); - if (brcmf_sdio_fromevntchan(pfirst->data)) + if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN])) brcmf_rx_event(bus->sdiodev->dev, pfirst); else brcmf_rx_frame(bus->sdiodev->dev, pfirst, -- cgit v1.2.3 From c8d870794d5dd42d6e05a78cc92d1ff7acf11f6a Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 18 Jan 2017 11:48:51 +0100 Subject: brcmfmac: drop unneeded function declarations from headers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Functions brcmf_c_prec_enq and brcmf_sdio_init don't exist so we really don't need their declarations. Function brcmf_parse_tlvs is used in cfg80211.c only so make it static and drop from header as well. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 4 ---- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 2 -- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index e21f7600122b..b5bb971ef56a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -218,9 +218,6 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len) * interface functions from common layer */ -bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt, - int prec); - /* Receive frame for delivery to OS. Callee disposes of rxp. */ void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event); /* Receive async event packet from firmware. Callee disposes of rxp. 
*/ @@ -247,7 +244,6 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len); #ifdef CONFIG_BRCMFMAC_SDIO void brcmf_sdio_exit(void); -void brcmf_sdio_init(void); void brcmf_sdio_register(void); #endif #ifdef CONFIG_BRCMFMAC_USB diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 729bf3351feb..ec1171c523ab 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -326,7 +326,7 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, * triples, returning a pointer to the substring whose first element * matches tag */ -const struct brcmf_tlv * +static const struct brcmf_tlv * brcmf_parse_tlvs(const void *buf, int buflen, uint key) { const struct brcmf_tlv *elt = buf; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 0c9a7081fca9..8f19d95d4175 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -396,8 +396,6 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, const u8 *vndr_ie_buf, u32 vndr_ie_len); s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif); -const struct brcmf_tlv * -brcmf_parse_tlvs(const void *buf, int buflen, uint key); u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, struct ieee80211_channel *ch); bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, -- cgit v1.2.3 From f5611e038172101561b570554c81e290a39517ed Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 18 Jan 2017 11:48:52 +0100 Subject: brcmfmac: move brcmf_c_set_joinpref_default declaration to common.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Function brcmf_c_set_joinpref_default is in common.c, so move it to the related header. 
Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h | 2 ++ drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index bd095abca393..a62f8e70b320 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -65,6 +65,8 @@ struct brcmf_mp_device { } bus; }; +void brcmf_c_set_joinpref_default(struct brcmf_if *ifp); + struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, enum brcmf_bus_type bus_type, u32 chip, u32 chiprev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index c94dcab260d0..de3197be5491 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -216,7 +216,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); -void brcmf_c_set_joinpref_default(struct brcmf_if *ifp); int __init brcmf_core_init(void); void __exit brcmf_core_exit(void); -- cgit v1.2.3 From bfa7295e5b4d32cdab28d4cdc3a9791f73aed089 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 18 Jan 2017 11:48:53 +0100 Subject: brcmfmac: drop brcmf_bus_detach and inline its code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Driver used to call brcmf_bus_detach only from one place and it already contained a check for drvr not being NULL. We can get rid of this extra function, call brcmf_bus_stop directly and simplify the code. There also isn't brcmf_bus_attach function which one could expect so it looks more consistent this way. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 9e6f60a0ec3e..372d2c480f29 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1075,16 +1075,6 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len) } } -static void brcmf_bus_detach(struct brcmf_pub *drvr) -{ - brcmf_dbg(TRACE, "Enter\n"); - - if (drvr) { - /* Stop the bus module */ - brcmf_bus_stop(drvr->bus_if); - } -} - void brcmf_dev_reset(struct device *dev) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); @@ -1131,7 +1121,7 @@ void brcmf_detach(struct device *dev) brcmf_fws_deinit(drvr); - brcmf_bus_detach(drvr); + brcmf_bus_stop(drvr->bus_if); brcmf_proto_detach(drvr); -- cgit v1.2.3 From e8cd47501fa0c0a591bb07d5878dcc8d63d60e57 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 18 Jan 2017 11:48:54 +0100 Subject: brcmfmac: rename brcmf_bus_start function to brcmf_bus_started MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This intends to make init/attach process slightly easier to follow. 
What driver was doing in brcmf_bus_start wasn't bus specific at all and function brcmf_bus_stop wasn't undoing things done there. This function is supposed to be called by bus specific code when the bus is ready. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index b5bb971ef56a..76693df34742 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -238,7 +238,7 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success); /* Configure the "global" bus state used by upper layers */ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state); -int brcmf_bus_start(struct device *dev); +int brcmf_bus_started(struct device *dev); s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len); void brcmf_bus_add_txhdrlen(struct device *dev, uint len); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index b1f574f5ee6c..f7c8c2e80349 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -74,7 +74,7 @@ module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR); MODULE_PARM_DESC(roamoff, "Do not use internal roaming engine"); #ifdef DEBUG -/* always succeed brcmf_bus_start() */ +/* always succeed brcmf_bus_started() */ static int brcmf_ignore_probe_fail; module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0); MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 372d2c480f29..b73a55b00fa7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -966,7 +966,7 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data) return 0; } -int brcmf_bus_start(struct device *dev) +int brcmf_bus_started(struct device *dev) { int ret = -1; struct brcmf_bus *bus_if = dev_get_drvdata(dev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 048027f2085b..19db8f26729a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1572,7 +1572,7 @@ static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo) if (ret) { brcmf_err("brcmf_attach failed\n"); } else { - ret = brcmf_bus_start(&devinfo->pdev->dev); + ret = brcmf_bus_started(&devinfo->pdev->dev); if (ret) brcmf_err("dongle is not responding\n"); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index d2219885071f..c5744b45ec8f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4065,7 +4065,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, sdio_release_host(sdiodev->func[1]); - err = brcmf_bus_start(dev); + err = brcmf_bus_started(dev); if (err != 0) { brcmf_err("dongle is not responding\n"); goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 2f978a39b58a..d93ebbdc7737 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1148,7 +1148,7 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; - ret = brcmf_bus_start(devinfo->dev); + ret = brcmf_bus_started(devinfo->dev); if (ret) goto fail; -- cgit v1.2.3 From 102e295ed5a42cc7428baf1d1ffc08826dad99e7 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 19 Jan 2017 11:25:19 -0600 Subject: rtlwifi: Redo debugging macros RTPRINT and RT_PRINT_DATA These two debugging formss implement debugging using rather complicated macro constructions. These are replaced with compiled code that is easier to understand. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/debug.c | 42 ++++++++++++++++++++++++---- drivers/net/wireless/realtek/rtlwifi/debug.h | 33 ++++++++++------------ 2 files changed, 51 insertions(+), 24 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index 33905bbacad2..cd2efacd0b10 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init); #ifdef CONFIG_RTLWIFI_DEBUG void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, - const char *modname, const char *fmt, ...) + const char *fmt, ...) { if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) && (level <= rtlpriv->dbg.global_debuglevel))) { @@ -63,13 +63,45 @@ void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, vaf.fmt = fmt; vaf.va = &args; - printk(KERN_DEBUG "%s:%ps:<%lx-%x> %pV", - modname, __builtin_return_address(0), - in_interrupt(), in_atomic(), - &vaf); + pr_debug(":<%lx> %pV", in_interrupt(), &vaf); va_end(args); } } EXPORT_SYMBOL_GPL(_rtl_dbg_trace); + +void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *fmt, ...) 
+{ + if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) && + (level <= rtlpriv->dbg.global_debuglevel))) { + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_debug("%pV", &vaf); + + va_end(args); + } +} +EXPORT_SYMBOL_GPL(_rtl_dbg_print); + +void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *titlestring, + const void *hexdata, int hexdatalen) +{ + if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && + ((level) <= rtlpriv->dbg.global_debuglevel))) { + pr_debug("In process \"%s\" (pid %i): %s\n", + current->comm, current->pid, titlestring); + print_hex_dump_bytes("", DUMP_PREFIX_NONE, + hexdata, hexdatalen); + } +} +EXPORT_SYMBOL_GPL(_rtl_dbg_print_data); + #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index a6cc3ca05c5e..90c670bb008f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -168,34 +168,29 @@ enum dbgp_flag_e { struct rtl_priv; -__printf(5, 6) +__printf(4, 5) void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, - const char *modname, const char *fmt, ...); + const char *fmt, ...); + +__printf(4, 5) +void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *fmt, ...); + +void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *titlestring, + const void *hexdata, int hexdatalen); #define RT_TRACE(rtlpriv, comp, level, fmt, ...) \ _rtl_dbg_trace(rtlpriv, comp, level, \ - KBUILD_MODNAME, fmt, ##__VA_ARGS__) + fmt, ##__VA_ARGS__) #define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \ -do { \ - if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \ - printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \ - ##__VA_ARGS__); \ - } \ -} while (0) + _rtl_dbg_print(rtlpriv, dbgtype, dbgflag, fmt, ##__VA_ARGS__) #define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \ _hexdatalen) \ -do { \ - if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) && \ - (_level <= rtlpriv->dbg.global_debuglevel))) { \ - printk(KERN_DEBUG "%s: In process \"%s\" (pid %i): %s\n", \ - KBUILD_MODNAME, current->comm, current->pid, \ - _titlestring); \ - print_hex_dump_bytes("", DUMP_PREFIX_NONE, \ - _hexdata, _hexdatalen); \ - } \ -} while (0) + _rtl_dbg_print_data(rtlpriv, _comp, _level, \ + _titlestring, _hexdata, _hexdatalen) #else -- cgit v1.2.3 From c34df318ec9fccd92f23b4ac15f06cbf7bfeec0c Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 19 Jan 2017 11:25:20 -0600 Subject: rtlwifi: Convert COMP_XX entries into a proper debugging mask The debugging macros contain a parameter COMP_XX that could be used as a mask; however, the code turns all these various bits on at the same time. This change implements them as a proper mask, and adds module parameters to set the mask at load time. The current name "debug" for the debug level has been changed to "debug_level" to better differentiate it from "debug_mask". The debug routines have also been changed to interrogate the structure that is loaded at entry time. As a result, the structure rtl_debug is no longer needed, and all references to it are deleted. 
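For reference, the gate that the new debug_mask/debug_level parameters provide boils down to a per-component bit test combined with a verbosity comparison, evaluated at call time from the module parameters. A minimal standalone sketch of that kind of gate, with illustrative component bits and a plain vprintf in place of the rtlwifi plumbing:

/* Sketch only: component-mask plus verbosity-level gate of the kind
 * introduced here. Names, bits and values are illustrative.
 */
#include <stdio.h>
#include <stdarg.h>

#define COMP_INIT 0x1ULL
#define COMP_RECV 0x2ULL

static unsigned long long debug_mask = COMP_INIT;       /* module parameter in the driver */
static int debug_level = 2;                             /* module parameter in the driver */

static void dbg_print(unsigned long long comp, int level, const char *fmt, ...)
{
        va_list args;

        /* Print only if the component bit is enabled and the message is
         * not more verbose than the configured level.
         */
        if (!(comp & debug_mask) || level > debug_level)
                return;

        va_start(args, fmt);
        vprintf(fmt, args);
        va_end(args);
}

int main(void)
{
        dbg_print(COMP_INIT, 1, "init: printed\n");
        dbg_print(COMP_RECV, 1, "recv: filtered out by the mask\n");
        return 0;
}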
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 6 +-- drivers/net/wireless/realtek/rtlwifi/debug.c | 43 +++++----------------- drivers/net/wireless/realtek/rtlwifi/debug.h | 2 - drivers/net/wireless/realtek/rtlwifi/pci.c | 10 ----- .../net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 11 +++--- .../net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 11 +++--- .../net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 10 +++-- .../net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 11 +++--- .../net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 11 +++--- .../net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 11 +++--- .../net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 11 +++--- .../net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 11 +++--- .../net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 11 +++--- drivers/net/wireless/realtek/rtlwifi/usb.c | 1 - drivers/net/wireless/realtek/rtlwifi/wifi.h | 15 ++------ 15 files changed, 70 insertions(+), 105 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 13325e15929e..1b23b2426db3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2094,7 +2094,7 @@ static ssize_t rtl_show_debug_level(struct device *d, struct ieee80211_hw *hw = dev_get_drvdata(d); struct rtl_priv *rtlpriv = rtl_priv(hw); - return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel); + return sprintf(buf, "0x%08X\n", rtlpriv->cfg->mod_params->debug_level); } static ssize_t rtl_store_debug_level(struct device *d, @@ -2111,10 +2111,10 @@ static ssize_t rtl_store_debug_level(struct device *d, RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "%s is not in hex or decimal form.\n", buf); } else { - rtlpriv->dbg.global_debuglevel = val; + rtlpriv->cfg->mod_params->debug_level = val; RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "debuglevel:%x\n", - rtlpriv->dbg.global_debuglevel); + rtlpriv->cfg->mod_params->debug_level); } return strnlen(buf, count); diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index cd2efacd0b10..7ecac6116d5d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -26,35 +26,12 @@ #include -void rtl_dbgp_flag_init(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 i; - - rtlpriv->dbg.global_debugcomponents = - COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND | - COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC | - COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC | - COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS | - COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD | - COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN | - COMP_EASY_CONCURRENT | COMP_EFUSE | COMP_QOS | COMP_MAC80211 | - COMP_REGD | COMP_CHAN | COMP_BT_COEXIST; - - - for (i = 0; i < DBGP_TYPE_MAX; i++) - rtlpriv->dbg.dbgp_type[i] = 0; - - /*Init Debug flag enable condition */ -} -EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init); - #ifdef CONFIG_RTLWIFI_DEBUG void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, const char *fmt, ...) 
{ - if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) && - (level <= rtlpriv->dbg.global_debuglevel))) { + if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) && + (level <= rtlpriv->cfg->mod_params->debug_level))) { struct va_format vaf; va_list args; @@ -63,7 +40,7 @@ void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, vaf.fmt = fmt; vaf.va = &args; - pr_debug(":<%lx> %pV", in_interrupt(), &vaf); + pr_info(":<%lx> %pV", in_interrupt(), &vaf); va_end(args); } @@ -73,8 +50,8 @@ EXPORT_SYMBOL_GPL(_rtl_dbg_trace); void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level, const char *fmt, ...) { - if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) && - (level <= rtlpriv->dbg.global_debuglevel))) { + if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) && + (level <= rtlpriv->cfg->mod_params->debug_level))) { struct va_format vaf; va_list args; @@ -83,7 +60,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level, vaf.fmt = fmt; vaf.va = &args; - pr_debug("%pV", &vaf); + pr_info("%pV", &vaf); va_end(args); } @@ -94,10 +71,10 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level, const char *titlestring, const void *hexdata, int hexdatalen) { - if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && - ((level) <= rtlpriv->dbg.global_debuglevel))) { - pr_debug("In process \"%s\" (pid %i): %s\n", - current->comm, current->pid, titlestring); + if (unlikely(((comp) & rtlpriv->cfg->mod_params->debug_mask) && + ((level) <= rtlpriv->cfg->mod_params->debug_level))) { + pr_info("In process \"%s\" (pid %i): %s\n", + current->comm, current->pid, titlestring); print_hex_dump_bytes("", DUMP_PREFIX_NONE, hexdata, hexdatalen); } diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index 90c670bb008f..bf5339f1c1bc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -218,6 +218,4 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv, } #endif - -void rtl_dbgp_flag_init(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 81ac0b393304..f6d4dbeec253 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2212,16 +2212,6 @@ int rtl_pci_probe(struct pci_dev *pdev, rtlpriv->intf_ops = &rtl_pci_ops; rtlpriv->glb_var = &rtl_global_var; - /* - *init dbgp flags before all - *other functions, because we will - *use it in other funtions like - *RT_TRACE/RT_PRINT/RTL_PRINT_DATA - *you can not use these macro - *before this - */ - rtl_dbgp_flag_init(hw); - /* MEM map */ err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 276c74539fb3..7661cfa53032 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -131,8 +131,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0); rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -276,7 +274,8 @@ static struct rtl_mod_params 
rtl88ee_mod_params = { .swctrl_lps = false, .fwctrl_lps = false, .msi_support = true, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, }; static const struct rtl_hal_cfg rtl88ee_hal_cfg = { @@ -392,7 +391,8 @@ MODULE_DESCRIPTION("Realtek 8188E 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8188efw.bin"); module_param_named(swenc, rtl88ee_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl88ee_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl88ee_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl88ee_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444); @@ -404,7 +404,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index 9bd2bff6212a..efedd918d10a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -130,8 +130,6 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -247,7 +245,8 @@ static struct rtl_mod_params rtl92ce_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, }; static const struct rtl_hal_cfg rtl92ce_hal_cfg = { @@ -364,7 +363,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cfwU.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192cfwU_B.bin"); module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92ce_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92ce_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92ce_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444); @@ -372,7 +372,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, 
rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 935e8308e2e6..96c923b3feb4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -61,7 +61,6 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; rtlpriv->cfg->mod_params->sw_crypto = rtlpriv->cfg->mod_params->sw_crypto; @@ -157,13 +156,16 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { static struct rtl_mod_params rtl92cu_mod_params = { .sw_crypto = 0, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, }; module_param_named(swenc, rtl92cu_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92cu_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92cu_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92cu_mod_params.debug_mask, ullong, 0644); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { /* rx */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 51463d7b130a..16132c66e5e1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -140,8 +140,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -254,7 +252,8 @@ static struct rtl_mod_params rtl92de_mod_params = { .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, }; static const struct rtl_hal_cfg rtl92de_hal_cfg = { @@ -364,15 +363,17 @@ MODULE_DESCRIPTION("Realtek 8192DE 802.11n Dual Mac PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192defw.bin"); module_param_named(swenc, rtl92de_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92de_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92de_mod_params.debug_level, int, 0644); module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444); +module_param_named(debug_mask, rtl92de_mod_params.debug_mask, ullong, 0644); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index eddc704b3ee3..554f2dc86bc5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -133,8 +133,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) 0); rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -258,7 +256,8 @@ static struct rtl_mod_params rtl92ee_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, }; static const struct rtl_hal_cfg rtl92ee_hal_cfg = { @@ -368,7 +367,8 @@ MODULE_DESCRIPTION("Realtek 8192EE 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192eefw.bin"); module_param_named(swenc, rtl92ee_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92ee_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92ee_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92ee_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl92ee_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92ee_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92ee_mod_params.fwctrl_lps, bool, 0444); @@ -380,7 +380,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 3c66d00abbac..2006b09ea74f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -178,8 +178,6 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtlpci->first_init = true; - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -297,7 +295,8 @@ static struct rtl_mod_params rtl92se_mod_params = { .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, }; /* Because memory R/W bursting will cause system hang/crash @@ -416,7 +415,8 @@ MODULE_DESCRIPTION("Realtek 8192S/8191S 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192sefw.bin"); module_param_named(swenc, rtl92se_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92se_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92se_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92se_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444); module_param_named(swlps, 
rtl92se_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444); @@ -424,7 +424,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 401f54266f15..7bf9f2557920 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -145,8 +145,6 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) (u32)(PHIMR_RXFOVW | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -268,7 +266,8 @@ static struct rtl_mod_params rtl8723e_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, .msi_support = false, .disable_watchdog = false, }; @@ -382,7 +381,8 @@ MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8723efw.bin"); module_param_named(swenc, rtl8723e_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl8723e_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl8723e_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl8723e_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444); @@ -394,7 +394,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index dd42c1a6d986..e571b876f0af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -144,8 +144,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) HSIMR_RON_INT_EN | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -271,7 +269,8 @@ static struct rtl_mod_params rtl8723be_mod_params = { 
.fwctrl_lps = true, .msi_support = false, .disable_watchdog = false, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, .ant_sel = 0, }; @@ -386,7 +385,8 @@ MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin"); module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl8723be_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl8723be_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); @@ -399,7 +399,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 220de5f89dbc..cd2a53b7e053 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -160,8 +160,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.wo_wlan_mode = WAKE_ON_MAGIC_PACKET | WAKE_ON_PATTERN_MATCH; - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -309,7 +307,8 @@ static struct rtl_mod_params rtl8821ae_mod_params = { .fwctrl_lps = true, .msi_support = true, .int_clear = true, - .debug = 0, + .debug_level = 0, + .debug_mask = 0, .disable_watchdog = 0, }; @@ -430,7 +429,8 @@ MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl8821ae_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl8821ae_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl8821ae_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8821ae_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); @@ -443,7 +443,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to 
disable the watchdog (default 0)\n"); MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index b2b62ef8b07d..20d3fcc19a58 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1073,7 +1073,6 @@ int rtl_usb_probe(struct usb_interface *intf, rtlpriv->rtlhal.interface = INTF_USB; rtlpriv->cfg = rtl_hal_cfg; rtlpriv->intf_ops = &rtl_usb_ops; - rtl_dbgp_flag_init(hw); /* Init IO handler */ _rtl_usb_io_handler_init(&udev->dev, hw); rtlpriv->cfg->ops->read_chip_version(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index dafe486f8448..262c02a347cf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2221,11 +2221,13 @@ struct rtl_intf_ops { }; struct rtl_mod_params { + /* default: 0,0 */ + u64 debug_mask; /* default: 0 = using hardware encryption */ bool sw_crypto; /* default: 0 = DBG_EMERG (0)*/ - int debug; + int debug_level; /* default: 1 = using no linked power save */ bool inactiveps; @@ -2345,16 +2347,6 @@ struct rtl_works { struct work_struct fill_h2c_cmd; }; -struct rtl_debug { - u32 dbgp_type[DBGP_TYPE_MAX]; - int global_debuglevel; - u64 global_debugcomponents; - - /* add for proc debug */ - struct proc_dir_entry *proc_dir; - char proc_name[20]; -}; - #define MIMO_PS_STATIC 0 #define MIMO_PS_DYNAMIC 1 #define MIMO_PS_NOLIMIT 3 @@ -2583,7 +2575,6 @@ struct rtl_priv { /* sta entry list for ap adhoc or mesh */ struct list_head entry_list; - struct rtl_debug dbg; int max_fw_size; /* -- cgit v1.2.3 From 9336d376edf131e00ef986741afc134b114cfc37 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 19 Jan 2017 11:25:21 -0600 Subject: rtlwifi: Remove debugging entry in sysfs As the kernel provides access to module parameters through entries in /sys/module//parameters/, there is no need for a private interface. Thus the existing code for setting the debug level is removed. 
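For illustration only, and not part of the patch itself: a minimal user-space sketch of how the debug level can now be adjusted at runtime through the standard module-parameter files instead of the removed private sysfs entry. The module name "rtl8188ee" and the value written are assumptions chosen purely as an example; substitute whichever rtlwifi driver is actually loaded. Writing requires root, since the parameter is registered with mode 0644 above.

#include <stdio.h>

/*
 * Hypothetical example: raise the verbosity of one rtlwifi driver by
 * writing to its debug_level module parameter under /sys/module/.
 */
int main(void)
{
	const char *path = "/sys/module/rtl8188ee/parameters/debug_level";
	FILE *f = fopen(path, "w");	/* root-writable, parameter mode 0644 */

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "5\n");	/* 5 = most verbose value in the documented 0-5 range */
	return fclose(f) ? 1 : 0;
}

The same path pattern applies to the new debug_mask parameter added by this series, e.g. .../parameters/debug_mask for selecting debug components.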
Reported-by: Kalle Valo Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 59 ----------------------------- drivers/net/wireless/realtek/rtlwifi/base.h | 1 - drivers/net/wireless/realtek/rtlwifi/pci.c | 8 ---- 3 files changed, 68 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 1b23b2426db3..01cf0a9aa31b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2083,65 +2083,6 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len) } EXPORT_SYMBOL_GPL(rtl_recognize_peer); -/********************************************************* - * - * sysfs functions - * - *********************************************************/ -static ssize_t rtl_show_debug_level(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct ieee80211_hw *hw = dev_get_drvdata(d); - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return sprintf(buf, "0x%08X\n", rtlpriv->cfg->mod_params->debug_level); -} - -static ssize_t rtl_store_debug_level(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct ieee80211_hw *hw = dev_get_drvdata(d); - struct rtl_priv *rtlpriv = rtl_priv(hw); - unsigned long val; - int ret; - - ret = kstrtoul(buf, 0, &val); - if (ret) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, - "%s is not in hex or decimal form.\n", buf); - } else { - rtlpriv->cfg->mod_params->debug_level = val; - RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, - "debuglevel:%x\n", - rtlpriv->cfg->mod_params->debug_level); - } - - return strnlen(buf, count); -} - -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, - rtl_show_debug_level, rtl_store_debug_level); - -static struct attribute *rtl_sysfs_entries[] = { - - &dev_attr_debug_level.attr, - - NULL -}; - -/* - * "name" is folder name witch will be - * put in device directory like : - * sys/devices/pci0000:00/0000:00:1c.4/ - * 0000:06:00.0/rtl_sysfs - */ -struct attribute_group rtl_attribute_group = { - .name = "rtlsysfs", - .attrs = rtl_sysfs_entries, -}; -EXPORT_SYMBOL_GPL(rtl_attribute_group); - MODULE_AUTHOR("lizhaoming "); MODULE_AUTHOR("Realtek WlanFAE "); MODULE_AUTHOR("Larry Finger "); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 74233d601a90..6c770aecebe7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -148,7 +148,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw, u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie); void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len); u8 rtl_tid_to_ac(u8 tid); -extern struct attribute_group rtl_attribute_group; void rtl_easy_concurrent_retrytimer_callback(unsigned long data); extern struct rtl_global_var rtl_global_var; void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index f6d4dbeec253..b402f438b1af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2289,12 +2289,6 @@ int rtl_pci_probe(struct pci_dev *pdev, } rtlpriv->mac80211.mac80211_registered = 1; - err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); - if (err) { - pr_err("failed to create sysfs device attributes\n"); - goto fail3; - } - /*init rfkill */ rtl_init_rfkill(hw); /* 
Init PCI sw */ @@ -2344,8 +2338,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev) wait_for_completion(&rtlpriv->firmware_loading_complete); clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); - sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group); - /*ieee80211_unregister_hw will call ops_stop */ if (rtlmac->mac80211_registered == 1) { ieee80211_unregister_hw(hw); -- cgit v1.2.3 From 89d32c9071aacdd7f631c36ff9c7d3403229d568 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 19 Jan 2017 14:28:06 -0600 Subject: rtlwifi: Download firmware as bytes rather than as dwords The firmware is read from disk as a little-endian byte string. The code that loads the firmware into the device transfers it as 4-byte quantities. The routines that write multi-byte quantities on BE hardware assume that the data are in CPU order, and automatically do the conversion to the LE order required by the device. As a result, the firmware is transmitted incorrectly. Rather than do multiple byte swaps on the data, the download routine is revised to transmit bytes rather than dwords. Although the number of I/O operations is increased, the firmware is not often loaded. All drivers have the same bug, and use essentially the same code to download firmware. These routines have been moved into rtlwifi. Some CamelCase variables have been renamed. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/efuse.c | 45 ++++++++++++++ drivers/net/wireless/realtek/rtlwifi/efuse.h | 4 ++ .../net/wireless/realtek/rtlwifi/rtl8188ee/fw.c | 67 ++------------------- .../wireless/realtek/rtlwifi/rtl8192c/fw_common.c | 70 +++------------------- .../net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 70 +++------------------- .../net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 68 ++------------------- .../realtek/rtlwifi/rtl8723com/fw_common.c | 69 ++------------------- .../realtek/rtlwifi/rtl8723com/fw_common.h | 6 -- .../net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 67 ++------------------- 9 files changed, 85 insertions(+), 381 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index afc7550e8e41..eb58633e674a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -31,6 +31,9 @@ static const u8 MAX_PGPKT_SIZE = 9; static const u8 PGPKT_DATA_SIZE = 8; static const int EFUSE_MAX_SIZE = 512; +#define START_ADDRESS 0x1000 +#define REG_MCUFWDL 0x0080 + static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = { {0, 0, 0, 2}, {0, 1, 0, 2}, @@ -1320,3 +1323,45 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, return 0; } EXPORT_SYMBOL_GPL(rtl_get_hwinfo); + +void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *pu4byteptr = (u8 *)buffer; + u32 i; + + for (i = 0; i < size; i++) + rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i)); +} +EXPORT_SYMBOL_GPL(rtl_fw_block_write); + +void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, + u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value8; + u8 u8page = (u8)(page & 0x07); + + value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; + + rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); + rtl_fw_block_write(hw, buffer, size); +} +EXPORT_SYMBOL_GPL(rtl_fw_page_write); + +void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen) +{ + u32 fwlen = *pfwlen; + u8 remain = (u8)(fwlen 
% 4); + + remain = (remain == 0) ? 0 : (4 - remain); + + while (remain > 0) { + pfwbuf[fwlen] = 0; + fwlen++; + remain--; + } + + *pfwlen = fwlen; +} +EXPORT_SYMBOL_GPL(rtl_fill_dummy); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h index 51aa1210def5..1338ae63fe54 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h @@ -111,5 +111,9 @@ void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, int max_size, u8 *hwinfo, int *params); +void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen); +void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, + u32 size); +void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index afa784a46634..21ed9ad3be7a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -53,63 +54,6 @@ static void _rtl88e_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl88e_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4BytePtr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4BytePtr + i)); - } - - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl88e_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl88e_fw_block_write(hw, buffer, size); -} - -static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 
0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl88e_write_fw(struct ieee80211_hw *hw, enum version_8188e version, u8 *buffer, u32 size) { @@ -120,7 +64,7 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); - _rtl88e_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; @@ -130,15 +74,14 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw, for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl88e_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); } if (remainsize) { offset = pagenums * FW_8192C_PAGE_SIZE; page = pagenums; - _rtl88e_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index 433ab7f33acf..c7a77467b20e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "../rtl8192ce/reg.h" #include "../rtl8192ce/def.h" #include "fw_common.h" @@ -68,63 +69,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl92c_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl92c_fw_block_write(hw, buffer, size); -} - -static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 
0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl92c_write_fw(struct ieee80211_hw *hw, enum version_8192c version, u8 *buffer, u32 size) { @@ -140,7 +84,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, u32 page, offset; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) - _rtl92c_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pageNums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; @@ -150,18 +94,18 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, for (page = 0; page < pageNums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl92c_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); } if (remainsize) { offset = pageNums * FW_8192C_PAGE_SIZE; page = pageNums; - _rtl92c_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), + remainsize); } } else { - _rtl92c_fw_block_write(hw, buffer, size); + rtl_fw_block_write(hw, buffer, size); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 569d572e0618..88faeab2574f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -26,6 +26,7 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -59,84 +60,31 @@ static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl92d_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *) buffer; - u32 *pu4BytePtr = (u32 *) buffer; - u32 i, offset, blockCount, remainSize; - - blockCount = size / blocksize; - remainSize = size % blocksize; - for (i = 0; i < blockCount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset), - *(pu4BytePtr + i)); - } - if (remainSize) { - offset = blockCount * blocksize; - bufferptr += offset; - for (i = 0; i < remainSize; i++) { - rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl92d_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl92d_fw_block_write(hw, buffer, size); -} - -static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 
0 : (4 - remain); - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - *pfwlen = fwlen; -} - static void _rtl92d_write_fw(struct ieee80211_hw *hw, enum version_8192d version, u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 *bufferPtr = buffer; - u32 pagenums, remainSize; + u8 *bufferptr = buffer; + u32 pagenums, remainsize; u32 page, offset; RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) - _rtl92d_fill_dummy(bufferPtr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8192D_PAGE_SIZE; - remainSize = size % FW_8192D_PAGE_SIZE; + remainsize = size % FW_8192D_PAGE_SIZE; if (pagenums > 8) pr_err("Page numbers should not greater then 8\n"); for (page = 0; page < pagenums; page++) { offset = page * FW_8192D_PAGE_SIZE; - _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), - FW_8192D_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192D_PAGE_SIZE); } - if (remainSize) { + if (remainsize) { offset = pagenums * FW_8192D_PAGE_SIZE; page = pagenums; - _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), - remainSize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 78ee6e1d1850..9d7a16c9e74e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -48,64 +49,6 @@ static void _rtl92ee_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl92ee_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, - (FW_8192C_START_ADDRESS + offset + i), - *(bufferptr + i)); - } - } -} - -static void _rtl92ee_fw_page_write(struct ieee80211_hw *hw, u32 page, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8)(page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - - _rtl92ee_fw_block_write(hw, buffer, size); -} - -static void _rtl92ee_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8)(fwlen % 4); - - remain = (remain == 0) ? 
0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl92ee_write_fw(struct ieee80211_hw *hw, enum version_8192e version, u8 *buffer, u32 size) @@ -117,7 +60,7 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "FW size is %d bytes,\n", size); - _rtl92ee_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; @@ -127,16 +70,15 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw, for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl92ee_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); udelay(2); } if (remainsize) { offset = pagenums * FW_8192C_PAGE_SIZE; page = pagenums; - _rtl92ee_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index 8e0d038df701..ac573d69f6d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -26,6 +26,7 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../efuse.h" #include "fw_common.h" #include @@ -53,65 +54,6 @@ void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable) } EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download); -void rtl8723_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, - (FW_8192C_START_ADDRESS + offset + i), - *(bufferptr + i)); - } - } -} -EXPORT_SYMBOL_GPL(rtl8723_fw_block_write); - -void rtl8723_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - rtl8723_fw_block_write(hw, buffer, size); -} -EXPORT_SYMBOL_GPL(rtl8723_fw_page_write); - -void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 
0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - *pfwlen = fwlen; -} -EXPORT_SYMBOL(rtl8723_fill_dummy); - void rtl8723_write_fw(struct ieee80211_hw *hw, enum version_8723e version, u8 *buffer, u32 size, u8 max_page) @@ -123,7 +65,7 @@ void rtl8723_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); - rtl8723_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); page_nums = size / FW_8192C_PAGE_SIZE; remain_size = size % FW_8192C_PAGE_SIZE; @@ -134,15 +76,14 @@ void rtl8723_write_fw(struct ieee80211_hw *hw, } for (page = 0; page < page_nums; page++) { offset = page * FW_8192C_PAGE_SIZE; - rtl8723_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); } if (remain_size) { offset = page_nums * FW_8192C_PAGE_SIZE; page = page_nums; - rtl8723_fw_page_write(hw, page, (bufferptr + offset), - remain_size); + rtl_fw_page_write(hw, page, (bufferptr + offset), remain_size); } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n"); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h index 8ea372d1626e..77c25a976233 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h @@ -28,7 +28,6 @@ #define REG_SYS_FUNC_EN 0x0002 #define REG_MCUFWDL 0x0080 -#define FW_8192C_START_ADDRESS 0x1000 #define FW_8192C_PAGE_SIZE 4096 #define FW_8723A_POLLING_TIMEOUT_COUNT 1000 #define FW_8723B_POLLING_TIMEOUT_COUNT 6000 @@ -84,10 +83,6 @@ enum rtl8723be_cmd { void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw); void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw); void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable); -void rtl8723_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size); -void rtl8723_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size); void rtl8723_write_fw(struct ieee80211_hw *hw, enum version_8723e version, u8 *buffer, u32 size, u8 max_page); @@ -95,6 +90,5 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, int count); int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be, int count); bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); -void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 94e97dce5457..328c64d465ba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -51,63 +52,6 @@ static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - - if 
(remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8)(page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl8821ae_fw_block_write(hw, buffer, size); -} - -static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8)(fwlen % 4); - - remain = (remain == 0) ? 0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, enum version_8821ae version, u8 *buffer, u32 size) @@ -119,7 +63,7 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); - _rtl8821ae_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8821AE_PAGE_SIZE; remainsize = size % FW_8821AE_PAGE_SIZE; @@ -129,15 +73,14 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, for (page = 0; page < pagenums; page++) { offset = page * FW_8821AE_PAGE_SIZE; - _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset), - FW_8821AE_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8821AE_PAGE_SIZE); } if (remainsize) { offset = pagenums * FW_8821AE_PAGE_SIZE; page = pagenums; - _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } -- cgit v1.2.3 From 69d8597e9fe5ca76da8ad18245d51af96cf8457a Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 19 Jan 2017 14:28:07 -0600 Subject: rtlwifi: rtl8192cu: Calculate descriptor checksum correctly for BE This driver requires a checksum for the descriptors so that the wifi chip is assured that the USB transmission was correct. These entries are little-endian, but the driver was always using cpu order in the calculation. As a result, the driver failed on BE hardware. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 6da6e2acfbec..1611e42479d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -477,14 +477,14 @@ static void _rtl_fill_usb_tx_desc(u8 *txdesc) */ static void _rtl_tx_desc_checksum(u8 *txdesc) { - u16 *ptr = (u16 *)txdesc; + __le16 *ptr = (__le16 *)txdesc; u16 checksum = 0; u32 index; /* Clear first */ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); for (index = 0; index < 16; index++) - checksum = checksum ^ (*(ptr + index)); + checksum = checksum ^ le16_to_cpu(*(ptr + index)); SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); } -- cgit v1.2.3 From 106e0deca1ac6ac627a10617fe77a95200cb01a7 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 19 Jan 2017 14:28:08 -0600 Subject: rtlwifi: rtl8192cu: Convert driver to use common macros These drivers use a set of complicated macros to extract and insert information for the RX and TX descriptors. 
Driver rtl8192cu had a different set than was used for the PCI-based drivers. To simplify the code, rtl8192cu is switched to use the common version. In the process, two errors in those common macros were found and fixed. Besides simplifying the code, there is an additional benefit. We have no BE hardware to test the PCI driver, but using the common macros provides an additional test for the validity of many endian-sensitive operations. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8192cu/trx.h | 272 ++++++++++----------- drivers/net/wireless/realtek/rtlwifi/wifi.h | 23 +- 2 files changed, 132 insertions(+), 163 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index df88e39301c2..487eec89bc29 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -92,129 +92,107 @@ struct rx_drv_info_92c { u8 reserve:4; } __packed; -/* Define a macro that takes a le32 word, converts it to host ordering, - * right shifts by a specified count, creates a mask of the specified - * bit count, and extracts that number of bits. - */ - -#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits) \ - ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \ - BIT_LEN_MASK_32(__bits)) - -/* Define a macro that clears a bit field in an le32 word and - * sets the specified value into that bit field. The resulting - * value remains in le32 ordering; however, it is properly converted - * to host ordering for the clear and set operations before conversion - * back to le32. - */ - -#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \ - (*(__le32 *)(__pdesc) = \ - (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \ - (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \ - (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift))))); - /* macros to read various fields in RX descriptor */ /* DWORD 0 */ #define GET_RX_DESC_PKT_LEN(__rxdesc) \ - SHIFT_AND_MASK_LE((__rxdesc), 0, 14) + LE_BITS_TO_4BYTE((__rxdesc), 0, 14) #define GET_RX_DESC_CRC32(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 14, 1) + LE_BITS_TO_4BYTE(__rxdesc, 14, 1) #define GET_RX_DESC_ICV(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 15, 1) + LE_BITS_TO_4BYTE(__rxdesc, 15, 1) #define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 16, 4) + LE_BITS_TO_4BYTE(__rxdesc, 16, 4) #define GET_RX_DESC_SECURITY(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 20, 3) + LE_BITS_TO_4BYTE(__rxdesc, 20, 3) #define GET_RX_DESC_QOS(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 23, 1) + LE_BITS_TO_4BYTE(__rxdesc, 23, 1) #define GET_RX_DESC_SHIFT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 24, 2) + LE_BITS_TO_4BYTE(__rxdesc, 24, 2) #define GET_RX_DESC_PHY_STATUS(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 26, 1) + LE_BITS_TO_4BYTE(__rxdesc, 26, 1) #define GET_RX_DESC_SWDEC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 27, 1) + LE_BITS_TO_4BYTE(__rxdesc, 27, 1) #define GET_RX_DESC_LAST_SEG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 28, 1) + LE_BITS_TO_4BYTE(__rxdesc, 28, 1) #define GET_RX_DESC_FIRST_SEG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 29, 1) + LE_BITS_TO_4BYTE(__rxdesc, 29, 1) #define GET_RX_DESC_EOR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 30, 1) + LE_BITS_TO_4BYTE(__rxdesc, 30, 1) #define GET_RX_DESC_OWN(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 31, 1) + LE_BITS_TO_4BYTE(__rxdesc, 31, 1) /* DWORD 1 */ #define GET_RX_DESC_MACID(__rxdesc) \ - 
SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5) + LE_BITS_TO_4BYTE(__rxdesc + 4, 0, 5) #define GET_RX_DESC_TID(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4) + LE_BITS_TO_4BYTE(__rxdesc + 4, 5, 4) #define GET_RX_DESC_PAGGR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 14, 1) #define GET_RX_DESC_FAGGR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 15, 1) #define GET_RX_DESC_A1_FIT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4) + LE_BITS_TO_4BYTE(__rxdesc + 4, 16, 4) #define GET_RX_DESC_A2_FIT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4) + LE_BITS_TO_4BYTE(__rxdesc + 4, 20, 4) #define GET_RX_DESC_PAM(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 24, 1) #define GET_RX_DESC_PWR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 25, 1) #define GET_RX_DESC_MORE_DATA(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 26, 1) #define GET_RX_DESC_MORE_FRAG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 27, 1) #define GET_RX_DESC_TYPE(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2) + LE_BITS_TO_4BYTE(__rxdesc + 4, 28, 2) #define GET_RX_DESC_MC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 30, 1) #define GET_RX_DESC_BC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 31, 1) /* DWORD 2 */ #define GET_RX_DESC_SEQ(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12) + LE_BITS_TO_4BYTE(__rxdesc + 8, 0, 12) #define GET_RX_DESC_FRAG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4) + LE_BITS_TO_4BYTE(__rxdesc + 8, 12, 4) #define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8) + LE_BITS_TO_4BYTE(__rxdesc + 8, 16, 8) #define GET_RX_DESC_NEXT_IND(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1) + LE_BITS_TO_4BYTE(__rxdesc + 8, 30, 1) /* DWORD 3 */ #define GET_RX_DESC_RX_MCS(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6) + LE_BITS_TO_4BYTE(__rxdesc + 12, 0, 6) #define GET_RX_DESC_RX_HT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 6, 1) #define GET_RX_DESC_AMSDU(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 7, 1) #define GET_RX_DESC_SPLCP(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 8, 1) #define GET_RX_DESC_BW(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 9, 1) #define GET_RX_DESC_HTC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 10, 1) #define GET_RX_DESC_TCP_CHK_RPT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 11, 1) #define GET_RX_DESC_IP_CHK_RPT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 12, 1) #define GET_RX_DESC_TCP_CHK_VALID(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 13, 1) #define GET_RX_DESC_HWPC_ERR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 14, 1) #define GET_RX_DESC_HWPC_IND(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 15, 1) #define GET_RX_DESC_IV0(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16) + LE_BITS_TO_4BYTE(__rxdesc + 12, 16, 16) /* DWORD 4 */ #define GET_RX_DESC_IV1(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32) + LE_BITS_TO_4BYTE(__rxdesc + 16, 0, 32) /* 
DWORD 5 */ #define GET_RX_DESC_TSFL(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32) + LE_BITS_TO_4BYTE(__rxdesc + 20, 0, 32) /*======================= tx desc ============================================*/ @@ -222,182 +200,182 @@ struct rx_drv_info_92c { /* Dword 0 */ #define SET_TX_DESC_PKT_SIZE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 0, 16, __value) #define SET_TX_DESC_OFFSET(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 16, 8, __value) #define SET_TX_DESC_BMC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 24, 1, __value) #define SET_TX_DESC_HTC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 25, 1, __value) #define SET_TX_DESC_LAST_SEG(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 26, 1, __value) #define SET_TX_DESC_FIRST_SEG(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 27, 1, __value) #define SET_TX_DESC_LINIP(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 28, 1, __value) #define SET_TX_DESC_NO_ACM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 29, 1, __value) #define SET_TX_DESC_GF(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 30, 1, __value) #define SET_TX_DESC_OWN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value) /* Dword 1 */ #define SET_TX_DESC_MACID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value) #define SET_TX_DESC_AGG_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 5, 1, __value) #define SET_TX_DESC_AGG_BREAK(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 6, 1, __value) #define SET_TX_DESC_RDG_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 7, 1, __value) #define SET_TX_DESC_QUEUE_SEL(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 8, 5, __value) #define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 13, 1, __value) #define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 14, 1, __value) #define SET_TX_DESC_PIFS(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 15, 1, __value) #define SET_TX_DESC_RATE_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value) #define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value) #define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 20, 1, __value) #define SET_TX_DESC_EN_DESC_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 21, 1, __value) #define 
SET_TX_DESC_SEC_TYPE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 22, 2, __value) #define SET_TX_DESC_PKT_OFFSET(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 26, 5, __value) /* Dword 2 */ #define SET_TX_DESC_RTS_RC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 0, 6, __value) #define SET_TX_DESC_DATA_RC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 6, 6, __value) #define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 14, 2, __value) #define SET_TX_DESC_MORE_FRAG(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 17, 1, __value) #define SET_TX_DESC_RAW(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 18, 1, __value) #define SET_TX_DESC_CCX(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 19, 1, __value) #define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 20, 3, __value) #define SET_TX_DESC_ANTSEL_A(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 24, 1, __value) #define SET_TX_DESC_ANTSEL_B(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 25, 1, __value) #define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 26, 2, __value) #define SET_TX_DESC_TX_ANTL(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 28, 2, __value) #define SET_TX_DESC_TX_ANT_HT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 30, 2, __value) /* Dword 3 */ #define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 0, 8, __value) #define SET_TX_DESC_TAIL_PAGE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 8, 8, __value) #define SET_TX_DESC_SEQ(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 16, 12, __value) #define SET_TX_DESC_PKT_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 28, 4, __value) /* Dword 4 */ #define SET_TX_DESC_RTS_RATE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 0, 5, __value) #define SET_TX_DESC_AP_DCFE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 5, 1, __value) #define SET_TX_DESC_QOS(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 6, 1, __value) #define SET_TX_DESC_HWSEQ_EN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 7, 1, __value) #define SET_TX_DESC_USE_RATE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 8, 1, __value) #define 
SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 9, 1, __value) #define SET_TX_DESC_DISABLE_FB(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 10, 1, __value) #define SET_TX_DESC_CTS2SELF(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 11, 1, __value) #define SET_TX_DESC_RTS_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 12, 1, __value) #define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 13, 1, __value) #define SET_TX_DESC_WAIT_DCTS(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 18, 1, __value) #define SET_TX_DESC_CTS2AP_EN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 19, 1, __value) #define SET_TX_DESC_DATA_SC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 20, 2, __value) #define SET_TX_DESC_DATA_STBC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 22, 2, __value) #define SET_TX_DESC_DATA_SHORT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 24, 1, __value) #define SET_TX_DESC_DATA_BW(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 25, 1, __value) #define SET_TX_DESC_RTS_SHORT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 26, 1, __value) #define SET_TX_DESC_RTS_BW(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 27, 1, __value) #define SET_TX_DESC_RTS_SC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 28, 2, __value) #define SET_TX_DESC_RTS_STBC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 30, 2, __value) /* Dword 5 */ #define SET_TX_DESC_TX_RATE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val) + SET_BITS_TO_LE_4BYTE(__pdesc + 20, 0, 6, __val) #define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val) + SET_BITS_TO_LE_4BYTE(__pdesc + 20, 6, 1, __val) #define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val) + SET_BITS_TO_LE_4BYTE(__pdesc + 20, 7, 1, __val) #define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 8, 5, __value) #define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 13, 4, __value) #define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 17, 1, __value) #define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 18, 6, __value) #define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 
24, 8, __value) /* Dword 6 */ #define SET_TX_DESC_TXAGC_A(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 0, 5, __value) #define SET_TX_DESC_TXAGC_B(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 5, 5, __value) #define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 10, 1, __value) #define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 11, 5, __value) #define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 16, 4, __value) #define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 20, 4, __value) #define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 24, 4, __value) #define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 28, 4, __value) /* Dword 7 */ #define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 0, 16, __value) #define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 16, 4, __value) #define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 20, 4, __value) #define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 24, 4, __value) #define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value) int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 262c02a347cf..310fa90200b2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2704,23 +2704,14 @@ enum bt_radio_shared { (le32_to_cpu(_val)) /* Read data from memory */ -#define READEF1BYTE(_ptr) \ +#define READEF1BYTE(_ptr) \ EF1BYTE(*((u8 *)(_ptr))) /* Read le16 data from memory and convert to host ordering */ -#define READEF2BYTE(_ptr) \ +#define READEF2BYTE(_ptr) \ EF2BYTE(*(_ptr)) -#define READEF4BYTE(_ptr) \ +#define READEF4BYTE(_ptr) \ EF4BYTE(*(_ptr)) -/* Write data to memory */ -#define WRITEEF1BYTE(_ptr, _val) \ - (*((u8 *)(_ptr))) = EF1BYTE(_val) -/* Write le16 data to memory in host ordering */ -#define WRITEEF2BYTE(_ptr, _val) \ - (*((u16 *)(_ptr))) = EF2BYTE(_val) -#define WRITEEF4BYTE(_ptr, _val) \ - (*((u32 *)(_ptr))) = EF2BYTE(_val) - /* Create a bit mask * Examples: * BIT_LEN_MASK_32(0) => 0x00000000 @@ -2801,14 +2792,14 @@ value to host byte ordering.*/ * Set subfield of little-endian 4-byte value to specified value. 
*/ #define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ - *((u32 *)(__pstart)) = \ - ( \ + *((__le32 *)(__pstart)) = \ + cpu_to_le32( \ LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \ ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \ ); #define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \ - *((u16 *)(__pstart)) = \ - ( \ + *((__le16 *)(__pstart)) = \ + cpu_to_le16( \ LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \ ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \ ); -- cgit v1.2.3 From 0a327889f64619ac3ec886208644191cd87de525 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 18 Jan 2017 15:52:51 +0100 Subject: cxgb4: hide unused warnings The two new variables are only used inside of an #ifdef and cause harmless warnings when that is disabled: drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c: In function 'init_one': drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:4646:9: error: unused variable 'port_vec' [-Werror=unused-variable] drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:4646:6: error: unused variable 'v' [-Werror=unused-variable] This adds another #ifdef around the declarations. Fixes: 96fe11f27b70 ("cxgb4: Implement ndo_get_phys_port_id for mgmt dev") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4da6f900ff24..49e000ebd2b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4643,7 +4643,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 whoami, pl_rev; enum chip_type chip; static int adap_idx = 1; +#ifdef CONFIG_PCI_IOV u32 v, port_vec; +#endif printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); -- cgit v1.2.3 From c2a2efbbfcb31bedcf81170fc1aa920255c33b8f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 20 Jan 2017 05:06:08 -0800 Subject: net: remove bh disabling around percpu_counter accesses Shaohua Li made percpu_counter irq safe in commit 098faf5805c8 ("percpu_counter: make APIs irq safe") We can safely remove BH disable/enable sections around various percpu_counter manipulations. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/dst_ops.h | 9 +-------- include/net/inet_frag.h | 8 +------- net/ipv4/inet_connection_sock.c | 3 +-- net/ipv4/proc.c | 2 -- net/ipv4/tcp.c | 2 -- net/ipv4/tcp_ipv4.c | 2 -- 6 files changed, 3 insertions(+), 23 deletions(-) diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index a0d443ca16fc..8a2b66d8d78d 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -46,19 +46,12 @@ static inline int dst_entries_get_fast(struct dst_ops *dst) static inline int dst_entries_get_slow(struct dst_ops *dst) { - int res; - - local_bh_disable(); - res = percpu_counter_sum_positive(&dst->pcpuc_entries); - local_bh_enable(); - return res; + return percpu_counter_sum_positive(&dst->pcpuc_entries); } static inline void dst_entries_add(struct dst_ops *dst, int val) { - local_bh_disable(); percpu_counter_add(&dst->pcpuc_entries, val); - local_bh_enable(); } static inline int dst_entries_init(struct dst_ops *dst) diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 909972aa3acd..5894730ec82a 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -164,13 +164,7 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i) static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) { - unsigned int res; - - local_bh_disable(); - res = percpu_counter_sum_positive(&nf->mem); - local_bh_enable(); - - return res; + return percpu_counter_sum_positive(&nf->mem); } /* RFC 3168 support : diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 096a085611ab..c7f7c5335369 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -836,9 +836,8 @@ void inet_csk_destroy_sock(struct sock *sk) sk_refcnt_debug_release(sk); - local_bh_disable(); percpu_counter_dec(sk->sk_prot->orphan_count); - local_bh_enable(); + sock_put(sk); } EXPORT_SYMBOL(inet_csk_destroy_sock); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 0247ca032232..a9deeb90dd36 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -57,10 +57,8 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) unsigned int frag_mem; int orphans, sockets; - local_bh_disable(); orphans = percpu_counter_sum_positive(&tcp_orphan_count); sockets = proto_sockets_allocated_sum_positive(&tcp_prot); - local_bh_enable(); socket_seq_show(seq); seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index aba6ea76338e..c43eb1a831d7 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -420,9 +420,7 @@ void tcp_init_sock(struct sock *sk) sk->sk_sndbuf = sysctl_tcp_wmem[1]; sk->sk_rcvbuf = sysctl_tcp_rmem[1]; - local_bh_disable(); sk_sockets_allocated_inc(sk); - local_bh_enable(); } EXPORT_SYMBOL(tcp_init_sock); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3644fc117691..f7325b25b06e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1887,9 +1887,7 @@ void tcp_v4_destroy_sock(struct sock *sk) tcp_free_fastopen_req(tp); tcp_saved_syn_free(tp); - local_bh_disable(); sk_sockets_allocated_dec(sk); - local_bh_enable(); } EXPORT_SYMBOL(tcp_v4_destroy_sock); -- cgit v1.2.3 From c10aa71b9d823603306a3bcd19f6c584bdf14ef7 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 20 Jan 2017 14:53:06 +0100 Subject: gre6: Clean up unused struct ipv6_tel_txoption definition Commit b05229f44228 ("gre6: Cleanup GREv6 transmit path, call common GRE functions") removed the ip6gre specific transmit function, but left the struct ipv6_tel_txoption definition. Clean it up. 
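For readers of the percpu_counter patch above ("net: remove bh disabling around percpu_counter accesses"): the simplification works because percpu_counter updates have been IRQ safe since commit 098faf5805c8, so callers no longer need local_bh_disable()/local_bh_enable() around them. A minimal kernel-style sketch of the resulting pattern follows; the names demo_entries, demo_init, demo_add and demo_read_slow are hypothetical, chosen only for illustration and not taken from any patch in this series.

	#include <linux/percpu_counter.h>

	/* Hypothetical counter used only to illustrate the pattern. */
	static struct percpu_counter demo_entries;

	static int demo_init(void)
	{
		/* Allocates the per-cpu counters; returns 0 on success. */
		return percpu_counter_init(&demo_entries, 0, GFP_KERNEL);
	}

	static void demo_add(int val)
	{
		/* IRQ safe since 098faf5805c8, so no BH protection needed. */
		percpu_counter_add(&demo_entries, val);
	}

	static s64 demo_read_slow(void)
	{
		/* Exact sum across CPUs, likewise safe without BH disabling. */
		return percpu_counter_sum_positive(&demo_entries);
	}

The same reasoning covers percpu_counter_dec() and percpu_counter_sum_positive() as used in the dst_ops, inet_frag, inet_connection_sock and TCP hunks above.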
Signed-off-by: Jakub Sitnicki Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 75b6108234dd..65bdfd1cca80 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -484,11 +484,6 @@ drop: return 0; } -struct ipv6_tel_txoption { - struct ipv6_txoptions ops; - __u8 dst_opt[8]; -}; - static int gre_handle_offloads(struct sk_buff *skb, bool csum) { return iptunnel_handle_offloads(skb, -- cgit v1.2.3 From 6c59ebd356ff2ca64cdf1f61c5fe17f6fa8fc045 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 20 Jan 2017 22:27:04 +0800 Subject: sock: use hlist_entry_safe Use hlist_entry_safe() instead of open-coding it. Signed-off-by: Geliang Tang Signed-off-by: David S. Miller --- include/net/sock.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 389a0a619b45..7144750d14e5 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -544,8 +544,7 @@ static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head) static inline struct sock *sk_next(const struct sock *sk) { - return sk->sk_node.next ? - hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; + return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node); } static inline struct sock *sk_nulls_next(const struct sock *sk) -- cgit v1.2.3 From 582a686f52d2e002da64093fe4b9aa5befcaf4e4 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Wed, 18 Jan 2017 14:04:37 +0100 Subject: device: bus_type: Introduce num_vf callback This allows for bus types to implement their own method of retrieving the number of virtual functions a NIC on that type of bus supports. Signed-off-by: Phil Sutter Signed-off-by: David S. Miller --- include/linux/device.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/device.h b/include/linux/device.h index 491b4c0ca633..6d73b70a4a5d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); * * @suspend: Called when a device on this bus wants to go to sleep mode. * @resume: Called to bring a device on this bus out of sleep mode. + * @num_vf: Called to find out how many virtual functions a device on this + * bus supports. * @pm: Power management operations of this bus, callback the specific * device driver's pm-ops. * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU @@ -127,6 +129,8 @@ struct bus_type { int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); + int (*num_vf)(struct device *dev); + const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; -- cgit v1.2.3 From 02e0bea6c83c657617195135667e6a3f50d3bfb3 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Wed, 18 Jan 2017 14:04:38 +0100 Subject: PCI: implement num_vf bus type callback Signed-off-by: Phil Sutter Signed-off-by: David S. 
Miller --- drivers/pci/pci-driver.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1ccce1cd6aca..63d8e18fb6b1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1432,6 +1432,11 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static int pci_bus_num_vf(struct device *dev) +{ + return pci_num_vf(to_pci_dev(dev)); +} + struct bus_type pci_bus_type = { .name = "pci", .match = pci_bus_match, @@ -1443,6 +1448,7 @@ struct bus_type pci_bus_type = { .bus_groups = pci_bus_groups, .drv_groups = pci_drv_groups, .pm = PCI_PM_OPS_PTR, + .num_vf = pci_bus_num_vf, }; EXPORT_SYMBOL(pci_bus_type); -- cgit v1.2.3 From 9af15c38254d81c9991eba89335ca7c537d7f2c3 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Wed, 18 Jan 2017 14:04:39 +0100 Subject: device: Implement a bus agnostic dev_num_vf routine Now that pci_bus_type has num_vf callback set, dev_num_vf can be implemented in a bus type independent way and the check for whether a PCI device is being handled in rtnetlink can be dropped. Signed-off-by: Phil Sutter Signed-off-by: David S. Miller --- include/linux/device.h | 7 +++++++ include/linux/pci.h | 2 -- net/core/rtnetlink.c | 3 +-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/include/linux/device.h b/include/linux/device.h index 6d73b70a4a5d..bd684fc8ec1d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1144,6 +1144,13 @@ extern int device_online(struct device *dev); extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +static inline int dev_num_vf(struct device *dev) +{ + if (dev->bus && dev->bus->num_vf) + return dev->bus->num_vf(dev); + return 0; +} + /* * Root device objects for grouping under /sys/devices */ diff --git a/include/linux/pci.h b/include/linux/pci.h index e2d1a124216a..adbc859fe7c4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -885,7 +885,6 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type); void pci_sort_breadthfirst(void); #define dev_is_pci(d) ((d)->bus == &pci_bus_type) #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false)) -#define dev_num_vf(d) ((dev_is_pci(d) ? 
pci_num_vf(to_pci_dev(d)) : 0)) /* Generic PCI functions exported to card drivers */ @@ -1630,7 +1629,6 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #define dev_is_pci(d) (false) #define dev_is_pf(d) (false) -#define dev_num_vf(d) (0) #endif /* CONFIG_PCI */ /* Include architecture-dependent settings and functions */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f538f764fca6..152744643074 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -837,8 +837,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, static inline int rtnl_vfinfo_size(const struct net_device *dev, u32 ext_filter_mask) { - if (dev->dev.parent && dev_is_pci(dev->dev.parent) && - (ext_filter_mask & RTEXT_FILTER_VF)) { + if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { int num_vfs = dev_num_vf(dev->dev.parent); size_t size = nla_total_size(0); size += num_vfs * -- cgit v1.2.3 From a5e8c07059d0f0b31737408711d44794928ac218 Mon Sep 17 00:00:00 2001 From: Gianluca Borello Date: Wed, 18 Jan 2017 17:55:49 +0000 Subject: bpf: add bpf_probe_read_str helper Provide a simple helper with the same semantics of strncpy_from_unsafe(): int bpf_probe_read_str(void *dst, int size, const void *unsafe_addr) This gives more flexibility to a bpf program. A typical use case is intercepting a file name during sys_open(). The current approach is: SEC("kprobe/sys_open") void bpf_sys_open(struct pt_regs *ctx) { char buf[PATHLEN]; // PATHLEN is defined to 256 bpf_probe_read(buf, sizeof(buf), ctx->di); /* consume buf */ } This is suboptimal because the size of the string needs to be estimated at compile time, causing more memory to be copied than often necessary, and can become more problematic if further processing on buf is done, for example by pushing it to userspace via bpf_perf_event_output(), since the real length of the string is unknown and the entire buffer must be copied (and defining an unrolled strnlen() inside the bpf program is a very inefficient and unfeasible approach). With the new helper, the code can easily operate on the actual string length rather than the buffer size: SEC("kprobe/sys_open") void bpf_sys_open(struct pt_regs *ctx) { char buf[PATHLEN]; // PATHLEN is defined to 256 int res = bpf_probe_read_str(buf, sizeof(buf), ctx->di); /* consume buf, for example push it to userspace via * bpf_perf_event_output(), but this time we can use * res (the string length) as event size, after checking * its boundaries. */ } Another useful use case is when parsing individual process arguments or individual environment variables navigating current->mm->arg_start and current->mm->env_start: using this helper and the return value, one can quickly iterate at the right offset of the memory area. The code changes simply leverage the already existent strncpy_from_unsafe() kernel function, which is safe to be called from a bpf program as it is used in bpf_trace_printk(). Signed-off-by: Gianluca Borello Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/uapi/linux/bpf.h | 15 ++++++++++++++- kernel/trace/bpf_trace.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0eb0e87dbe9f..54a5894bb4ea 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -430,6 +430,18 @@ union bpf_attr { * @xdp_md: pointer to xdp_md * @delta: An positive/negative integer to be added to xdp_md.data * Return: 0 on success or negative on error + * + * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * Copy a NUL terminated string from unsafe address. In case the string + * length is smaller than size, the target is not padded with further NUL + * bytes. In case the string length is larger than size, just count-1 + * bytes are copied and the last byte is set to NUL. + * @dst: destination address + * @size: maximum number of bytes to copy, including the trailing NUL + * @unsafe_ptr: unsafe address + * Return: + * > 0 length of the string including the trailing NUL on success + * < 0 error */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -476,7 +488,8 @@ union bpf_attr { FN(set_hash_invalid), \ FN(get_numa_node_id), \ FN(skb_change_head), \ - FN(xdp_adjust_head), + FN(xdp_adjust_head), \ + FN(probe_read_str), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index c22a961d1a42..424daa4586d1 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -395,6 +395,36 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { .arg2_type = ARG_ANYTHING, }; +BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + int ret; + + /* + * The strncpy_from_unsafe() call will likely not fill the entire + * buffer, but that's okay in this circumstance as we're probing + * arbitrary memory anyway similar to bpf_probe_read() and might + * as well probe the stack. Thus, memory is explicitly cleared + * only in error case, so that improper users ignoring return + * code altogether don't copy garbage; otherwise length of string + * is returned that can be used for bpf_perf_event_output() et al. + */ + ret = strncpy_from_unsafe(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) + memset(dst, 0, size); + + return ret; +} + +static const struct bpf_func_proto bpf_probe_read_str_proto = { + .func = bpf_probe_read_str, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id) { switch (func_id) { @@ -432,6 +462,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id) return &bpf_current_task_under_cgroup_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; + case BPF_FUNC_probe_read_str: + return &bpf_probe_read_str_proto; default: return NULL; } -- cgit v1.2.3 From 9999974a8318b605ebae08a87e86232659e56a52 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Wed, 18 Jan 2017 13:50:50 -0500 Subject: tipc: add function for checking broadcast support in bearer As a preparation for the 'replicast' functionality we are going to introduce in the next commits, we need the broadcast base structure to store whether bearer broadcast is available at all from the currently used bearer or bearers. 
We do this by adding a new function tipc_bearer_bcast_support() to the bearer layer, and letting the bearer selection function in bcast.c use this to give a new boolean field, 'bcast_support' the appropriate value. Reviewed-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/bcast.c | 12 +++++++++--- net/tipc/bearer.c | 15 ++++++++++++++- net/tipc/bearer.h | 8 +++++++- net/tipc/udp_media.c | 8 ++++---- 4 files changed, 34 insertions(+), 9 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index c35fad3e08e8..325627612bac 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -1,7 +1,7 @@ /* * net/tipc/bcast.c: TIPC broadcast code * - * Copyright (c) 2004-2006, 2014-2015, Ericsson AB + * Copyright (c) 2004-2006, 2014-2016, Ericsson AB * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. @@ -54,12 +54,14 @@ const char tipc_bclink_name[] = "broadcast-link"; * @inputq: data input queue; will only carry SOCK_WAKEUP messages * @dest: array keeping number of reachable destinations per bearer * @primary_bearer: a bearer having links to all broadcast destinations, if any + * @bcast_support: indicates if primary bearer, if any, supports broadcast */ struct tipc_bc_base { struct tipc_link *link; struct sk_buff_head inputq; int dests[MAX_BEARERS]; int primary_bearer; + bool bcast_support; }; static struct tipc_bc_base *tipc_bc_base(struct net *net) @@ -79,9 +81,10 @@ static void tipc_bcbase_select_primary(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); int all_dests = tipc_link_bc_peers(bb->link); - int i, mtu; + int i, mtu, prim; bb->primary_bearer = INVALID_BEARER_ID; + bb->bcast_support = true; if (!all_dests) return; @@ -93,7 +96,7 @@ static void tipc_bcbase_select_primary(struct net *net) mtu = tipc_bearer_mtu(net, i); if (mtu < tipc_link_mtu(bb->link)) tipc_link_set_mtu(bb->link, mtu); - + bb->bcast_support &= tipc_bearer_bcast_support(net, i); if (bb->dests[i] < all_dests) continue; @@ -103,6 +106,9 @@ static void tipc_bcbase_select_primary(struct net *net) if ((i ^ tipc_own_addr(net)) & 1) break; } + prim = bb->primary_bearer; + if (prim != INVALID_BEARER_ID) + bb->bcast_support = tipc_bearer_bcast_support(net, prim); } void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 52d74760fb68..33a5bdfbef76 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -431,7 +431,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, memset(&b->bcast_addr, 0, sizeof(b->bcast_addr)); memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); b->bcast_addr.media_id = b->media->type_id; - b->bcast_addr.broadcast = 1; + b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT; b->mtu = dev->mtu; b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr); rcu_assign_pointer(dev->tipc_ptr, b); @@ -482,6 +482,19 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, return 0; } +bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id) +{ + bool supp = false; + struct tipc_bearer *b; + + rcu_read_lock(); + b = bearer_get(net, bearer_id); + if (b) + supp = (b->bcast_addr.broadcast == TIPC_BROADCAST_SUPPORT); + rcu_read_unlock(); + return supp; +} + int tipc_bearer_mtu(struct net *net, u32 bearer_id) { int mtu = 0; diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 278ff7f616f9..635c9086e19a 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -60,9 +60,14 @@ 
#define TIPC_MEDIA_TYPE_IB 2 #define TIPC_MEDIA_TYPE_UDP 3 -/* minimum bearer MTU */ +/* Minimum bearer MTU */ #define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE) +/* Identifiers for distinguishing between broadcast/multicast and replicast + */ +#define TIPC_BROADCAST_SUPPORT 1 +#define TIPC_REPLICAST_SUPPORT 2 + /** * struct tipc_media_addr - destination address used by TIPC bearers * @value: address info (format defined by media) @@ -210,6 +215,7 @@ int tipc_bearer_setup(void); void tipc_bearer_cleanup(void); void tipc_bearer_stop(struct net *net); int tipc_bearer_mtu(struct net *net, u32 bearer_id); +bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id); void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id, struct sk_buff *skb, struct tipc_media_addr *dest); diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index b58dc95f3d35..46061cf48cd1 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -113,7 +113,7 @@ static void tipc_udp_media_addr_set(struct tipc_media_addr *addr, memcpy(addr->value, ua, sizeof(struct udp_media_addr)); if (tipc_udp_is_mcast_addr(ua)) - addr->broadcast = 1; + addr->broadcast = TIPC_BROADCAST_SUPPORT; } /* tipc_udp_addr2str - convert ip/udp address to string */ @@ -229,7 +229,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, goto out; } - if (!addr->broadcast || list_empty(&ub->rcast.list)) + if (addr->broadcast != TIPC_REPLICAST_SUPPORT) return tipc_udp_xmit(net, skb, ub, src, dst); /* Replicast, send an skb to each configured IP address */ @@ -296,7 +296,7 @@ static int tipc_udp_rcast_add(struct tipc_bearer *b, else if (ntohs(addr->proto) == ETH_P_IPV6) pr_info("New replicast peer: %pI6\n", &rcast->addr.ipv6); #endif - + b->bcast_addr.broadcast = TIPC_REPLICAST_SUPPORT; list_add_rcu(&rcast->list, &ub->rcast.list); return 0; } @@ -681,7 +681,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, goto err; b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP; - b->bcast_addr.broadcast = 1; + b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT; rcu_assign_pointer(b->media_ptr, ub); rcu_assign_pointer(ub->bearer, b); tipc_udp_media_addr_set(&b->addr, &local); -- cgit v1.2.3 From 2ae0b8af1fe35ddaa2e46704ae31a2f9cac0349d Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Wed, 18 Jan 2017 13:50:51 -0500 Subject: tipc: add functionality to lookup multicast destination nodes As a further preparation for the upcoming 'replicast' functionality, we add some necessary structs and functions for looking up and returning a list of all nodes that host destinations for a given multicast message. Reviewed-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/bcast.c | 33 +++++++++++++++++++++++++++++++-- net/tipc/bcast.h | 15 ++++++++++++++- net/tipc/name_table.c | 38 +++++++++++++++++++++++++++++++++----- net/tipc/name_table.h | 9 +++++++++ 4 files changed, 87 insertions(+), 8 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 325627612bac..412d3351abb7 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -39,9 +39,8 @@ #include "socket.h" #include "msg.h" #include "bcast.h" -#include "name_distr.h" #include "link.h" -#include "node.h" +#include "name_table.h" #define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */ #define BCLINK_WIN_MIN 32 /* bcast minimum link window size */ @@ -434,3 +433,33 @@ void tipc_bcast_stop(struct net *net) kfree(tn->bcbase); kfree(tn->bcl); } + +void tipc_nlist_init(struct tipc_nlist *nl, u32 self) +{ + memset(nl, 0, sizeof(*nl)); + INIT_LIST_HEAD(&nl->list); + nl->self = self; +} + +void tipc_nlist_add(struct tipc_nlist *nl, u32 node) +{ + if (node == nl->self) + nl->local = true; + else if (u32_push(&nl->list, node)) + nl->remote++; +} + +void tipc_nlist_del(struct tipc_nlist *nl, u32 node) +{ + if (node == nl->self) + nl->local = false; + else if (u32_del(&nl->list, node)) + nl->remote--; +} + +void tipc_nlist_purge(struct tipc_nlist *nl) +{ + u32_list_purge(&nl->list); + nl->remote = 0; + nl->local = 0; +} diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 855d53c64ab3..18f379198f8f 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -42,9 +42,22 @@ struct tipc_node; struct tipc_msg; struct tipc_nl_msg; -struct tipc_node_map; +struct tipc_nlist; +struct tipc_nitem; extern const char tipc_bclink_name[]; +struct tipc_nlist { + struct list_head list; + u32 self; + u16 remote; + bool local; +}; + +void tipc_nlist_init(struct tipc_nlist *nl, u32 self); +void tipc_nlist_purge(struct tipc_nlist *nl); +void tipc_nlist_add(struct tipc_nlist *nl, u32 node); +void tipc_nlist_del(struct tipc_nlist *nl, u32 node); + int tipc_bcast_init(struct net *net); void tipc_bcast_stop(struct net *net); void tipc_bcast_add_peer(struct net *net, struct tipc_link *l, diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 5a86df1e5fc2..9be6592e4a6f 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -645,6 +645,39 @@ exit: return res; } +/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes + * - Creates list of nodes that overlap the given multicast address + * - Determines if any node local ports overlap + */ +void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower, + u32 upper, u32 domain, + struct tipc_nlist *nodes) +{ + struct sub_seq *sseq, *stop; + struct publication *publ; + struct name_info *info; + struct name_seq *seq; + + rcu_read_lock(); + seq = nametbl_find_seq(net, type); + if (!seq) + goto exit; + + spin_lock_bh(&seq->lock); + sseq = seq->sseqs + nameseq_locate_subseq(seq, lower); + stop = seq->sseqs + seq->first_free; + for (; sseq->lower <= upper && sseq != stop; sseq++) { + info = sseq->info; + list_for_each_entry(publ, &info->zone_list, zone_list) { + if (tipc_in_scope(domain, publ->node)) + tipc_nlist_add(nodes, publ->node); + } + } + spin_unlock_bh(&seq->lock); +exit: + rcu_read_unlock(); +} + /* * tipc_nametbl_publish - add name publication to network name tables */ @@ -1022,11 +1055,6 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -struct u32_item { - struct list_head list; - u32 value; -}; - bool u32_find(struct list_head *l, u32 value) { struct 
u32_item *item; diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index c89bb3f5c364..6ebdeb1d84a5 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -39,6 +39,7 @@ struct tipc_subscription; struct tipc_plist; +struct tipc_nlist; /* * TIPC name types reserved for internal TIPC use (both current and planned) @@ -100,6 +101,9 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node); int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, u32 limit, struct list_head *dports); +void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower, + u32 upper, u32 domain, + struct tipc_nlist *nodes); struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, u32 upper, u32 scope, u32 port_ref, u32 key); @@ -116,6 +120,11 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s); int tipc_nametbl_init(struct net *net); void tipc_nametbl_stop(struct net *net); +struct u32_item { + struct list_head list; + u32 value; +}; + bool u32_push(struct list_head *l, u32 value); u32 u32_pop(struct list_head *l); bool u32_find(struct list_head *l, u32 value); -- cgit v1.2.3 From a853e4c6d0843729e1f25a7a7beff168e1dd7420 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Wed, 18 Jan 2017 13:50:52 -0500 Subject: tipc: introduce replicast as transport option for multicast TIPC multicast messages are currently carried over a reliable 'broadcast link', making use of the underlying media's ability to transport packets as L2 broadcast or IP multicast to all nodes in the cluster. When the used bearer is lacking that ability, we can instead emulate the broadcast service by replicating and sending the packets over as many unicast links as needed to reach all identified destinations. We now introduce a new TIPC link-level 'replicast' service that does this. Reviewed-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/bcast.c | 105 ++++++++++++++++++++++++++++++++++++++++++------------ net/tipc/bcast.h | 3 +- net/tipc/link.c | 8 ++++- net/tipc/msg.c | 17 +++++++++ net/tipc/msg.h | 9 +++-- net/tipc/node.c | 27 +++++++++----- net/tipc/socket.c | 27 +++++++++----- 7 files changed, 149 insertions(+), 47 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 412d3351abb7..672e6ef93cab 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -70,7 +70,7 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net) int tipc_bcast_get_mtu(struct net *net) { - return tipc_link_mtu(tipc_bc_sndlink(net)); + return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE; } /* tipc_bcbase_select_primary(): find a bearer with links to all destinations, @@ -175,42 +175,101 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq) __skb_queue_purge(&_xmitq); } -/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster - * and to identified node local sockets +/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes * @net: the applicable net namespace - * @list: chain of buffers containing message + * @pkts: chain of buffers containing message + * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0 * Consumes the buffer chain. 
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE + * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE */ -int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list) +static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, + u16 *cong_link_cnt) { struct tipc_link *l = tipc_bc_sndlink(net); - struct sk_buff_head xmitq, inputq, rcvq; + struct sk_buff_head xmitq; int rc = 0; - __skb_queue_head_init(&rcvq); __skb_queue_head_init(&xmitq); - skb_queue_head_init(&inputq); - - /* Prepare message clone for local node */ - if (unlikely(!tipc_msg_reassemble(list, &rcvq))) - return -EHOSTUNREACH; - tipc_bcast_lock(net); if (tipc_link_bc_peers(l)) - rc = tipc_link_xmit(l, list, &xmitq); + rc = tipc_link_xmit(l, pkts, &xmitq); tipc_bcast_unlock(net); + tipc_bcbase_xmit(net, &xmitq); + __skb_queue_purge(pkts); + if (rc == -ELINKCONG) { + *cong_link_cnt = 1; + rc = 0; + } + return rc; +} - /* Don't send to local node if adding to link failed */ - if (unlikely(rc && (rc != -ELINKCONG))) { - __skb_queue_purge(&rcvq); - return rc; +/* tipc_rcast_xmit - replicate and send a message to given destination nodes + * @net: the applicable net namespace + * @pkts: chain of buffers containing message + * @dests: list of destination nodes + * @cong_link_cnt: returns number of congested links + * @cong_links: returns identities of congested links + * Returns 0 if success, otherwise errno + */ +static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts, + struct tipc_nlist *dests, u16 *cong_link_cnt) +{ + struct sk_buff_head _pkts; + struct u32_item *n, *tmp; + u32 dst, selector; + + selector = msg_link_selector(buf_msg(skb_peek(pkts))); + __skb_queue_head_init(&_pkts); + + list_for_each_entry_safe(n, tmp, &dests->list, list) { + dst = n->value; + if (!tipc_msg_pskb_copy(dst, pkts, &_pkts)) + return -ENOMEM; + + /* Any other return value than -ELINKCONG is ignored */ + if (tipc_node_xmit(net, &_pkts, dst, selector) == -ELINKCONG) + (*cong_link_cnt)++; } + return 0; +} - /* Broadcast to all nodes, inluding local node */ - tipc_bcbase_xmit(net, &xmitq); - tipc_sk_mcast_rcv(net, &rcvq, &inputq); - __skb_queue_purge(list); +/* tipc_mcast_xmit - deliver message to indicated destination nodes + * and to identified node local sockets + * @net: the applicable net namespace + * @pkts: chain of buffers containing message + * @dests: destination nodes for message. Not consumed. + * @cong_link_cnt: returns number of encountered congested destination links + * @cong_links: returns identities of congested links + * Consumes buffer chain. 
+ * Returns 0 if success, otherwise errno + */ +int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, + struct tipc_nlist *dests, u16 *cong_link_cnt) +{ + struct tipc_bc_base *bb = tipc_bc_base(net); + struct sk_buff_head inputq, localq; + int rc = 0; + + skb_queue_head_init(&inputq); + skb_queue_head_init(&localq); + + /* Clone packets before they are consumed by next call */ + if (dests->local && !tipc_msg_reassemble(pkts, &localq)) { + rc = -ENOMEM; + goto exit; + } + + if (dests->remote) { + if (!bb->bcast_support) + rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt); + else + rc = tipc_bcast_xmit(net, pkts, cong_link_cnt); + } + + if (dests->local) + tipc_sk_mcast_rcv(net, &localq, &inputq); +exit: + __skb_queue_purge(pkts); return rc; } diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 18f379198f8f..dd772e6f6fa4 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -66,7 +66,8 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl); void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id); void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id); int tipc_bcast_get_mtu(struct net *net); -int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list); +int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, + struct tipc_nlist *dests, u16 *cong_link_cnt); int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, struct tipc_msg *hdr); diff --git a/net/tipc/link.c b/net/tipc/link.c index b0f8646e0631..b17b9e155469 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1032,11 +1032,17 @@ int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to, static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *inputq) { - switch (msg_user(buf_msg(skb))) { + struct tipc_msg *hdr = buf_msg(skb); + + switch (msg_user(hdr)) { case TIPC_LOW_IMPORTANCE: case TIPC_MEDIUM_IMPORTANCE: case TIPC_HIGH_IMPORTANCE: case TIPC_CRITICAL_IMPORTANCE: + if (unlikely(msg_type(hdr) == TIPC_MCAST_MSG)) { + skb_queue_tail(l->bc_rcvlink->inputq, skb); + return true; + } case CONN_MANAGER: skb_queue_tail(inputq, skb); return true; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index ab02d0742476..312ef7de57d7 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -607,6 +607,23 @@ error: return false; } +bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg, + struct sk_buff_head *cpy) +{ + struct sk_buff *skb, *_skb; + + skb_queue_walk(msg, skb) { + _skb = pskb_copy(skb, GFP_ATOMIC); + if (!_skb) { + __skb_queue_purge(cpy); + return false; + } + msg_set_destnode(buf_msg(_skb), dst); + __skb_queue_tail(cpy, _skb); + } + return true; +} + /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number * @list: list to be appended to * @seqno: sequence number of buffer to add diff --git a/net/tipc/msg.h b/net/tipc/msg.h index f07b51e3f6f1..c843fd2bc48d 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -631,14 +631,11 @@ static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id) static inline u32 msg_link_selector(struct tipc_msg *m) { + if (msg_user(m) == MSG_FRAGMENTER) + m = (void *)msg_data(m); return msg_bits(m, 4, 0, 1); } -static inline void msg_set_link_selector(struct tipc_msg *m, u32 n) -{ - msg_set_bits(m, 4, 0, 1, n); -} - /* * Word 5 */ @@ -835,6 +832,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, int dsz, int mtu, struct sk_buff_head *list); bool tipc_msg_lookup_dest(struct net 
*net, struct sk_buff *skb, int *err); bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq); +bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg, + struct sk_buff_head *cpy); void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, struct sk_buff *skb); diff --git a/net/tipc/node.c b/net/tipc/node.c index 2883f6a0ed98..f96dacf173ab 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1257,6 +1257,19 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) kfree_skb(skb); } +static void tipc_node_mcast_rcv(struct tipc_node *n) +{ + struct tipc_bclink_entry *be = &n->bc_entry; + + /* 'arrvq' is under inputq2's lock protection */ + spin_lock_bh(&be->inputq2.lock); + spin_lock_bh(&be->inputq1.lock); + skb_queue_splice_tail_init(&be->inputq1, &be->arrvq); + spin_unlock_bh(&be->inputq1.lock); + spin_unlock_bh(&be->inputq2.lock); + tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2); +} + static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr, int bearer_id, struct sk_buff_head *xmitq) { @@ -1330,15 +1343,8 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id if (!skb_queue_empty(&xmitq)) tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr); - /* Deliver. 'arrvq' is under inputq2's lock protection */ - if (!skb_queue_empty(&be->inputq1)) { - spin_lock_bh(&be->inputq2.lock); - spin_lock_bh(&be->inputq1.lock); - skb_queue_splice_tail_init(&be->inputq1, &be->arrvq); - spin_unlock_bh(&be->inputq1.lock); - spin_unlock_bh(&be->inputq2.lock); - tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2); - } + if (!skb_queue_empty(&be->inputq1)) + tipc_node_mcast_rcv(n); if (rc & TIPC_LINK_DOWN_EVT) { /* Reception reassembly failure => reset all links to peer */ @@ -1565,6 +1571,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) tipc_named_rcv(net, &n->bc_entry.namedq); + if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) + tipc_node_mcast_rcv(n); + if (!skb_queue_empty(&le->inputq)) tipc_sk_rcv(net, &le->inputq); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index d2f353934f82..93b6ae3154c9 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -740,32 +740,43 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, struct tipc_msg *hdr = &tsk->phdr; struct net *net = sock_net(sk); int mtu = tipc_bcast_get_mtu(net); + u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE); struct sk_buff_head pkts; + struct tipc_nlist dsts; int rc; + /* Block or return if any destination link is congested */ rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt); if (unlikely(rc)) return rc; + /* Lookup destination nodes */ + tipc_nlist_init(&dsts, tipc_own_addr(net)); + tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower, + seq->upper, domain, &dsts); + if (!dsts.local && !dsts.remote) + return -EHOSTUNREACH; + + /* Build message header */ msg_set_type(hdr, TIPC_MCAST_MSG); + msg_set_hdr_sz(hdr, MCAST_H_SIZE); msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); msg_set_destport(hdr, 0); msg_set_destnode(hdr, 0); msg_set_nametype(hdr, seq->type); msg_set_namelower(hdr, seq->lower); msg_set_nameupper(hdr, seq->upper); - msg_set_hdr_sz(hdr, MCAST_H_SIZE); + /* Build message as chain of buffers */ skb_queue_head_init(&pkts); rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); - if (unlikely(rc != dlen)) - return rc; - rc = tipc_bcast_xmit(net, &pkts); - if (unlikely(rc == -ELINKCONG)) { - tsk->cong_link_cnt = 1; - rc 
= 0; - } + /* Send message if build was successful */ + if (unlikely(rc == dlen)) + rc = tipc_mcast_xmit(net, &pkts, &dsts, + &tsk->cong_link_cnt); + + tipc_nlist_purge(&dsts); return rc ? rc : dlen; } -- cgit v1.2.3 From 01fd12bb189a0772301dd37e9b31e53761269a1b Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Wed, 18 Jan 2017 13:50:53 -0500 Subject: tipc: make replicast a user selectable option If the bearer carrying multicast messages supports broadcast, those messages will be sent to all cluster nodes, irrespective of whether these nodes host any actual destinations socket or not. This is clearly wasteful if the cluster is large and there are only a few real destinations for the message being sent. In this commit we extend the eligibility of the newly introduced "replicast" transmit option. We now make it possible for a user to select which method he wants to be used, either as a mandatory setting via setsockopt(), or as a relative setting where we let the broadcast layer decide which method to use based on the ratio between cluster size and the message's actual number of destination nodes. In the latter case, a sending socket must stick to a previously selected method until it enters an idle period of at least 5 seconds. This eliminates the risk of message reordering caused by method change, i.e., when changes to cluster size or number of destinations would otherwise mandate a new method to be used. Reviewed-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- include/uapi/linux/tipc.h | 6 +++-- net/tipc/bcast.c | 62 ++++++++++++++++++++++++++++++++++++++++++----- net/tipc/bcast.h | 17 ++++++++++++- net/tipc/link.c | 4 +++ net/tipc/node.h | 4 ++- net/tipc/socket.c | 36 +++++++++++++++++++++------ 6 files changed, 112 insertions(+), 17 deletions(-) diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h index bf049e8fe31b..5351b08c897a 100644 --- a/include/uapi/linux/tipc.h +++ b/include/uapi/linux/tipc.h @@ -1,7 +1,7 @@ /* * include/uapi/linux/tipc.h: Header for TIPC socket interface * - * Copyright (c) 2003-2006, Ericsson AB + * Copyright (c) 2003-2006, 2015-2016 Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -220,7 +220,7 @@ struct sockaddr_tipc { #define TIPC_DESTNAME 3 /* destination name */ /* - * TIPC-specific socket option values + * TIPC-specific socket option names */ #define TIPC_IMPORTANCE 127 /* Default: TIPC_LOW_IMPORTANCE */ @@ -229,6 +229,8 @@ struct sockaddr_tipc { #define TIPC_CONN_TIMEOUT 130 /* Default: 8000 (ms) */ #define TIPC_NODE_RECVQ_DEPTH 131 /* Default: none (read only) */ #define TIPC_SOCK_RECVQ_DEPTH 132 /* Default: none (read only) */ +#define TIPC_MCAST_BROADCAST 133 /* Default: TIPC selects. No arg */ +#define TIPC_MCAST_REPLICAST 134 /* Default: TIPC selects. 
No arg */ /* * Maximum sizes of TIPC bearer-related names (including terminating NULL) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 672e6ef93cab..7d99029df342 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -54,6 +54,9 @@ const char tipc_bclink_name[] = "broadcast-link"; * @dest: array keeping number of reachable destinations per bearer * @primary_bearer: a bearer having links to all broadcast destinations, if any * @bcast_support: indicates if primary bearer, if any, supports broadcast + * @rcast_support: indicates if all peer nodes support replicast + * @rc_ratio: dest count as percentage of cluster size where send method changes + * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast */ struct tipc_bc_base { struct tipc_link *link; @@ -61,6 +64,9 @@ struct tipc_bc_base { int dests[MAX_BEARERS]; int primary_bearer; bool bcast_support; + bool rcast_support; + int rc_ratio; + int bc_threshold; }; static struct tipc_bc_base *tipc_bc_base(struct net *net) @@ -73,6 +79,19 @@ int tipc_bcast_get_mtu(struct net *net) return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE; } +void tipc_bcast_disable_rcast(struct net *net) +{ + tipc_bc_base(net)->rcast_support = false; +} + +static void tipc_bcbase_calc_bc_threshold(struct net *net) +{ + struct tipc_bc_base *bb = tipc_bc_base(net); + int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net)); + + bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100); +} + /* tipc_bcbase_select_primary(): find a bearer with links to all destinations, * if any, and make it primary bearer */ @@ -175,6 +194,31 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq) __skb_queue_purge(&_xmitq); } +static void tipc_bcast_select_xmit_method(struct net *net, int dests, + struct tipc_mc_method *method) +{ + struct tipc_bc_base *bb = tipc_bc_base(net); + unsigned long exp = method->expires; + + /* Broadcast supported by used bearer/bearers? */ + if (!bb->bcast_support) { + method->rcast = true; + return; + } + /* Any destinations which don't support replicast ? */ + if (!bb->rcast_support) { + method->rcast = false; + return; + } + /* Can current method be changed ? */ + method->expires = jiffies + TIPC_METHOD_EXPIRE; + if (method->mandatory || time_before(jiffies, exp)) + return; + + /* Determine method to use now */ + method->rcast = dests <= bb->bc_threshold; +} + /* tipc_bcast_xmit - broadcast the buffer chain to all external nodes * @net: the applicable net namespace * @pkts: chain of buffers containing message @@ -237,16 +281,16 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts, * and to identified node local sockets * @net: the applicable net namespace * @pkts: chain of buffers containing message - * @dests: destination nodes for message. Not consumed. + * @method: send method to be used + * @dests: destination nodes for message. * @cong_link_cnt: returns number of encountered congested destination links - * @cong_links: returns identities of congested links * Consumes buffer chain. 
* Returns 0 if success, otherwise errno */ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, - struct tipc_nlist *dests, u16 *cong_link_cnt) + struct tipc_mc_method *method, struct tipc_nlist *dests, + u16 *cong_link_cnt) { - struct tipc_bc_base *bb = tipc_bc_base(net); struct sk_buff_head inputq, localq; int rc = 0; @@ -258,9 +302,10 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, rc = -ENOMEM; goto exit; } - + /* Send according to determined transmit method */ if (dests->remote) { - if (!bb->bcast_support) + tipc_bcast_select_xmit_method(net, dests->remote, method); + if (method->rcast) rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt); else rc = tipc_bcast_xmit(net, pkts, cong_link_cnt); @@ -269,6 +314,7 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, if (dests->local) tipc_sk_mcast_rcv(net, &localq, &inputq); exit: + /* This queue should normally be empty by now */ __skb_queue_purge(pkts); return rc; } @@ -377,6 +423,7 @@ void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l, tipc_bcast_lock(net); tipc_link_add_bc_peer(snd_l, uc_l, xmitq); tipc_bcbase_select_primary(net); + tipc_bcbase_calc_bc_threshold(net); tipc_bcast_unlock(net); } @@ -395,6 +442,7 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l) tipc_bcast_lock(net); tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq); tipc_bcbase_select_primary(net); + tipc_bcbase_calc_bc_threshold(net); tipc_bcast_unlock(net); tipc_bcbase_xmit(net, &xmitq); @@ -477,6 +525,8 @@ int tipc_bcast_init(struct net *net) goto enomem; bb->link = l; tn->bcl = l; + bb->rc_ratio = 25; + bb->rcast_support = true; return 0; enomem: kfree(bb); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index dd772e6f6fa4..751530ab0c49 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -46,6 +46,8 @@ struct tipc_nlist; struct tipc_nitem; extern const char tipc_bclink_name[]; +#define TIPC_METHOD_EXPIRE msecs_to_jiffies(5000) + struct tipc_nlist { struct list_head list; u32 self; @@ -58,6 +60,17 @@ void tipc_nlist_purge(struct tipc_nlist *nl); void tipc_nlist_add(struct tipc_nlist *nl, u32 node); void tipc_nlist_del(struct tipc_nlist *nl, u32 node); +/* Cookie to be used between socket and broadcast layer + * @rcast: replicast (instead of broadcast) was used at previous xmit + * @mandatory: broadcast/replicast indication was set by user + * @expires: re-evaluate non-mandatory transmit method if we are past this + */ +struct tipc_mc_method { + bool rcast; + bool mandatory; + unsigned long expires; +}; + int tipc_bcast_init(struct net *net); void tipc_bcast_stop(struct net *net); void tipc_bcast_add_peer(struct net *net, struct tipc_link *l, @@ -66,8 +79,10 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl); void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id); void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id); int tipc_bcast_get_mtu(struct net *net); +void tipc_bcast_disable_rcast(struct net *net); int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, - struct tipc_nlist *dests, u16 *cong_link_cnt); + struct tipc_mc_method *method, struct tipc_nlist *dests, + u16 *cong_link_cnt); int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, struct tipc_msg *hdr); diff --git a/net/tipc/link.c b/net/tipc/link.c index b17b9e155469..ddd2dd6f77aa 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -515,6 +515,10 @@ bool tipc_link_bc_create(struct 
net *net, u32 ownnode, u32 peer, if (link_is_bc_sndlink(l)) l->state = LINK_ESTABLISHED; + /* Disable replicast if even a single peer doesn't support it */ + if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST)) + tipc_bcast_disable_rcast(net); + return true; } diff --git a/net/tipc/node.h b/net/tipc/node.h index 39ef54c1f2ad..898c22916984 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -47,11 +47,13 @@ enum { TIPC_BCAST_SYNCH = (1 << 1), TIPC_BCAST_STATE_NACK = (1 << 2), - TIPC_BLOCK_FLOWCTL = (1 << 3) + TIPC_BLOCK_FLOWCTL = (1 << 3), + TIPC_BCAST_RCAST = (1 << 4) }; #define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \ TIPC_BCAST_STATE_NACK | \ + TIPC_BCAST_RCAST | \ TIPC_BLOCK_FLOWCTL) #define INVALID_BEARER_ID -1 diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 93b6ae3154c9..5bec8aac5008 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -79,6 +79,7 @@ enum { * @rcv_unacked: # messages read by user, but not yet acked back to peer * @peer: 'connected' peer for dgram/rdm * @node: hash table node + * @mc_method: cookie for use between socket and broadcast layer * @rcu: rcu struct for tipc_sock */ struct tipc_sock { @@ -103,6 +104,7 @@ struct tipc_sock { u16 rcv_win; struct sockaddr_tipc peer; struct rhash_head node; + struct tipc_mc_method mc_method; struct rcu_head rcu; }; @@ -740,6 +742,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, struct tipc_msg *hdr = &tsk->phdr; struct net *net = sock_net(sk); int mtu = tipc_bcast_get_mtu(net); + struct tipc_mc_method *method = &tsk->mc_method; u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE); struct sk_buff_head pkts; struct tipc_nlist dsts; @@ -773,7 +776,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, /* Send message if build was successful */ if (unlikely(rc == dlen)) - rc = tipc_mcast_xmit(net, &pkts, &dsts, + rc = tipc_mcast_xmit(net, &pkts, method, &dsts, &tsk->cong_link_cnt); tipc_nlist_purge(&dsts); @@ -2344,18 +2347,29 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - u32 value; + u32 value = 0; int res; if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) return 0; if (lvl != SOL_TIPC) return -ENOPROTOOPT; - if (ol < sizeof(value)) - return -EINVAL; - res = get_user(value, (u32 __user *)ov); - if (res) - return res; + + switch (opt) { + case TIPC_IMPORTANCE: + case TIPC_SRC_DROPPABLE: + case TIPC_DEST_DROPPABLE: + case TIPC_CONN_TIMEOUT: + if (ol < sizeof(value)) + return -EINVAL; + res = get_user(value, (u32 __user *)ov); + if (res) + return res; + break; + default: + if (ov || ol) + return -EINVAL; + } lock_sock(sk); @@ -2376,6 +2390,14 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, tipc_sk(sk)->conn_timeout = value; /* no need to set "res", since already 0 at this point */ break; + case TIPC_MCAST_BROADCAST: + tsk->mc_method.rcast = false; + tsk->mc_method.mandatory = true; + break; + case TIPC_MCAST_REPLICAST: + tsk->mc_method.rcast = true; + tsk->mc_method.mandatory = true; + break; default: res = -EINVAL; } -- cgit v1.2.3 From 22fbece133b71895ca6bb66890b2d9b1ddaa908c Mon Sep 17 00:00:00 2001 From: Lance Richardson Date: Wed, 18 Jan 2017 15:14:56 -0500 Subject: csum: eliminate sparse warning in remcsum_unadjust() Cast second parameter of csum_sub() from __sum16 to __wsum. Signed-off-by: Lance Richardson Signed-off-by: David S. 
Miller --- include/net/checksum.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/checksum.h b/include/net/checksum.h index 35d0fabd2782..aef2b2bb6603 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -179,7 +179,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum, static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) { - *psum = csum_fold(csum_sub(delta, *psum)); + *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } #endif -- cgit v1.2.3 From 1f6cc07e170364212b6d81321f79f166089a60d9 Mon Sep 17 00:00:00 2001 From: Lance Richardson Date: Wed, 18 Jan 2017 15:24:57 -0500 Subject: vxlan: preserve type of dst_port parm for encap_bypass_if_local() Eliminate sparse warning by maintaining type of dst_port as __be16. Signed-off-by: Lance Richardson Acked-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ca7196c40060..19b1653e1bd6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1951,7 +1951,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *vxlan, union vxlan_addr *daddr, - __be32 dst_port, __be32 vni, struct dst_entry *dst, + __be16 dst_port, __be32 vni, struct dst_entry *dst, u32 rt_flags) { #if IS_ENABLED(CONFIG_IPV6) -- cgit v1.2.3 From 264b87fa617e758966108db48db220571ff3d60e Mon Sep 17 00:00:00 2001 From: Andrew Collins Date: Wed, 18 Jan 2017 14:04:28 -0700 Subject: fq_codel: Avoid regenerating skb flow hash unless necessary The fq_codel qdisc currently always regenerates the skb flow hash. This wastes some cycles and prevents flow seperation in cases where the traffic has been encrypted and can no longer be understood by the flow dissector. Change it to use the prexisting flow hash if one exists, and only regenerate if necessary. Signed-off-by: Andrew Collins Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/sched/sch_fq_codel.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index a5ea0e9b6be4..2f50e4c72fb4 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -57,7 +57,6 @@ struct fq_codel_sched_data { struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ u32 *backlogs; /* backlog table [flows_cnt] */ u32 flows_cnt; /* number of flows */ - u32 perturbation; /* hash perturbation */ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ u32 drop_batch_size; u32 memory_limit; @@ -75,9 +74,7 @@ struct fq_codel_sched_data { static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, struct sk_buff *skb) { - u32 hash = skb_get_hash_perturb(skb, q->perturbation); - - return reciprocal_scale(hash, q->flows_cnt); + return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); } static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, @@ -482,7 +479,6 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) q->memory_limit = 32 << 20; /* 32 MBytes */ q->drop_batch_size = 64; q->quantum = psched_mtu(qdisc_dev(sch)); - q->perturbation = prandom_u32(); INIT_LIST_HEAD(&q->new_flows); INIT_LIST_HEAD(&q->old_flows); codel_params_init(&q->cparams); -- cgit v1.2.3 From 1b7cd0044e4a9f69aaf00511870b07fcdeda591d Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Wed, 18 Jan 2017 15:02:49 -0800 Subject: net: remove duplicate code. netdev_rx_handler_register() checks to see if the handler is already busy which was recently separated into netdev_is_rx_handler_busy(). So use the same function inside register() to avoid code duplication. Essentially this change should be a no-op Signed-off-by: Mahesh Bandewar Signed-off-by: David S. Miller --- net/core/dev.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index ad5959e56116..c8f1f67ff16c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3961,9 +3961,7 @@ int netdev_rx_handler_register(struct net_device *dev, rx_handler_func_t *rx_handler, void *rx_handler_data) { - ASSERT_RTNL(); - - if (dev->rx_handler) + if (netdev_is_rx_handler_busy(dev)) return -EBUSY; /* Note: rx_handler_data must be set before rx_handler */ -- cgit v1.2.3 From c3262d9deccaa7e796b0145b2d26f53c0e2d178f Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Wed, 18 Jan 2017 15:02:53 -0800 Subject: ipvlan: use netdev_is_rx_handler_busy instead of checking specific type IPvlan checks if the master device is already used by checking a specific device (here it's macvlan device). This is technically not sufficient and it should just ensure the rx_handler is busy or not. This would be a super check that includes macvlan and any other that has already registered rx-handler. Signed-off-by: Mahesh Bandewar Signed-off-by: David S. 
Miller --- drivers/net/ipvlan/ipvlan_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index b5c390f0f2b3..95b18f4602cf 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -102,8 +102,8 @@ static int ipvlan_port_create(struct net_device *dev) return -EINVAL; } - if (netif_is_macvlan_port(dev)) { - netdev_err(dev, "Master is a macvlan port.\n"); + if (netdev_is_rx_handler_busy(dev)) { + netdev_err(dev, "Device is already in use.\n"); return -EBUSY; } -- cgit v1.2.3 From 322dc6e067a15a17e66ce35338ca701f13c6422d Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Wed, 18 Jan 2017 15:02:55 -0800 Subject: macvlan: use netdev_is_rx_handler_busy instead of checking specific type netdev_is_rx_handler_busy() check is a superset of netif_is_ipvlan_port() check and hence should be preferred. Signed-off-by: Mahesh Bandewar Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 440ab3d8adf7..cbfc1be23a0e 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1110,7 +1110,7 @@ static int macvlan_port_create(struct net_device *dev) if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; - if (netif_is_ipvlan_port(dev)) + if (netdev_is_rx_handler_busy(dev)) return -EBUSY; port = kzalloc(sizeof(*port), GFP_KERNEL); -- cgit v1.2.3 From aafc93a3b6b033a698b528b39fa99fae9fc5299f Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 19 Jan 2017 18:58:23 +0200 Subject: net: ethernet: ti: cpsw: remove dual check from common res usage function Common res usage is possible only in case an interface is running. In case of not dual emac here can be only one interface, so while ndo_open and switch mode, only one interface can be opened, thus if open is called no any interface is running ... and no common res are used. So remove check on dual emac, it will simplify code/understanding and will match the name it's called. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 02b03ee2e314..296ddf247e92 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1240,9 +1240,6 @@ static int cpsw_common_res_usage_state(struct cpsw_common *cpsw) u32 i; u32 usage_count = 0; - if (!cpsw->data.dual_emac) - return 0; - for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].open_stat) usage_count++; -- cgit v1.2.3 From 176b0cbffdf3f240a19386635811034dfe0fc8ab Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 19 Jan 2017 18:58:24 +0200 Subject: net: ethernet: ti: cpsw: don't disable interrupts in ndo_open No need to disable interrupts if no open devices, they are disabled anyway. Even no need to disable interrupts if some ndev is opened, In this case shared resources are not touched, only parameters of ndev shell, so no reason to disable them also. Removed lines have proved it. So, no need in redundant check and interrupt disable. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 296ddf247e92..f79890546d96 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1480,8 +1480,6 @@ static int cpsw_ndo_open(struct net_device *ndev) return ret; } - if (!cpsw_common_res_usage_state(cpsw)) - cpsw_intr_disable(cpsw); netif_carrier_off(ndev); /* Notify the stack of the actual queue counts. */ -- cgit v1.2.3 From 03fd01ad0eead23eb79294b6fb4d71dcac493855 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 19 Jan 2017 18:58:25 +0200 Subject: net: ethernet: ti: cpsw: don't duplicate ndev_running No need to create additional vars to identify if interface is running. So simplify code by removing redundant var and checking usage counter instead. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f79890546d96..c681d39419b0 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -357,7 +357,6 @@ struct cpsw_slave { struct phy_device *phy; struct net_device *ndev; u32 port_vlan; - u32 open_stat; }; static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) @@ -1235,13 +1234,13 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev, } } -static int cpsw_common_res_usage_state(struct cpsw_common *cpsw) +static int cpsw_get_usage_count(struct cpsw_common *cpsw) { u32 i; u32 usage_count = 0; for (i = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].open_stat) + if (netif_running(cpsw->slaves[i].ndev)) usage_count++; return usage_count; @@ -1501,8 +1500,11 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg)); - /* initialize host and slave ports */ - if (!cpsw_common_res_usage_state(cpsw)) + /* Initialize host and slave ports. + * Given ndev is marked as opened already, so init port only if 1 ndev + * is opened + */ + if (cpsw_get_usage_count(cpsw) < 2) cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); @@ -1513,7 +1515,10 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); - if (!cpsw_common_res_usage_state(cpsw)) { + /* Given ndev is marked as opened already, so if more ndev + * are opened - no need to init shared resources. 
+ */ + if (cpsw_get_usage_count(cpsw) < 2) { /* disable priority elevation */ __raw_writel(0, &cpsw->regs->ptype); @@ -1556,9 +1561,6 @@ static int cpsw_ndo_open(struct net_device *ndev) cpdma_ctlr_start(cpsw->dma); cpsw_intr_enable(cpsw); - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = true; - return 0; err_cleanup: @@ -1578,7 +1580,10 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); - if (cpsw_common_res_usage_state(cpsw) <= 1) { + /* Given ndev is marked as close already, + * so disable shared resources if no open devices + */ + if (!cpsw_get_usage_count(cpsw)) { napi_disable(&cpsw->napi_rx); napi_disable(&cpsw->napi_tx); cpts_unregister(cpsw->cpts); @@ -1592,8 +1597,6 @@ static int cpsw_ndo_stop(struct net_device *ndev) cpsw_split_res(ndev); pm_runtime_put_sync(cpsw->dev); - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = false; return 0; } @@ -2418,7 +2421,7 @@ static int cpsw_set_channels(struct net_device *ndev, netif_dormant_off(slave->ndev); } - if (cpsw_common_res_usage_state(cpsw)) { + if (cpsw_get_usage_count(cpsw)) { ret = cpsw_fill_rx_channels(priv); if (ret) goto err; @@ -2537,7 +2540,7 @@ static int cpsw_set_ringparam(struct net_device *ndev, netif_dormant_off(slave->ndev); } - if (cpsw_common_res_usage_state(cpsw)) { + if (cpsw_get_usage_count(cpsw)) { cpdma_chan_split_pool(cpsw->dma); ret = cpsw_fill_rx_channels(priv); -- cgit v1.2.3 From fe734d0aa9e120a48999885355f08a99fcf45c77 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 19 Jan 2017 18:58:26 +0200 Subject: net: ethernet: ti: cpsw: don't duplicate common res in rx handler No need to duplicate the same function in rx handler to get info if any interface is running. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 40 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c681d39419b0..1f14afdddb75 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -671,6 +671,18 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw) return; } +static int cpsw_get_usage_count(struct cpsw_common *cpsw) +{ + u32 i; + u32 usage_count = 0; + + for (i = 0; i < cpsw->data.slaves; i++) + if (netif_running(cpsw->slaves[i].ndev)) + usage_count++; + + return usage_count; +} + static void cpsw_tx_handler(void *token, int len, int status) { struct netdev_queue *txq; @@ -703,18 +715,10 @@ static void cpsw_rx_handler(void *token, int len, int status) cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { - bool ndev_status = false; - struct cpsw_slave *slave = cpsw->slaves; - int n; - - if (cpsw->data.dual_emac) { - /* In dual emac mode check for all interfaces */ - for (n = cpsw->data.slaves; n; n--, slave++) - if (netif_running(slave->ndev)) - ndev_status = true; - } - - if (ndev_status && (status >= 0)) { + /* In dual emac mode check for all interfaces */ + if (cpsw->data.dual_emac && + cpsw_get_usage_count(cpsw) && + (status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up * and running, instead of freeing which results @@ -1234,18 +1238,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev, } } -static int cpsw_get_usage_count(struct cpsw_common *cpsw) -{ - u32 i; - u32 usage_count = 0; - - for (i = 0; i < cpsw->data.slaves; i++) - if (netif_running(cpsw->slaves[i].ndev)) - usage_count++; - - return usage_count; -} - static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv, struct sk_buff *skb, struct cpdma_chan *txch) -- cgit v1.2.3 From 022d7ad71dc50a6c255a68f7aa8e879615a29c4c Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 19 Jan 2017 18:58:27 +0200 Subject: net: ethernet: ti: cpsw: clarify ethtool ops changing num of descs After adding cpsw_set_ringparam ethtool op, better to carry out common parts of similar ops splitting descriptors in runtime. It allows to reuse these parts and shows what the ops actually do. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 132 ++++++++++++++++++----------------------- 1 file changed, 59 insertions(+), 73 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1f14afdddb75..897ebbe50225 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2358,17 +2358,11 @@ static int cpsw_update_channels(struct cpsw_priv *priv, return 0; } -static int cpsw_set_channels(struct net_device *ndev, - struct ethtool_channels *chs) +static void cpsw_suspend_data_pass(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); struct cpsw_slave *slave; - int i, ret; - - ret = cpsw_check_ch_settings(cpsw, chs); - if (ret < 0) - return ret; + int i; /* Disable NAPI scheduling */ cpsw_intr_disable(cpsw); @@ -2386,6 +2380,51 @@ static int cpsw_set_channels(struct net_device *ndev, /* Handle rest of tx packets and stop cpdma channels */ cpdma_ctlr_stop(cpsw->dma); +} + +static int cpsw_resume_data_pass(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + /* Allow rx packets handling */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_dormant_off(slave->ndev); + + /* After this receive is started */ + if (cpsw_get_usage_count(cpsw)) { + ret = cpsw_fill_rx_channels(priv); + if (ret) + return ret; + + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_tx_start_all_queues(slave->ndev); + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + ret = cpsw_check_ch_settings(cpsw, chs); + if (ret < 0) + return ret; + + cpsw_suspend_data_pass(ndev); ret = cpsw_update_channels(priv, chs); if (ret) goto err; @@ -2408,30 +2447,14 @@ static int cpsw_set_channels(struct net_device *ndev, dev_err(priv->dev, "cannot set real number of rx queues\n"); goto err; } - - /* Enable rx packets handling */ - netif_dormant_off(slave->ndev); } - if (cpsw_get_usage_count(cpsw)) { - ret = cpsw_fill_rx_channels(priv); - if (ret) - goto err; - + if (cpsw_get_usage_count(cpsw)) cpsw_split_res(ndev); - /* After this receive is started */ - cpdma_ctlr_start(cpsw->dma); - cpsw_intr_enable(cpsw); - } - - /* Resume transmit for every affected interface */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - netif_tx_start_all_queues(slave->ndev); - } - return 0; + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; err: dev_err(priv->dev, "cannot update channels number, closing device\n"); dev_close(ndev); @@ -2492,8 +2515,7 @@ static int cpsw_set_ringparam(struct net_device *ndev, { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - int i, ret; + int ret; /* ignore ering->tx_pending - only rx_pending adjustment is supported */ @@ -2505,54 +2527,18 @@ static int cpsw_set_ringparam(struct net_device *ndev, if (ering->rx_pending == 
cpdma_get_num_rx_descs(cpsw->dma)) return 0; - /* Disable NAPI scheduling */ - cpsw_intr_disable(cpsw); - - /* Stop all transmit queues for every network device. - * Disable re-using rx descriptors with dormant_on. - */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - - netif_tx_stop_all_queues(slave->ndev); - netif_dormant_on(slave->ndev); - } - - /* Handle rest of tx packets and stop cpdma channels */ - cpdma_ctlr_stop(cpsw->dma); + cpsw_suspend_data_pass(ndev); cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - - /* Enable rx packets handling */ - netif_dormant_off(slave->ndev); - } - - if (cpsw_get_usage_count(cpsw)) { + if (cpsw_get_usage_count(cpsw)) cpdma_chan_split_pool(cpsw->dma); - ret = cpsw_fill_rx_channels(priv); - if (ret) - goto err; - - /* After this receive is started */ - cpdma_ctlr_start(cpsw->dma); - cpsw_intr_enable(cpsw); - } + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; - /* Resume transmit for every affected interface */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - netif_tx_start_all_queues(slave->ndev); - } - return 0; -err: - dev_err(priv->dev, "cannot set ring params, closing device\n"); + dev_err(&ndev->dev, "cannot set ring params, closing device\n"); dev_close(ndev); return ret; } -- cgit v1.2.3 From 319554f284dda9f2737d09df82ba3610bd8ddea3 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 19 Jan 2017 17:47:46 -0500 Subject: inet: don't use sk_v6_rcv_saddr directly When comparing two sockets we need to use inet6_rcv_saddr so we get a NULL sk_v6_rcv_saddr if the socket isn't AF_INET6, otherwise our comparison function can be wrong. Fixes: 637bc8b ("inet: reset tb->fastreuseport when adding a reuseport sk") Signed-off-by: Josef Bacik Signed-off-by: David S. Miller --- net/ipv4/inet_connection_sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index c7f7c5335369..b4d5980ade3b 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -99,7 +99,7 @@ int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr, - &sk2->sk_v6_rcv_saddr, + inet6_rcv_saddr(sk2), sk->sk_rcv_saddr, sk2->sk_rcv_saddr, ipv6_only_sock(sk), -- cgit v1.2.3 From 0b04680fdae464ee51409b8cb36005f6ef8bd689 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Fri, 20 Jan 2017 01:37:49 +0100 Subject: phy: marvell: Add support for temperature sensor Some Marvell PHYs have an inbuilt temperature sensor. Add hwmon support for this sensor. There are two different variants. The simpler, older chips have a 5 degree accuracy. The newer devices have 1 degree accuracy. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 423 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 420 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 0b78210c0fa7..64229976ace1 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -17,8 +17,10 @@ */ #include #include +#include #include #include +#include #include #include #include @@ -90,6 +92,17 @@ #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) #define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) +#define MII_88E1121_MISC_TEST 0x1a +#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 +#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT 8 +#define MII_88E1510_MISC_TEST_TEMP_IRQ_EN BIT(7) +#define MII_88E1510_MISC_TEST_TEMP_IRQ BIT(6) +#define MII_88E1121_MISC_TEST_TEMP_SENSOR_EN BIT(5) +#define MII_88E1121_MISC_TEST_TEMP_MASK 0x1f + +#define MII_88E1510_TEMP_SENSOR 0x1b +#define MII_88E1510_TEMP_SENSOR_MASK 0xff + #define MII_88E1318S_PHY_MSCR1_REG 16 #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) @@ -172,6 +185,8 @@ static struct marvell_hw_stat marvell_hw_stats[] = { struct marvell_priv { u64 stats[ARRAY_SIZE(marvell_hw_stats)]; + char *hwmon_name; + struct device *hwmon_dev; }; static int marvell_ack_interrupt(struct phy_device *phydev) @@ -1468,6 +1483,371 @@ static void marvell_get_stats(struct phy_device *phydev, data[i] = marvell_get_stat(phydev, i); } +#ifdef CONFIG_HWMON +static int m88e1121_get_temp(struct phy_device *phydev, long *temp) +{ + int ret; + int val; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = phy_read(phydev, MII_88E1121_MISC_TEST); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + if (ret < 0) + goto error; + + *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +static int m88e1121_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e1121_get_temp(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static umode_t m88e1121_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + default: + return 0; + } +} + +static u32 m88e1121_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0 +}; + +static const struct hwmon_channel_info m88e1121_hwmon_chip = { + .type = hwmon_chip, + .config = m88e1121_hwmon_chip_config, +}; + +static u32 m88e1121_hwmon_temp_config[] = { + HWMON_T_INPUT, + 0 +}; + +static const struct hwmon_channel_info m88e1121_hwmon_temp = { + .type = hwmon_temp, + .config = m88e1121_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e1121_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e1121_hwmon_temp, + NULL 
+}; + +static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = { + .is_visible = m88e1121_hwmon_is_visible, + .read = m88e1121_hwmon_read, +}; + +static const struct hwmon_chip_info m88e1121_hwmon_chip_info = { + .ops = &m88e1121_hwmon_hwmon_ops, + .info = m88e1121_hwmon_info, +}; + +static int m88e1510_get_temp(struct phy_device *phydev, long *temp) +{ + int ret; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR); + if (ret < 0) + goto error; + + *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp) +{ + int ret; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >> + MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25; + /* convert to mC */ + *temp *= 1000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_set_temp_critical(struct phy_device *phydev, long temp) +{ + int ret; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + temp = temp / 1000; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) | + (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT)); + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm) +{ + int ret; + + *alarm = false; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ); + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +static int m88e1510_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e1510_get_temp(phydev, temp); + break; + case hwmon_temp_crit: + err = m88e1510_get_temp_critical(phydev, temp); + break; + case hwmon_temp_max_alarm: + err = m88e1510_get_temp_alarm(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static int m88e1510_hwmon_write(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_crit: + err = m88e1510_set_temp_critical(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + return err; +} + +static umode_t m88e1510_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case 
hwmon_temp_max_alarm: + return 0444; + case hwmon_temp_crit: + return 0644; + default: + return 0; + } +} + +static u32 m88e1510_hwmon_temp_config[] = { + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, + 0 +}; + +static const struct hwmon_channel_info m88e1510_hwmon_temp = { + .type = hwmon_temp, + .config = m88e1510_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e1510_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e1510_hwmon_temp, + NULL +}; + +static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = { + .is_visible = m88e1510_hwmon_is_visible, + .read = m88e1510_hwmon_read, + .write = m88e1510_hwmon_write, +}; + +static const struct hwmon_chip_info m88e1510_hwmon_chip_info = { + .ops = &m88e1510_hwmon_hwmon_ops, + .info = m88e1510_hwmon_info, +}; + +static int marvell_hwmon_name(struct phy_device *phydev) +{ + struct marvell_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + const char *devname = dev_name(dev); + size_t len = strlen(devname); + int i, j; + + priv->hwmon_name = devm_kzalloc(dev, len, GFP_KERNEL); + if (!priv->hwmon_name) + return -ENOMEM; + + for (i = j = 0; i < len && devname[i]; i++) { + if (isalnum(devname[i])) + priv->hwmon_name[j++] = devname[i]; + } + + return 0; +} + +static int marvell_hwmon_probe(struct phy_device *phydev, + const struct hwmon_chip_info *chip) +{ + struct marvell_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + int err; + + err = marvell_hwmon_name(phydev); + if (err) + return err; + + priv->hwmon_dev = devm_hwmon_device_register_with_info( + dev, priv->hwmon_name, phydev, chip, NULL); + + return PTR_ERR_OR_ZERO(priv->hwmon_dev); +} + +static int m88e1121_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info); +} + +static int m88e1510_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info); +} +#else +static int m88e1121_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} + +static int m88e1510_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} +#endif + static int marvell_probe(struct phy_device *phydev) { struct marvell_priv *priv; @@ -1481,6 +1861,39 @@ static int marvell_probe(struct phy_device *phydev) return 0; } +static int m88e1121_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e1121_hwmon_probe(phydev); +} + +static int m88e1510_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e1510_hwmon_probe(phydev); +} + +static void marvell_remove(struct phy_device *phydev) +{ +#ifdef CONFIG_HWMON + + struct marvell_priv *priv = phydev->priv; + + if (priv && priv->hwmon_dev) + hwmon_device_unregister(priv->hwmon_dev); +#endif +} + static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, @@ -1558,9 +1971,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1121R, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", + .probe = &m88e1121_probe, + .remove = &marvell_remove, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, .config_init = &m88e1121_config_init, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, @@ -1671,8 +2085,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1510", .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE, + .probe = 
&m88e1510_probe, + .remove = &marvell_remove, + .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -1689,9 +2105,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1540, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1540", + .probe = m88e1510_probe, + .remove = &marvell_remove, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, -- cgit v1.2.3 From cf1a56a4cf196a2922e66e9a8e0bf80d324c5548 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Fri, 20 Jan 2017 01:37:50 +0100 Subject: net: dsa: Remove hwmon support Only the Marvell mv88e6xxx DSA driver made use of the HWMON support in DSA. The temperature sensor registers are actually in the embedded PHYs, and the PHY driver now supports it. So remove all HWMON support from DSA and drivers. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 154 ---------------------------------- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 16 ---- include/net/dsa.h | 8 -- net/dsa/Kconfig | 11 --- net/dsa/Makefile | 1 - net/dsa/dsa.c | 4 - net/dsa/dsa_priv.h | 9 -- net/dsa/hwmon.c | 147 -------------------------------- 8 files changed, 350 deletions(-) delete mode 100644 net/dsa/hwmon.c diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 987b2dbbd35a..c7e08e13bb54 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2964,154 +2964,6 @@ static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) of_node_put(chip->mdio_np); } -#ifdef CONFIG_NET_DSA_HWMON - -static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - u16 val; - int ret; - - *temp = 0; - - mutex_lock(&chip->reg_lock); - - ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6); - if (ret < 0) - goto error; - - /* Enable temperature sensor */ - ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); - if (ret < 0) - goto error; - - ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5)); - if (ret < 0) - goto error; - - /* Wait for temperature to stabilize */ - usleep_range(10000, 12000); - - ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); - if (ret < 0) - goto error; - - /* Disable temperature sensor */ - ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5)); - if (ret < 0) - goto error; - - *temp = ((val & 0x1f) - 5) * 5; - -error: - mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0); - mutex_unlock(&chip->reg_lock); - return ret; -} - -static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; - u16 val; - int ret; - - *temp = 0; - - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; - - *temp = (val & 0xff) - 25; - - return 0; -} - -static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP)) - return -EOPNOTSUPP; - - if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) - return mv88e63xx_get_temp(ds, temp); - - return mv88e61xx_get_temp(ds, temp); -} - -static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int ret; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - *temp = 0; - - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; - - *temp = (((val >> 8) & 0x1f) * 5) - 25; - - return 0; -} - -static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int err; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - if (err) - goto unlock; - temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - err = mv88e6xxx_phy_page_write(chip, phy, 6, 26, - (val & 0xe0ff) | (temp << 8)); -unlock: - mutex_unlock(&chip->reg_lock); - - return err; -} - -static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int ret; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - *alarm = false; - - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; - - *alarm = !!(val & 0x40); - - return 0; -} -#endif /* CONFIG_NET_DSA_HWMON */ - static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; @@ -4386,12 +4238,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .set_eee = mv88e6xxx_set_eee, .get_eee = mv88e6xxx_get_eee, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, - .get_temp_limit = mv88e6xxx_get_temp_limit, - .set_temp_limit = mv88e6xxx_set_temp_limit, - .get_temp_alarm = mv88e6xxx_get_temp_alarm, -#endif .get_eeprom_len = mv88e6xxx_get_eeprom_len, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 466cfdadb7bd..ce8b43b14e96 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -497,12 +497,6 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_STU, - /* Internal temperature sensor. - * Available from any enabled port's PHY register 26, page 6. - */ - MV88E6XXX_CAP_TEMP, - MV88E6XXX_CAP_TEMP_LIMIT, - /* VLAN Table Unit. * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP. 
*/ @@ -533,8 +527,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) #define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU) -#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP) -#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT) #define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU) /* Ingress Rate Limit unit */ @@ -586,7 +578,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -605,8 +596,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -621,7 +610,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -637,8 +625,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -651,8 +637,6 @@ struct mv88e6xxx_ops; (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ diff --git a/include/net/dsa.h b/include/net/dsa.h index c72ed7af2a2a..9d6cd923c48c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -307,14 +307,6 @@ struct dsa_switch_ops { int (*get_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); -#ifdef CONFIG_NET_DSA_HWMON - /* Hardware monitoring */ - int (*get_temp)(struct dsa_switch *ds, int *temp); - int (*get_temp_limit)(struct dsa_switch *ds, int *temp); - int (*set_temp_limit)(struct dsa_switch *ds, int temp); - int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm); -#endif - /* EEPROM access */ int (*get_eeprom_len)(struct dsa_switch *ds); int (*get_eeprom)(struct dsa_switch *ds, diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 39bb5b3a82f2..9649238eef40 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -15,17 +15,6 @@ config NET_DSA if NET_DSA -config NET_DSA_HWMON - bool "Distributed Switch Architecture HWMON support" - default y - depends on HWMON && !(NET_DSA=y && HWMON=m) - ---help--- - Say Y if you want to expose thermal sensor data on switches supported - by the Distributed Switch Architecture. - - Some of those switches contain thermal sensors. This data is available - via the hwmon sysfs interface and exposes the onboard sensors. 
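The readings dropped from DSA here stay available through the hwmon device that the previous patch registers in the Marvell PHY driver: temp1_input in millidegrees Celsius, plus a writable temp1_crit and a temp1_max_alarm on the 88E1510 family. A minimal user-space sketch, assuming the sensor happened to register as hwmon0 (the index is system dependent and normally resolved via the hwmon name attribute):

/* Illustrative only: read the PHY temperature via standard hwmon sysfs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long mdeg;

	if (!f || fscanf(f, "%ld", &mdeg) != 1) {
		perror("temp1_input");
		return 1;
	}
	fclose(f);
	printf("PHY temperature: %ld millidegrees C\n", mdeg);
	return 0;
}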
- # tagging formats config NET_DSA_TAG_BRCM bool diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 560b6747c276..a3380ed0e0be 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -1,7 +1,6 @@ # the core obj-$(CONFIG_NET_DSA) += dsa_core.o dsa_core-y += dsa.o slave.o dsa2.o -dsa_core-$(CONFIG_NET_DSA_HWMON) += hwmon.o # tagging formats dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 91f96e1bd2ec..77cb78767f1d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -316,8 +316,6 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) if (ret) return ret; - dsa_hwmon_register(ds); - return 0; } @@ -376,8 +374,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds) { int port; - dsa_hwmon_unregister(ds); - /* Destroy network devices for physical switch ports. */ for (port = 0; port < DSA_MAX_PORTS; port++) { if (!(ds->enabled_port_mask & (1 << port))) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 7e3385ec73f4..63ae1484abae 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -56,15 +56,6 @@ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol); int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds); void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds); -/* hwmon.c */ -#ifdef CONFIG_NET_DSA_HWMON -void dsa_hwmon_register(struct dsa_switch *ds); -void dsa_hwmon_unregister(struct dsa_switch *ds); -#else -static inline void dsa_hwmon_register(struct dsa_switch *ds) { } -static inline void dsa_hwmon_unregister(struct dsa_switch *ds) { } -#endif - /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); diff --git a/net/dsa/hwmon.c b/net/dsa/hwmon.c deleted file mode 100644 index 08831a811278..000000000000 --- a/net/dsa/hwmon.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * net/dsa/hwmon.c - HWMON subsystem support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include -#include -#include - -#include "dsa_priv.h" - -static ssize_t temp1_input_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - int temp, ret; - - ret = ds->ops->get_temp(ds, &temp); - if (ret < 0) - return ret; - - return sprintf(buf, "%d\n", temp * 1000); -} -static DEVICE_ATTR_RO(temp1_input); - -static ssize_t temp1_max_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - int temp, ret; - - ret = ds->ops->get_temp_limit(ds, &temp); - if (ret < 0) - return ret; - - return sprintf(buf, "%d\n", temp * 1000); -} - -static ssize_t temp1_max_store(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - int temp, ret; - - ret = kstrtoint(buf, 0, &temp); - if (ret < 0) - return ret; - - ret = ds->ops->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); - if (ret < 0) - return ret; - - return count; -} -static DEVICE_ATTR_RW(temp1_max); - -static ssize_t temp1_max_alarm_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dsa_switch *ds = dev_get_drvdata(dev); - bool alarm; - int ret; - - ret = ds->ops->get_temp_alarm(ds, &alarm); - if (ret < 0) - return ret; - - return sprintf(buf, "%d\n", alarm); -} -static DEVICE_ATTR_RO(temp1_max_alarm); - -static struct attribute *dsa_hwmon_attrs[] = { - &dev_attr_temp1_input.attr, /* 0 */ - &dev_attr_temp1_max.attr, /* 1 */ - &dev_attr_temp1_max_alarm.attr, /* 2 */ - NULL -}; - -static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, - struct attribute *attr, int index) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct dsa_switch *ds = dev_get_drvdata(dev); - const struct dsa_switch_ops *ops = ds->ops; - umode_t mode = attr->mode; - - if (index == 1) { - if (!ops->get_temp_limit) - mode = 0; - else if (!ops->set_temp_limit) - mode &= ~S_IWUSR; - } else if (index == 2 && !ops->get_temp_alarm) { - mode = 0; - } - return mode; -} - -static const struct attribute_group dsa_hwmon_group = { - .attrs = dsa_hwmon_attrs, - .is_visible = dsa_hwmon_attrs_visible, -}; -__ATTRIBUTE_GROUPS(dsa_hwmon); - -void dsa_hwmon_register(struct dsa_switch *ds) -{ - const char *netname = netdev_name(ds->dst->master_netdev); - char hname[IFNAMSIZ + 1]; - int i, j; - - /* If the switch provides temperature accessors, register with hardware - * monitoring subsystem. Treat registration error as non-fatal. 
- */ - if (!ds->ops->get_temp) - return; - - /* Create valid hwmon 'name' attribute */ - for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) { - if (isalnum(netname[i])) - hname[j++] = netname[i]; - } - hname[j] = '\0'; - scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d", hname, - ds->index); - ds->hwmon_dev = hwmon_device_register_with_groups(NULL, ds->hwmon_name, - ds, dsa_hwmon_groups); - if (IS_ERR(ds->hwmon_dev)) { - pr_warn("DSA: failed to register HWMON subsystem for switch %d\n", - ds->index); - ds->hwmon_dev = NULL; - } else { - pr_info("DSA: registered HWMON subsystem for switch %d\n", - ds->index); - } -} - -void dsa_hwmon_unregister(struct dsa_switch *ds) -{ - if (ds->hwmon_dev) { - hwmon_device_unregister(ds->hwmon_dev); - ds->hwmon_dev = NULL; - } -} -- cgit v1.2.3 From 530cef21d98930c4d10f188325e00330455d216d Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 20 Jan 2017 22:36:53 +0800 Subject: 6lowpan: use rb_entry() To make the code clearer, use rb_entry() instead of container_of() to deal with rbtree. Signed-off-by: Geliang Tang Signed-off-by: David S. Miller --- net/6lowpan/nhc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c index 7008d53e455c..4fa2fdda174d 100644 --- a/net/6lowpan/nhc.c +++ b/net/6lowpan/nhc.c @@ -27,8 +27,8 @@ static int lowpan_nhc_insert(struct lowpan_nhc *nhc) /* Figure out where to put new node */ while (*new) { - struct lowpan_nhc *this = container_of(*new, struct lowpan_nhc, - node); + struct lowpan_nhc *this = rb_entry(*new, struct lowpan_nhc, + node); int result, len_dif, len; len_dif = nhc->idlen - this->idlen; @@ -69,8 +69,8 @@ static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb) const u8 *nhcid_skb_ptr = skb->data; while (node) { - struct lowpan_nhc *nhc = container_of(node, struct lowpan_nhc, - node); + struct lowpan_nhc *nhc = rb_entry(node, struct lowpan_nhc, + node); u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN]; int result, i; -- cgit v1.2.3 From 3704eb6f6fd09023e2c94e8ead687b908ab02135 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 20 Jan 2017 22:36:57 +0800 Subject: net/mlx4: use rb_entry() To make the code clearer, use rb_entry() instead of container_of() to deal with rbtree. Signed-off-by: Geliang Tang Reviewed-by: Leon Romanovsky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1822382212ee..6da6e01d07bd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -236,8 +236,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id) struct rb_node *node = root->rb_node; while (node) { - struct res_common *res = container_of(node, struct res_common, - node); + struct res_common *res = rb_entry(node, struct res_common, + node); if (res_id < res->res_id) node = node->rb_left; @@ -255,8 +255,8 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res) /* Figure out where to put new node */ while (*new) { - struct res_common *this = container_of(*new, struct res_common, - node); + struct res_common *this = rb_entry(*new, struct res_common, + node); parent = *new; if (res->res_id < this->res_id) -- cgit v1.2.3 From f4ec60644a7f9f8bd9571c30c573ce54c4b0ad97 Mon Sep 17 00:00:00 2001 From: jpinto Date: Fri, 20 Jan 2017 16:00:26 +0000 Subject: net: stmicro: fix LS field mask in EEE configuration This patch fixes the LS mask when setting EEE timer. LS field is 10 bits long and not 11 as currently. Signed-off-by: Joao Pinto Reported-By: Rayagond Kokatanur Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 834f40f08208..202216cd6789 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -184,7 +184,7 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) { void __iomem *ioaddr = hw->pcsr; - int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); /* Program the timers in the LPI timer control register: * LS: minimum time (ms) for which the link -- cgit v1.2.3 From 9ca677b1bd8979f9a7702db5ed4028e4b8b232e8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 20 Jan 2017 08:08:56 -0800 Subject: ipv6: add NUMA awareness to seg6_hmac_init_algo() Since we allocate per cpu storage, let's also use NUMA hints. Signed-off-by: Eric Dumazet Acked-by: David Lebrun Signed-off-by: David S. Miller --- net/ipv6/seg6_hmac.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 5215e1eba010..b274f1d95e03 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -389,7 +389,8 @@ static int seg6_hmac_init_algo(void) return -ENOMEM; for_each_possible_cpu(cpu) { - shash = kzalloc(shsize, GFP_KERNEL); + shash = kzalloc_node(shsize, GFP_KERNEL, + cpu_to_node(cpu)); if (!shash) return -ENOMEM; *per_cpu_ptr(algo->shashs, cpu) = shash; -- cgit v1.2.3 From 7b78be48a8eb6c34e4e1b1581eceff4074bb4bf1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 11:08:26 -0800 Subject: net: systemport: Dynamically allocate number of TX rings In preparation for adding SYSTEMPORT Lite, which has twice as less transmit queues than SYSTEMPORT make sure we do allocate TX rings based on the systemport,txq property to get an appropriate memory footprint. 
Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 11 +++++++++++ drivers/net/ethernet/broadcom/bcmsysport.h | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 744ed6ddaf37..31bb2c3696ec 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1752,6 +1752,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) rxq = 1; + /* Sanity check the number of transmit queues */ + if (!txq || txq > TDMA_NUM_RINGS) + return -EINVAL; + dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); if (!dev) return -ENOMEM; @@ -1759,6 +1763,13 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* Initialize private members */ priv = netdev_priv(dev); + /* Allocate number of TX rings */ + priv->tx_rings = devm_kcalloc(&pdev->dev, txq, + sizeof(struct bcm_sysport_tx_ring), + GFP_KERNEL); + if (!priv->tx_rings) + return -ENOMEM; + priv->irq0 = platform_get_irq(pdev, 0); priv->irq1 = platform_get_irq(pdev, 1); priv->wol_irq = platform_get_irq(pdev, 2); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 1c82e3da69a7..f051356b0274 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -659,7 +659,7 @@ struct bcm_sysport_priv { int wol_irq; /* Transmit rings */ - struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; + struct bcm_sysport_tx_ring *tx_rings; /* Receive queue */ void __iomem *rx_bds; -- cgit v1.2.3 From 44a4524c54af2059bd7e967b0527fb7c6618672a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 11:08:27 -0800 Subject: net: systemport: Add support for SYSTEMPORT Lite Add support for the SYSTEMPORT Lite Ethernet controller. This piece of hardware is largely based on the full-blown SYSTEMPORT and differs in the following:
- no full-blown UniMAC; instead we have the MagicPacket matching from UniMAC at the same offset, and a GMII Interface Block (GIB) for the MAC-level stuff; since we are always interfaced to an Ethernet switch which is fully Ethernet compliant, shortcuts could be made
- 16 transmit queues, whose interrupts are moved into the first Level-2 interrupt controller bank
- a slight TDMA offset change (a register was inserted after TDMA_STATUS, *sigh*)
- 256 RX descriptors (512 words) and 256 TX descriptors (not visible)
As a consequence of these differences, update the code paths accordingly to differentiate the full-blown version from the Lite version.
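As a rough sketch of how the register-offset difference can be contained, the accessors below shift every register at or above a threshold by 4 bytes on the Lite variant, so callers keep using the original register map. The structure name, block offset and 0x1024 threshold are assumptions made up for the example; the actual driver applies the shift inside its rdma_readl()/rdma_writel() helpers and handles the shifted TDMA control bits with a separate helper.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative private data; only the fields the accessors need. */
struct example_priv {
        void __iomem *base;
        bool is_lite;                   /* hardware variant, set at probe */
};

#define EXAMPLE_RDMA_OFFSET     0x2000  /* assumed block offset */
#define EXAMPLE_RDMA_STATUS     0x1024  /* assumed register offset */

/*
 * On the Lite variant a register was inserted into the middle of the
 * RDMA block, so everything at or above RDMA_STATUS moved up by 4
 * bytes.  Hiding that inside the accessor keeps the rest of the
 * driver on a single register map.
 */
static inline u32 example_rdma_readl(struct example_priv *priv, u32 off)
{
        if (priv->is_lite && off >= EXAMPLE_RDMA_STATUS)
                off += 4;
        return readl(priv->base + EXAMPLE_RDMA_OFFSET + off);
}

static inline void example_rdma_writel(struct example_priv *priv,
                                       u32 val, u32 off)
{
        if (priv->is_lite && off >= EXAMPLE_RDMA_STATUS)
                off += 4;
        writel(val, priv->base + EXAMPLE_RDMA_OFFSET + off);
}

Keeping the quirk inside one pair of helpers avoids sprinkling is_lite checks across every RDMA access in the driver.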
Miller --- .../devicetree/bindings/net/brcm,systemport.txt | 5 +- drivers/net/ethernet/broadcom/bcmsysport.c | 323 +++++++++++++++++---- drivers/net/ethernet/broadcom/bcmsysport.h | 78 ++++- 3 files changed, 327 insertions(+), 79 deletions(-) diff --git a/Documentation/devicetree/bindings/net/brcm,systemport.txt b/Documentation/devicetree/bindings/net/brcm,systemport.txt index 877da34145b0..83f29e0e11ba 100644 --- a/Documentation/devicetree/bindings/net/brcm,systemport.txt +++ b/Documentation/devicetree/bindings/net/brcm,systemport.txt @@ -1,7 +1,10 @@ * Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT) Required properties: -- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport" +- compatible: should be one of: + "brcm,systemport-v1.00" + "brcm,systemportlite-v1.00" or + "brcm,systemport" - reg: address and length of the register set for the device. - interrupts: interrupts for the device, first cell must be for the rx interrupts, and the second cell should be for the transmit queues. An diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 31bb2c3696ec..a68d4889f5db 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv, \ BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); +BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET); BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); -BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET); BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); +/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact + * same layout, except it has been moved by 4 bytes up, *sigh* + */ +static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) +{ + if (!priv->is_lite) { + return BIT(bit); + } else { + if (bit >= ACB_ALGO) + return BIT(bit + 1); + else + return BIT(bit); + } +} + /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. 
*/ @@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev, priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); reg = tdma_readl(priv, TDMA_CONTROL); if (priv->tsb_en) - reg |= TSB_EN; + reg |= tdma_control_bit(priv, TSB_EN); else - reg &= ~TSB_EN; + reg &= ~tdma_control_bit(priv, TSB_EN); tdma_writel(priv, reg, TDMA_CONTROL); return 0; @@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) priv->msg_enable = enable; } +static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) +{ + switch (type) { + case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_RXCHK: + case BCM_SYSPORT_STAT_RBUF: + case BCM_SYSPORT_STAT_SOFT: + return true; + default: + return false; + } +} + static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) { + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + unsigned int i, j; + switch (string_set) { case ETH_SS_STATS: - return BCM_SYSPORT_STATS_LEN; + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + j++; + } + return j; default: return -EOPNOTSUPP; } @@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) static void bcm_sysport_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - int i; + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + int i, j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcm_sysport_gstrings_stats[i].stat_string, + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + + memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, ETH_GSTRING_LEN); + j++; } break; default: @@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) case BCM_SYSPORT_STAT_MIB_RX: case BCM_SYSPORT_STAT_MIB_TX: case BCM_SYSPORT_STAT_RUNT: + if (priv->is_lite) + continue; + if (s->type != BCM_SYSPORT_STAT_MIB_RX) offset = UMAC_MIB_STAT_OFFSET; val = umac_readl(priv, UMAC_MIB_START + j + offset); @@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct bcm_sysport_priv *priv = netdev_priv(dev); - int i; + int i, j; if (netif_running(dev)) bcm_sysport_update_mib_counters(priv); - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { const struct bcm_sysport_stats *s; char *p; @@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev, else p = (char *)priv; p += s->stat_offset; - data[i] = *(unsigned long *)p; + data[j] = *(unsigned long *)p; + j++; } } @@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, u16 len, status; struct bcm_rsb *rsb; - /* Determine how much we should process since last call */ - p_index = rdma_readl(priv, RDMA_PROD_INDEX); + /* Determine how much we should process since last call, SYSTEMPORT Lite + * groups the producer and consumer indexes into the same 32-bit + * which we access using RDMA_CONS_INDEX + */ + if (!priv->is_lite) + p_index = rdma_readl(priv, RDMA_PROD_INDEX); + else + p_index = rdma_readl(priv, RDMA_CONS_INDEX); p_index &= RDMA_PROD_INDEX_MASK; if (p_index < priv->rx_c_index) 
@@ -791,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) if (work_done == 0) { napi_complete(napi); /* re-enable TX interrupt */ - intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + if (!ring->priv->is_lite) + intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + else + intrl2_0_mask_clear(ring->priv, BIT(ring->index + + INTRL2_0_TDMA_MBDONE_SHIFT)); return 0; } @@ -817,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) priv->rx_c_index += work_done; priv->rx_c_index &= RDMA_CONS_INDEX_MASK; - rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + + /* SYSTEMPORT Lite groups the producer/consumer index, producer is + * maintained by HW, but writes to it will be ignore while RDMA + * is active + */ + if (!priv->is_lite) + rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + else + rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX); if (work_done < budget) { napi_complete_done(napi, work_done); @@ -848,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_tx_ring *txr; + unsigned int ring, ring_bit; priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); @@ -877,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) bcm_sysport_resume_from_wol(priv); } + if (!priv->is_lite) + goto out; + + for (ring = 0; ring < dev->num_tx_queues; ring++) { + ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT); + if (!(priv->irq0_stat & ring_bit)) + continue; + + txr = &priv->tx_rings[ring]; + + if (likely(napi_schedule_prep(&txr->napi))) { + intrl2_0_mask_set(priv, ring_bit); + __napi_schedule(&txr->napi); + } + } +out: return IRQ_HANDLED; } @@ -930,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev) bcm_sysport_rx_isr(priv->irq0, priv); enable_irq(priv->irq0); - disable_irq(priv->irq1); - bcm_sysport_tx_isr(priv->irq1, priv); - enable_irq(priv->irq1); + if (!priv->is_lite) { + disable_irq(priv->irq1); + bcm_sysport_tx_isr(priv->irq1, priv); + enable_irq(priv->irq1); + } } #endif @@ -1129,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) priv->old_duplex = phydev->duplex; } + if (priv->is_lite) + goto out; + switch (phydev->speed) { case SPEED_2500: cmd_bits = CMD_SPEED_2500; @@ -1169,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) reg |= cmd_bits; umac_writel(priv, reg, UMAC_CMD); } - - phy_print_status(phydev); +out: + if (changed) + phy_print_status(phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, @@ -1315,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv, reg = tdma_readl(priv, TDMA_CONTROL); if (enable) - reg |= TDMA_EN; + reg |= tdma_control_bit(priv, TDMA_EN); else - reg &= ~TDMA_EN; + reg &= ~tdma_control_bit(priv, TDMA_EN); tdma_writel(priv, reg, TDMA_CONTROL); /* Poll for TMDA disabling completion */ @@ -1342,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) int i; /* Initialize SW view of the RX ring */ - priv->num_rx_bds = NUM_RX_DESC; + priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; priv->rx_c_index = 0; priv->rx_read_ptr = 0; @@ -1379,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) rdma_writel(priv, 0, RDMA_START_ADDR_HI); rdma_writel(priv, 0, RDMA_START_ADDR_LO); rdma_writel(priv, 0, 
RDMA_END_ADDR_HI); - rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO); + rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); rdma_writel(priv, 1, RDMA_MBDONE_INTR); @@ -1421,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); if (dev->flags & IFF_PROMISC) reg |= CMD_PROMISC; @@ -1438,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv, { u32 reg; - reg = umac_readl(priv, UMAC_CMD); - if (enable) - reg |= mask; - else - reg &= ~mask; - umac_writel(priv, reg, UMAC_CMD); + if (!priv->is_lite) { + reg = umac_readl(priv, UMAC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + umac_writel(priv, reg, UMAC_CMD); + } else { + reg = gib_readl(priv, GIB_CONTROL); + if (enable) + reg |= mask; + else + reg &= ~mask; + gib_writel(priv, reg, GIB_CONTROL); + } /* UniMAC stops on a packet boundary, wait for a full-sized packet * to be processed (1 msec). @@ -1456,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) { u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); reg |= CMD_SW_RESET; umac_writel(priv, reg, UMAC_CMD); @@ -1468,9 +1589,17 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) static void umac_set_hw_addr(struct bcm_sysport_priv *priv, unsigned char *addr) { - umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | - (addr[2] << 8) | addr[3], UMAC_MAC0); - umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); + u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | + addr[3]; + u32 mac1 = (addr[4] << 8) | addr[5]; + + if (!priv->is_lite) { + umac_writel(priv, mac0, UMAC_MAC0); + umac_writel(priv, mac1, UMAC_MAC1); + } else { + gib_writel(priv, mac0, GIB_MAC0); + gib_writel(priv, mac1, GIB_MAC1); + } } static void topctrl_flush(struct bcm_sysport_priv *priv) @@ -1515,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev) phy_start(dev->phydev); - /* Enable TX interrupts for the 32 TXQs */ - intrl2_1_mask_clear(priv, 0xffffffff); + /* Enable TX interrupts for the TXQs */ + if (!priv->is_lite) + intrl2_1_mask_clear(priv, 0xffffffff); + else + intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); /* Last call before we start the real business */ netif_tx_start_all_queues(dev); @@ -1528,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv) reg = rbuf_readl(priv, RBUF_CONTROL); reg |= RBUF_4B_ALGN | RBUF_RSB_EN; + /* Set a correct RSB format on SYSTEMPORT Lite */ + if (priv->is_lite) { + reg &= ~RBUF_RSB_SWAP1; + reg |= RBUF_RSB_SWAP0; + } rbuf_writel(priv, reg, RBUF_CONTROL); } +static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv) +{ + intrl2_0_mask_set(priv, 0xffffffff); + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + if (!priv->is_lite) { + intrl2_1_mask_set(priv, 0xffffffff); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + } +} + +static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) +{ + u32 __maybe_unused reg; + + /* Include Broadcom tag in pad extension */ + if (netdev_uses_dsa(priv->netdev)) { + reg = gib_readl(priv, GIB_CONTROL); + reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); + reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; + gib_writel(priv, reg, GIB_CONTROL); + } +} + static int bcm_sysport_open(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -1551,13 +1711,20 @@ static 
int bcm_sysport_open(struct net_device *dev) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); /* Read CRC forward */ - priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + if (!priv->is_lite) + priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + else + priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP); phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); @@ -1572,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev) priv->old_pause = -1; /* mask all interrupts and request them */ - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + bcm_sysport_mask_all_intrs(priv); ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); if (ret) { @@ -1585,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev) goto out_phy_disconnect; } - ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev); - if (ret) { - netdev_err(dev, "failed to request TX interrupt\n"); - goto out_free_irq0; + if (!priv->is_lite) { + ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, + dev->name, dev); + if (ret) { + netdev_err(dev, "failed to request TX interrupt\n"); + goto out_free_irq0; + } } /* Initialize both hardware and software ring */ @@ -1635,7 +1800,8 @@ out_free_rx_ring: out_free_tx_ring: for (i = 0; i < dev->num_tx_queues; i++) bcm_sysport_fini_tx_ring(priv, i); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); out_free_irq0: free_irq(priv->irq0, dev); out_phy_disconnect: @@ -1653,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) phy_stop(dev->phydev); /* mask all interrupts */ - intrl2_0_mask_set(priv, 0xffffffff); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_mask_set(priv, 0xffffffff); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + bcm_sysport_mask_all_intrs(priv); } static int bcm_sysport_stop(struct net_device *dev) @@ -1694,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev) bcm_sysport_fini_rx_ring(priv); free_irq(priv->irq0, dev); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); /* Disconnect from PHY */ phy_disconnect(dev->phydev); @@ -1733,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #define REV_FMT "v%2x.%02x" +static const struct bcm_sysport_hw_params bcm_sysport_params[] = { + [SYSTEMPORT] = { + .is_lite = false, + .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, + }, + [SYSTEMPORT_LITE] = { + .is_lite = true, + .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, + }, +}; + +static const struct of_device_id bcm_sysport_of_match[] = { + { .compatible = "brcm,systemportlite-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, + { .compatible = "brcm,systemport-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { .compatible = "brcm,systemport", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); + static int bcm_sysport_probe(struct platform_device 
*pdev) { + const struct bcm_sysport_hw_params *params; + const struct of_device_id *of_id = NULL; struct bcm_sysport_priv *priv; struct device_node *dn; struct net_device *dev; @@ -1745,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev) dn = pdev->dev.of_node; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + of_id = of_match_node(bcm_sysport_of_match, dn); + if (!of_id || !of_id->data) + return -EINVAL; + + /* Fairly quickly we need to know the type of adapter we have */ + params = of_id->data; /* Read the Transmit/Receive Queue properties */ if (of_property_read_u32(dn, "systemport,num-txq", &txq)) @@ -1770,10 +1964,14 @@ static int bcm_sysport_probe(struct platform_device *pdev) if (!priv->tx_rings) return -ENOMEM; + priv->is_lite = params->is_lite; + priv->num_rx_desc_words = params->num_rx_desc_words; + priv->irq0 = platform_get_irq(pdev, 0); - priv->irq1 = platform_get_irq(pdev, 1); + if (!priv->is_lite) + priv->irq1 = platform_get_irq(pdev, 1); priv->wol_irq = platform_get_irq(pdev, 2); - if (priv->irq0 <= 0 || priv->irq1 <= 0) { + if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { dev_err(&pdev->dev, "invalid interrupts\n"); ret = -EINVAL; goto err_free_netdev; @@ -1847,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, - "Broadcom SYSTEMPORT" REV_FMT + "Broadcom SYSTEMPORT%s" REV_FMT " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", + priv->is_lite ? " Lite" : "", (priv->rev >> 8) & 0xff, priv->rev & 0xff, priv->base, priv->irq0, priv->irq1, txq, rxq); @@ -2044,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); @@ -2080,13 +2282,6 @@ out_free_tx_rings: static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, bcm_sysport_suspend, bcm_sysport_resume); -static const struct of_device_id bcm_sysport_of_match[] = { - { .compatible = "brcm,systemport-v1.00" }, - { .compatible = "brcm,systemport" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); - static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, .remove = bcm_sysport_remove, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f051356b0274..863ddd7870b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -127,6 +127,10 @@ struct bcm_rsb { #define INTRL2_0_DESC_ALLOC_ERR (1 << 10) #define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11) +/* SYSTEMPORT Lite groups the TX queues interrupts on instance 0 */ +#define INTRL2_0_TDMA_MBDONE_SHIFT 12 +#define INTRL2_0_TDMA_MBDONE_MASK (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT) + /* RXCHK offset and defines */ #define SYS_PORT_RXCHK_OFFSET 0x300 @@ -176,7 +180,9 @@ struct bcm_rsb { #define RBUF_OK_TO_SEND_MASK 0xff #define RBUF_CRC_REPLACE (1 << 20) #define RBUF_OK_TO_SEND_MODE (1 << 21) -#define RBUF_RSB_SWAP (1 << 22) +/* SYSTEMPORT Lite uses two bits here */ +#define RBUF_RSB_SWAP0 (1 << 22) +#define RBUF_RSB_SWAP1 (1 << 23) #define RBUF_ACPI_EN (1 << 23) #define RBUF_PKT_RDY_THRESH 0x04 @@ -247,6 +253,7 @@ struct bcm_rsb { #define MIB_RUNT_CNT_RST (1 << 1) #define MIB_TX_CNT_RST (1 << 2) +/* These offsets are valid for SYSTEMPORT and 
SYSTEMPORT Lite */ #define UMAC_MPD_CTRL 0x620 #define MPD_EN (1 << 0) #define MSEQ_LEN_SHIFT 16 @@ -258,6 +265,34 @@ struct bcm_rsb { #define UMAC_MDF_CTRL 0x650 #define UMAC_MDF_ADDR 0x654 +/* Only valid on SYSTEMPORT Lite */ +#define SYS_PORT_GIB_OFFSET 0x1000 + +#define GIB_CONTROL 0x00 +#define GIB_TX_EN (1 << 0) +#define GIB_RX_EN (1 << 1) +#define GIB_TX_FLUSH (1 << 2) +#define GIB_RX_FLUSH (1 << 3) +#define GIB_GTX_CLK_SEL_SHIFT 4 +#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_FCS_STRIP (1 << 6) +#define GIB_LCL_LOOP_EN (1 << 7) +#define GIB_LCL_LOOP_TXEN (1 << 8) +#define GIB_RMT_LOOP_EN (1 << 9) +#define GIB_RMT_LOOP_RXEN (1 << 10) +#define GIB_RX_PAUSE_EN (1 << 11) +#define GIB_PREAMBLE_LEN_SHIFT 12 +#define GIB_PREAMBLE_LEN_MASK 0xf +#define GIB_IPG_LEN_SHIFT 16 +#define GIB_IPG_LEN_MASK 0x3f +#define GIB_PAD_EXTENSION_SHIFT 22 +#define GIB_PAD_EXTENSION_MASK 0x3f + +#define GIB_MAC1 0x08 +#define GIB_MAC0 0x0c + /* Receive DMA offset and defines */ #define SYS_PORT_RDMA_OFFSET 0x2000 @@ -409,16 +444,19 @@ struct bcm_rsb { RING_PCP_DEI_VID) #define TDMA_CONTROL 0x600 -#define TDMA_EN (1 << 0) -#define TSB_EN (1 << 1) -#define TSB_SWAP (1 << 2) -#define ACB_ALGO (1 << 3) +#define TDMA_EN 0 +#define TSB_EN 1 +/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit, we + * keep the SYSTEMPORT layout here and adjust with tdma_control_bit() + */ +#define TSB_SWAP 2 +#define ACB_ALGO 3 #define BUF_DATA_OFFSET_SHIFT 4 #define BUF_DATA_OFFSET_MASK 0x3ff -#define VLAN_EN (1 << 14) -#define SW_BRCM_TAG (1 << 15) -#define WNC_KPT_SIZE_UPDATE (1 << 16) -#define SYNC_PKT_SIZE (1 << 17) +#define VLAN_EN 14 +#define SW_BRCM_TAG 15 +#define WNC_KPT_SIZE_UPDATE 16 +#define SYNC_PKT_SIZE 17 #define ACH_TXDONE_DELAY_SHIFT 18 #define ACH_TXDONE_DELAY_MASK 0xff @@ -475,12 +513,12 @@ struct dma_desc { }; /* Number of Receive hardware descriptor words */ -#define NUM_HW_RX_DESC_WORDS 1024 -/* Real number of usable descriptors */ -#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC) +#define SP_NUM_HW_RX_DESC_WORDS 1024 +#define SP_LT_NUM_HW_RX_DESC_WORDS 256 -/* Internal linked-list RAM has up to 1536 entries */ -#define NUM_TX_DESC 1536 +/* Internal linked-list RAM size */ +#define SP_NUM_TX_DESC 1536 +#define SP_LT_NUM_TX_DESC 256 #define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32)) @@ -627,6 +665,16 @@ struct bcm_sysport_cb { DEFINE_DMA_UNMAP_LEN(dma_len); }; +enum bcm_sysport_type { + SYSTEMPORT = 0, + SYSTEMPORT_LITE, +}; + +struct bcm_sysport_hw_params { + bool is_lite; + unsigned int num_rx_desc_words; +}; + /* Software view of the TX ring */ struct bcm_sysport_tx_ring { spinlock_t lock; /* Ring lock for tx reclaim/xmit */ @@ -651,6 +699,8 @@ struct bcm_sysport_priv { u32 irq0_mask; u32 irq1_stat; u32 irq1_mask; + bool is_lite; + unsigned int num_rx_desc_words; struct napi_struct napi ____cacheline_aligned; struct net_device *netdev; struct platform_device *pdev; -- cgit v1.2.3 From 329b5c58f8b2833f330d64d5a2a1517a0c1e070e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 12:36:28 -0800 Subject: net: dsa: bcm_sf2: Make SF2_IO64_MACRO() utilize 32-bit macro There is no point inlining the 32-bit direct register read/write part, just infer it from the existing macro. This will make it easier to centralize the address rewriting that we are going to introduce later on. 
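For context, a minimal sketch of the layering this change aims for: the 64-bit read is built on top of the 32-bit helper instead of calling __raw_readl() directly, so any future address rewriting only has to be added in one place. The names and register offset here are illustrative, and the latching behaviour described in the comment is assumed from the surrounding code rather than taken from a datasheet.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative private data, reduced to what the sketch needs. */
struct example_priv {
        void __iomem *core;             /* directly mapped register block */
        void __iomem *reg;              /* block holding the indirect data regs */
        spinlock_t indir_lock;          /* serializes 64-bit accesses */
};

#define EXAMPLE_DIR_DATA_READ   0x0c    /* assumed offset */

/* Single choke point for 32-bit accesses; address rewriting goes here. */
static inline u32 example_core_readl(struct example_priv *priv, u32 off)
{
        return readl(priv->core + off);
}

/*
 * The 64-bit read is layered on the 32-bit helper: reading the low
 * word is assumed to latch the high word into the indirect data
 * register, which is then fetched separately under the lock.
 */
static inline u64 example_core_readq(struct example_priv *priv, u32 off)
{
        u32 indir, dir;

        spin_lock(&priv->indir_lock);
        dir = example_core_readl(priv, off);
        indir = readl(priv->reg + EXAMPLE_DIR_DATA_READ);
        spin_unlock(&priv->indir_lock);

        return (u64)indir << 32 | dir;
}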
Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 44692673e1d5..4531c2333e86 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -125,7 +125,7 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ { \ u32 indir, dir; \ spin_lock(&priv->indir_lock); \ - dir = __raw_readl(priv->name + off); \ + dir = name##_readl(priv, off); \ indir = reg_readl(priv, REG_DIR_DATA_READ); \ spin_unlock(&priv->indir_lock); \ return (u64)indir << 32 | dir; \ @@ -135,7 +135,7 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \ { \ spin_lock(&priv->indir_lock); \ reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \ - __raw_writel(lower_32_bits(val), priv->name + off); \ + name##_writel(priv, lower_32_bits(val), off); \ spin_unlock(&priv->indir_lock); \ } -- cgit v1.2.3 From a78e86ed586dba0808f765a9498f1fcd803f6ac6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 12:36:29 -0800 Subject: net: dsa: bcm_sf2: Prepare for different register layouts In preparation for supporting a new device with a slightly different register layout, affecting the SWITCH_REG and SWITCH_CORE address spaces, perform a few preparatory steps: - allow matching the compatible string against a data description - convert the SWITCH_REG register accesses into an indirection table - prepare for supporting a SWITCH_CORE register alignment requirement Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 57 +++++++++++++++++++++++++++++++++++++----- drivers/net/dsa/bcm_sf2.h | 34 +++++++++++++++++++++++-- drivers/net/dsa/bcm_sf2_regs.h | 43 ++++++++++++++++++------------- 3 files changed, 109 insertions(+), 25 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 31d017086f8b..d952099afc60 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1009,10 +1009,49 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_fdb_del = b53_fdb_del, }; +struct bcm_sf2_of_data { + u32 type; + const u16 *reg_offsets; + unsigned int core_reg_align; +}; + +/* Register offsets for the SWITCH_REG_* block */ +static const u16 bcm_sf2_7445_reg_offsets[] = { + [REG_SWITCH_CNTRL] = 0x00, + [REG_SWITCH_STATUS] = 0x04, + [REG_DIR_DATA_WRITE] = 0x08, + [REG_DIR_DATA_READ] = 0x0C, + [REG_SWITCH_REVISION] = 0x18, + [REG_PHY_REVISION] = 0x1C, + [REG_SPHY_CNTRL] = 0x2C, + [REG_RGMII_0_CNTRL] = 0x34, + [REG_RGMII_1_CNTRL] = 0x40, + [REG_RGMII_2_CNTRL] = 0x4c, + [REG_LED_0_CNTRL] = 0x90, + [REG_LED_1_CNTRL] = 0x94, + [REG_LED_2_CNTRL] = 0x98, +}; + +static const struct bcm_sf2_of_data bcm_sf2_7445_data = { + .type = BCM7445_DEVICE_ID, + .core_reg_align = 0, + .reg_offsets = bcm_sf2_7445_reg_offsets, +}; + +static const struct of_device_id bcm_sf2_of_match[] = { + { .compatible = "brcm,bcm7445-switch-v4.0", + .data = &bcm_sf2_7445_data + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); + static int bcm_sf2_sw_probe(struct platform_device *pdev) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct device_node *dn = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + const struct bcm_sf2_of_data *data; struct b53_platform_data *pdata; struct dsa_switch_ops *ops; struct bcm_sf2_priv *priv; @@ -1040,11 +1079,22 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if 
(!pdata) return -ENOMEM; + of_id = of_match_node(bcm_sf2_of_match, dn); + if (!of_id || !of_id->data) + return -EINVAL; + + data = of_id->data; + + /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */ + priv->type = data->type; + priv->reg_offsets = data->reg_offsets; + priv->core_reg_align = data->core_reg_align; + /* Auto-detection using standard registers will not work, so * provide an indication of what kind of device we are for * b53_common to work with */ - pdata->chip_id = BCM7445_DEVICE_ID; + pdata->chip_id = priv->type; dev->pdata = pdata; priv->dev = dev; @@ -1190,11 +1240,6 @@ static int bcm_sf2_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, bcm_sf2_suspend, bcm_sf2_resume); -static const struct of_device_id bcm_sf2_of_match[] = { - { .compatible = "brcm,bcm7445-switch-v4.0" }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 4531c2333e86..a1430866bd79 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -61,6 +61,11 @@ struct bcm_sf2_priv { void __iomem *fcb; void __iomem *acb; + /* Register offsets indirection tables */ + u32 type; + const u16 *reg_offsets; + unsigned int core_reg_align; + /* spinlock protecting access to the indirect registers */ spinlock_t indir_lock; @@ -104,6 +109,11 @@ static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) return dev->priv; } +static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off) +{ + return off << priv->core_reg_align; +} + #define SF2_IO_MACRO(name) \ static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ { \ @@ -153,8 +163,28 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ priv->irq##which##_mask |= (mask); \ } \ -SF2_IO_MACRO(core); -SF2_IO_MACRO(reg); +static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off) +{ + u32 tmp = bcm_sf2_mangle_addr(priv, off); + return __raw_readl(priv->core + tmp); +} + +static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off) +{ + u32 tmp = bcm_sf2_mangle_addr(priv, off); + __raw_writel(val, priv->core + tmp); +} + +static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off) +{ + return __raw_readl(priv->reg + priv->reg_offsets[off]); +} + +static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off) +{ + __raw_writel(val, priv->reg + priv->reg_offsets[off]); +} + SF2_IO64_MACRO(core); SF2_IO_MACRO(intrl2_0); SF2_IO_MACRO(intrl2_1); diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 838fe373cd6f..f5e566304f0c 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -12,22 +12,36 @@ #define __BCM_SF2_REGS_H /* Register set relative to 'REG' */ -#define REG_SWITCH_CNTRL 0x00 -#define MDIO_MASTER_SEL (1 << 0) -#define REG_SWITCH_STATUS 0x04 -#define REG_DIR_DATA_WRITE 0x08 -#define REG_DIR_DATA_READ 0x0C +enum bcm_sf2_reg_offs { + REG_SWITCH_CNTRL = 0, + REG_SWITCH_STATUS, + REG_DIR_DATA_WRITE, + REG_DIR_DATA_READ, + REG_SWITCH_REVISION, + REG_PHY_REVISION, + REG_SPHY_CNTRL, + REG_RGMII_0_CNTRL, + REG_RGMII_1_CNTRL, + REG_RGMII_2_CNTRL, + REG_LED_0_CNTRL, + REG_LED_1_CNTRL, + REG_LED_2_CNTRL, + REG_SWITCH_REG_MAX, +}; + +/* Relative to REG_SWITCH_CNTRL */ +#define MDIO_MASTER_SEL (1 << 0) -#define REG_SWITCH_REVISION 0x18 +/* Relative to REG_SWITCH_REVISION */ #define SF2_REV_MASK 0xffff #define 
SWITCH_TOP_REV_SHIFT 16 #define SWITCH_TOP_REV_MASK 0xffff -#define REG_PHY_REVISION 0x1C +/* Relative to REG_PHY_REVISION */ #define PHY_REVISION_MASK 0xffff -#define REG_SPHY_CNTRL 0x2C +/* Relative to REG_SPHY_CNTRL */ #define IDDQ_BIAS (1 << 0) #define EXT_PWR_DOWN (1 << 1) #define FORCE_DLL_EN (1 << 2) @@ -37,13 +51,8 @@ #define PHY_PHYAD_SHIFT 8 #define PHY_PHYAD_MASK 0x1F -#define REG_RGMII_0_BASE 0x34 -#define REG_RGMII_CNTRL 0x00 -#define REG_RGMII_IB_STATUS 0x04 -#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08 -#define REG_RGMII_CNTRL_SIZE 0x0C -#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \ - ((x) * REG_RGMII_CNTRL_SIZE)) +#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x)) + /* Relative to REG_RGMII_CNTRL */ #define RGMII_MODE_EN (1 << 0) #define ID_MODE_DIS (1 << 1) @@ -61,8 +70,8 @@ #define LPI_COUNT_SHIFT 9 #define LPI_COUNT_MASK 0x3F -#define REG_LED_CNTRL_BASE 0x90 -#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4) +#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x)) + #define SPDLNK_SRC_SEL (1 << 24) /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ -- cgit v1.2.3 From 0fe9933804eb860b7c26fd3fcd5839dc6bb66533 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 12:36:30 -0800 Subject: net: dsa: bcm_sf2: Add support for BCM7278 integrated switch Add support for the integrated switch found on BCM7278: - core_reg_align is set to 1, to force a translation into the target address space which is 8 bytes aligned - an alternate SWITCH_REG layout is provided since registers are largely bit/masks compatible but have different offsets - conditional for all CORE_STS_OVERRIDE_{IMP,GMII_P} since those got moved way out of the traditional register space Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- .../bindings/net/brcm,bcm7445-switch-v4.0.txt | 2 +- drivers/net/dsa/b53/b53_common.c | 12 +++++ drivers/net/dsa/b53/b53_priv.h | 4 +- drivers/net/dsa/bcm_sf2.c | 56 ++++++++++++++++++---- drivers/net/dsa/bcm_sf2_regs.h | 4 ++ 5 files changed, 68 insertions(+), 10 deletions(-) diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt index fb40891ee606..e1b2c3e32859 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt +++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt @@ -2,7 +2,7 @@ Required properties: -- compatible: should be "brcm,bcm7445-switch-v4.0" +- compatible: should be "brcm,bcm7445-switch-v4.0" or "brcm,bcm7278-switch-v4.0" - reg: addresses and length of the register sets for the device, must be 6 pairs of register addresses and lengths - interrupts: interrupts for the devices, must be two interrupts diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 5102a3701a1a..5cbb14f6a03b 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1685,6 +1685,18 @@ static const struct b53_chip_data b53_switch_chips[] = { .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, }, + { + .chip_id = BCM7278_DEVICE_ID, + .dev_name = "BCM7278", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries= 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, }; static int b53_switch_init(struct b53_device *dev) diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 
86f125d55aaf..a8031b382c55 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -62,6 +62,7 @@ enum { BCM53019_DEVICE_ID = 0x53019, BCM58XX_DEVICE_ID = 0x5800, BCM7445_DEVICE_ID = 0x7445, + BCM7278_DEVICE_ID = 0x7278, }; #define B53_N_PORTS 9 @@ -179,7 +180,8 @@ static inline int is5301x(struct b53_device *dev) static inline int is58xx(struct b53_device *dev) { return dev->chip_id == BCM58XX_DEVICE_ID || - dev->chip_id == BCM7445_DEVICE_ID; + dev->chip_id == BCM7445_DEVICE_ID || + dev->chip_id == BCM7278_DEVICE_ID; } #define B53_CPU_PORT_25 5 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d952099afc60..02afa0598b24 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -64,7 +64,12 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 reg, val; + u32 reg, val, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_IMP; + else + offset = CORE_STS_OVERRIDE_IMP2; /* Enable the port memories */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); @@ -121,9 +126,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); /* Force link status for IMP port */ - reg = core_readl(priv, CORE_STS_OVERRIDE_IMP); + reg = core_readl(priv, offset); reg |= (MII_SW_OR | LINK_STS); - core_writel(priv, reg, CORE_STS_OVERRIDE_IMP); + core_writel(priv, reg, offset); } static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) @@ -591,7 +596,12 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct ethtool_eee *p = &priv->port_sts[port].eee; u32 id_mode_dis = 0, port_mode; const char *str = NULL; - u32 reg; + u32 reg, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); switch (phydev->interface) { case PHY_INTERFACE_MODE_RGMII: @@ -662,7 +672,7 @@ force_link: if (phydev->duplex == DUPLEX_FULL) reg |= DUPLX_MODE; - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + core_writel(priv, reg, offset); if (!phydev->is_pseudo_fixed_link) p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); @@ -672,9 +682,14 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 duplex, pause; + u32 duplex, pause, offset; u32 reg; + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + duplex = core_readl(priv, CORE_DUPSTS); pause = core_readl(priv, CORE_PAUSESTS); @@ -703,13 +718,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, status->duplex = !!(duplex & (1 << port)); } - reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + reg = core_readl(priv, offset); reg |= SW_OVERRIDE; if (status->link) reg |= LINK_STS; else reg &= ~LINK_STS; - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + core_writel(priv, reg, offset); if ((pause & (1 << port)) && (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { @@ -1038,10 +1053,35 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = { .reg_offsets = bcm_sf2_7445_reg_offsets, }; +static const u16 bcm_sf2_7278_reg_offsets[] = { + [REG_SWITCH_CNTRL] = 0x00, + [REG_SWITCH_STATUS] = 0x04, + [REG_DIR_DATA_WRITE] = 
0x08, + [REG_DIR_DATA_READ] = 0x0c, + [REG_SWITCH_REVISION] = 0x10, + [REG_PHY_REVISION] = 0x14, + [REG_SPHY_CNTRL] = 0x24, + [REG_RGMII_0_CNTRL] = 0xe0, + [REG_RGMII_1_CNTRL] = 0xec, + [REG_RGMII_2_CNTRL] = 0xf8, + [REG_LED_0_CNTRL] = 0x40, + [REG_LED_1_CNTRL] = 0x4c, + [REG_LED_2_CNTRL] = 0x58, +}; + +static const struct bcm_sf2_of_data bcm_sf2_7278_data = { + .type = BCM7278_DEVICE_ID, + .core_reg_align = 1, + .reg_offsets = bcm_sf2_7278_reg_offsets, +}; + static const struct of_device_id bcm_sf2_of_match[] = { { .compatible = "brcm,bcm7445-switch-v4.0", .data = &bcm_sf2_7445_data }, + { .compatible = "brcm,bcm7278-switch-v4.0", + .data = &bcm_sf2_7278_data + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index f5e566304f0c..3b33b8010cc8 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -134,6 +134,9 @@ enum bcm_sf2_reg_offs { #define GMII_SPEED_UP_2G (1 << 6) #define MII_SW_OR (1 << 7) +/* Alternate layout for e.g: 7278 */ +#define CORE_STS_OVERRIDE_IMP2 0x39040 + #define CORE_NEW_CTRL 0x00084 #define IP_MC (1 << 0) #define OUTRANGEERR_DISCARD (1 << 1) @@ -151,6 +154,7 @@ enum bcm_sf2_reg_offs { #define SW_LEARN_CNTL(x) (1 << (x)) #define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4) +#define CORE_STS_OVERRIDE_GMIIP2_PORT(x) (0x39000 + (x) * 8) #define LINK_STS (1 << 0) #define DUPLX_MODE (1 << 1) #define SPEED_SHIFT 2 -- cgit v1.2.3 From ebb2ac4f32c32364b6f522270aeb0811829bd3cb Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 12:36:31 -0800 Subject: net: dsa: bcm_sf2: Move code enabling Broadcom tags In preparation for enabling Broadcom tags on different ports based on configuration information, dedicate a function that is responsible for enabling Broadcom tags for a given port and update the IMP port setup to call it. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 61 ++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 02afa0598b24..571e112c8e34 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -61,34 +61,9 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) } } -static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 reg, val, offset; - - if (priv->type == BCM7445_DEVICE_ID) - offset = CORE_STS_OVERRIDE_IMP; - else - offset = CORE_STS_OVERRIDE_IMP2; - - /* Enable the port memories */ - reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); - reg &= ~P_TXQ_PSM_VDD(port); - core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); - - /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ - reg = core_readl(priv, CORE_IMP_CTL); - reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); - reg &= ~(RX_DIS | TX_DIS); - core_writel(priv, reg, CORE_IMP_CTL); - - /* Enable forwarding */ - core_writel(priv, SW_FWDG_EN, CORE_SWMODE); - - /* Enable IMP port in dumb mode */ - reg = core_readl(priv, CORE_SWITCH_CTRL); - reg |= MII_DUMB_FWDG_EN; - core_writel(priv, reg, CORE_SWITCH_CTRL); + u32 reg, val; /* Resolve which bit controls the Broadcom tag */ switch (port) { @@ -124,6 +99,38 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS); reg &= ~(1 << port); core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); +} + +static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 reg, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_IMP; + else + offset = CORE_STS_OVERRIDE_IMP2; + + /* Enable the port memories */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ + reg = core_readl(priv, CORE_IMP_CTL); + reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_IMP_CTL); + + /* Enable forwarding */ + core_writel(priv, SW_FWDG_EN, CORE_SWMODE); + + /* Enable IMP port in dumb mode */ + reg = core_readl(priv, CORE_SWITCH_CTRL); + reg |= MII_DUMB_FWDG_EN; + core_writel(priv, reg, CORE_SWITCH_CTRL); + + bcm_sf2_brcm_hdr_setup(priv, port); /* Force link status for IMP port */ reg = core_readl(priv, offset); -- cgit v1.2.3 From 64ff2aef91afdff7f22eaef05a65b5bc3429ef21 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 12:36:32 -0800 Subject: net: dsa: bcm_sf2: Allow non-IMP ports to have Broadcom tags enabled Parse the "brcm,use-bcm-hdr" boolean property during ports identification to fill a bitmask of ports that should have Broadcom tags enabled. This is needed in some configurations where per-packet metadata can be exchanged using Broadcom tags between the switch and an on-chip acceleration device. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt | 8 ++++++++ drivers/net/dsa/bcm_sf2.c | 7 +++++++ drivers/net/dsa/bcm_sf2.h | 3 +++ 3 files changed, 18 insertions(+) diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt index e1b2c3e32859..9a734d808aa7 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt +++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt @@ -41,6 +41,13 @@ Optional properties: Admission Control Block supports reporting the number of packets in-flight in a switch queue +Port subnodes: + +Optional properties: + +- brcm,use-bcm-hdr: boolean property, if present, indicates that the switch + port has Broadcom tags enabled (per-packet metadata) + Example: switch_top@f0b00000 { @@ -114,6 +121,7 @@ switch_top@f0b00000 { port@0 { label = "gphy"; reg = <0>; + brcm,use-bcm-hdr; }; ... }; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 571e112c8e34..8eecfd227e06 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -236,6 +236,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + /* Enable Broadcom tags for that port if requested */ + if (priv->brcm_tag_mask & BIT(port)) + bcm_sf2_brcm_hdr_setup(priv, port); + /* Clear the Rx and Tx disable bits and set to no spanning tree */ core_writel(priv, 0, CORE_G_PCTL_PORT(port)); @@ -515,6 +519,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, if (mode == PHY_INTERFACE_MODE_MOCA) priv->moca_port = port_num; + + if (of_property_read_bool(port, "brcm,use-bcm-hdr")) + priv->brcm_tag_mask |= 1 << port_num; } } diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index a1430866bd79..6e1f74e4d471 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -100,6 +100,9 @@ struct bcm_sf2_priv { struct device_node *master_mii_dn; struct mii_bus *slave_mii_bus; struct mii_bus *master_mii_bus; + + /* Bitmask of ports needing BRCM tags */ + unsigned int brcm_tag_mask; }; static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) -- cgit v1.2.3 From 582d0ac397ca02a6a3fb653b13154ac87b884beb Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 12:36:33 -0800 Subject: net: phy: bcm7xxx: Add entry for BCM7278 Add support for the BCM7278 28nm process Gigabit Ethernet PHY. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/bcm7xxx.c | 2 ++ include/linux/brcmphy.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 264b085d796b..fb11927de0ff 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -416,6 +416,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), @@ -430,6 +431,7 @@ static struct phy_driver bcm7xxx_driver[] = { static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7278, 0xfffffff0, }, { PHY_ID_BCM7364, 0xfffffff0, }, { PHY_ID_BCM7366, 0xfffffff0, }, { PHY_ID_BCM7346, 0xfffffff0, }, diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 4f7d8be9ddbf..295fb3e73de5 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -24,6 +24,7 @@ #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7278 0xae0251a0 #define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7346 0x600d8650 -- cgit v1.2.3 From 039a7b8592ab932f729e56f193a2534f3f911409 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 12:36:34 -0800 Subject: net: phy: bcm7xxx: Implement EGPHY workaround for 7278 Implement the workaround recommended by the HW design team for 7278. Since the GPHY now returns its revision information in MII_PHYS_ID[23], we need to check whether the revision provided in the flags is 0 or not. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/bcm7xxx.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index fb11927de0ff..aa01020ab1b9 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -167,6 +167,31 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev) return 0; } +static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev) +{ + /* +1 RC_CAL codes for RL centering for both LT and HT conditions */ + bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0xd003); + + /* Cut master bias current by 2% to compensate for RC_CAL offset */ + bcm_phy_write_misc(phydev, DSP_TAP10, 0x791b); + + /* Improve hybrid leakage */ + bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x10e3); + + /* Change rx_on_tune 8 to 0xf */ + bcm_phy_write_misc(phydev, 0x21, 0x2, 0x87f6); + + /* Change 100Tx EEE bandwidth */ + bcm_phy_write_misc(phydev, 0x22, 0x2, 0x017d); + + /* Enable ffe zero detection for Vitesse interoperability */ + bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015); + + r_rc_cal_reset(phydev); + + return 0; +} + static int bcm7xxx_28nm_config_init(struct phy_device *phydev) { u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags); @@ -174,6 +199,12 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) u8 count; int ret = 0; + /* Newer devices have moved the revision information back into a + * standard location in MII_PHYS_ID[23] + */ + if (rev == 0) + rev = phydev->phy_id & ~phydev->drv->phy_id_mask; + pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", phydev_name(phydev), phydev->drv->name, rev, patch); @@ -197,6 +228,9 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) case 0x10: ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev); break; + case 0x01: + ret = bcm7xxx_28nm_a0_patch_afe_config_init(phydev); + break; default: break; } -- cgit v1.2.3 From b9032741e4f86844d8c4a7c18001ee328dae2f7a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 20 Jan 2017 08:25:34 -0800 Subject: bnx2x: avoid two atomic ops per page on x86 Commit 4cace675d687 ("bnx2x: Alloc 4k fragment for each rx ring buffer element") added extra put_page() and get_page() calls on arches where PAGE_SIZE=4K like x86 Reorder things to avoid this overhead. Signed-off-by: Eric Dumazet Cc: Gabriel Krisman Bertazi Cc: Yuval Mintz Cc: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 3e199d3e461e..c0dac0e5696d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_alloc_pool *pool = &fp->page_pool; dma_addr_t mapping; - if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) { - - /* put page reference used by the memory pool, since we - * won't be using this page as the mempool anymore. 
- */ - if (pool->page) - put_page(pool->page); - + if (!pool->page) { pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); if (unlikely(!pool->page)) return -ENOMEM; @@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, return -ENOMEM; } - get_page(pool->page); sw_buf->page = pool->page; sw_buf->offset = pool->offset; @@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, sge->addr_lo = cpu_to_le32(U64_LO(mapping)); pool->offset += SGE_PAGE_SIZE; - + if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) + get_page(pool->page); + else + pool->page = NULL; return 0; } -- cgit v1.2.3 From 41c1093f2e1a33f4bf38848b4b1526903c5052bb Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 20 Jan 2017 17:21:03 -0600 Subject: net: qcom/emac: rename emac_phy to emac_sgmii and move it The EMAC has an internal PHY that is often called the "SGMII". This SGMII is also connected to an external PHY, which is managed by phylib. These dual PHYs often cause confusion. In this case, the data structure for managing the SGMII was mis-named and located in the wrong header file. Structure emac_phy is renamed to emac_sgmii to clearly indicate it applies to the internal PHY only. It also also moved from emac_phy.h (which supports the external PHY) to emac_sgmii.h (where it belongs). To keep the changes minimal, only the structure name is changed, not the names of any variables of that type. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-phy.c | 2 -- drivers/net/ethernet/qualcomm/emac/emac-phy.h | 13 ------------- drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c | 2 +- drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c | 2 +- drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c | 2 +- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 8 ++++---- drivers/net/ethernet/qualcomm/emac/emac-sgmii.h | 13 +++++++++++++ drivers/net/ethernet/qualcomm/emac/emac.c | 2 +- drivers/net/ethernet/qualcomm/emac/emac.h | 3 ++- 9 files changed, 23 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 2851b4c56570..1d7852f4ccaa 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -22,8 +22,6 @@ #include #include "emac.h" #include "emac-mac.h" -#include "emac-phy.h" -#include "emac-sgmii.h" /* EMAC base register offsets */ #define EMAC_MDIO_CTRL 0x001414 diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h index 49f3701a6dd7..c0c301c72129 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h @@ -13,19 +13,6 @@ #ifndef _EMAC_PHY_H_ #define _EMAC_PHY_H_ -typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); - -/** emac_phy - internal emac phy - * @base base address - * @digital per-lane digital block - * @initialize initialization function - */ -struct emac_phy { - void __iomem *base; - void __iomem *digital; - emac_sgmii_initialize initialize; -}; - struct emac_adapter; int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c index af690e1a6e7b..10de8d0d9a56 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c +++ 
b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c @@ -214,7 +214,7 @@ static const struct emac_reg_write tx_rx_setting[] = { int emac_sgmii_init_fsm9900(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; unsigned int i; emac_reg_write_all(phy->base, physical_coding_sublayer_programming, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c index 5b8419498ef1..f62c215be779 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c @@ -174,7 +174,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2400(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c index 6170200d7479..b9c0df7bdd15 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c @@ -167,7 +167,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2432(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index bf722a9bb09d..0149b523eda4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -50,7 +50,7 @@ static int emac_sgmii_link_init(struct emac_adapter *adpt) { struct phy_device *phydev = adpt->phydev; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); @@ -89,7 +89,7 @@ static int emac_sgmii_link_init(struct emac_adapter *adpt) static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 status; writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); @@ -123,7 +123,7 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; /* Reset PHY */ @@ -217,7 +217,7 @@ static const struct of_device_id emac_sgmii_dt_match[] = { int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) { struct platform_device *sgmii_pdev = NULL; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; struct resource *res; int ret; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h index 80ed3dc3157a..4a8f6b174f4b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h @@ -16,6 +16,19 @@ struct emac_adapter; struct platform_device; +typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); + +/** emac_sgmii - internal emac phy + * @base base address + * @digital per-lane digital block + * @initialize initialization function + */ +struct emac_sgmii { + void __iomem *base; + void 
__iomem *digital; + emac_sgmii_initialize initialize; +}; + int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); void emac_sgmii_reset(struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 0aac0dec03e1..b74ec7fba0e0 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -602,7 +602,7 @@ static int emac_probe(struct platform_device *pdev) { struct net_device *netdev; struct emac_adapter *adpt; - struct emac_phy *phy; + struct emac_sgmii *phy; u16 devid, revid; u32 reg; int ret; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 4b8483cc2c7f..1368440ea91d 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -19,6 +19,7 @@ #include #include "emac-mac.h" #include "emac-phy.h" +#include "emac-sgmii.h" /* EMAC base register offsets */ #define EMAC_DMA_MAS_CTRL 0x001400 @@ -291,7 +292,7 @@ struct emac_adapter { void __iomem *base; void __iomem *csr; - struct emac_phy phy; + struct emac_sgmii phy; struct emac_stats stats; struct emac_irq irq; -- cgit v1.2.3 From 4404323c6ac256c2a11d86fda65fbdb7d198ff8c Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 20 Jan 2017 17:21:04 -0600 Subject: net: qcom/emac: claim the irq only when the device is opened During reset, functions emac_mac_down() and emac_mac_up() are called, so we don't want to free and claim the IRQ unnecessarily. Move those operations to open/close. Signed-off-by: Timur Tabi Reviewed-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 13 ------------- drivers/net/ethernet/qualcomm/emac/emac.c | 11 +++++++++++ drivers/net/ethernet/qualcomm/emac/emac.h | 1 - 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 98570eb6ef1a..e4793d703ed9 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -314,8 +314,6 @@ struct emac_skb_cb { RX_PKT_INT2 |\ RX_PKT_INT3) -#define EMAC_MAC_IRQ_RES "core0" - void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr) { u32 crc32, bit, reg, mta; @@ -977,26 +975,16 @@ static void emac_adjust_link(struct net_device *netdev) int emac_mac_up(struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - struct emac_irq *irq = &adpt->irq; int ret; emac_mac_rx_tx_ring_reset_all(adpt); emac_mac_config(adpt); - - ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq); - if (ret) { - netdev_err(adpt->netdev, "could not request %s irq\n", - EMAC_MAC_IRQ_RES); - return ret; - } - emac_mac_rx_descs_refill(adpt, &adpt->rx_q); ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, PHY_INTERFACE_MODE_SGMII); if (ret) { netdev_err(adpt->netdev, "could not connect phy\n"); - free_irq(irq->irq, irq); return ret; } @@ -1030,7 +1018,6 @@ void emac_mac_down(struct emac_adapter *adpt) writel(DIS_INT, adpt->base + EMAC_INT_STATUS); writel(0, adpt->base + EMAC_INT_MASK); synchronize_irq(adpt->irq.irq); - free_irq(adpt->irq.irq, &adpt->irq); phy_disconnect(adpt->phydev); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index b74ec7fba0e0..3e1be9107d55 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -256,18 
+256,27 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu) static int emac_open(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_irq *irq = &adpt->irq; int ret; + ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq); + if (ret) { + netdev_err(adpt->netdev, "could not request emac-core0 irq\n"); + return ret; + } + /* allocate rx/tx dma buffer & descriptors */ ret = emac_mac_rx_tx_rings_alloc_all(adpt); if (ret) { netdev_err(adpt->netdev, "error allocating rx/tx rings\n"); + free_irq(irq->irq, irq); return ret; } ret = emac_mac_up(adpt); if (ret) { emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); return ret; } @@ -286,6 +295,8 @@ static int emac_close(struct net_device *netdev) emac_mac_down(adpt); emac_mac_rx_tx_rings_free_all(adpt); + free_irq(adpt->irq.irq, &adpt->irq); + mutex_unlock(&adpt->reset_lock); return 0; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 1368440ea91d..2725507ae866 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -331,7 +331,6 @@ struct emac_adapter { int emac_reinit_locked(struct emac_adapter *adpt); void emac_reg_update32(void __iomem *addr, u32 mask, u32 val); -irqreturn_t emac_isr(int irq, void *data); void emac_set_ethtool_ops(struct net_device *netdev); void emac_update_hw_stats(struct emac_adapter *adpt); -- cgit v1.2.3 From 30bd2f52e56ea35028cf1d13c21484918b6ff22d Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sat, 21 Jan 2017 12:27:26 +0530 Subject: net: moxa: constify net_device_ops structures Declare net_device_ops structure as const as it is only stored in the netdev_ops field of a net_device structure. This field is of type const, so net_device_ops structures having same properties can be made const too. Done using Coccinelle: @r1 disable optional_qualifier@ identifier i; position p; @@ static struct net_device_ops i@p={...}; @ok1@ identifier r1.i; position p; struct net_device ndev; @@ ndev.netdev_ops=&i@p @bad@ position p!={r1.p,ok1.p}; identifier r1.i; @@ i@p @depends on !bad disable optional_qualifier@ identifier r1.i; @@ +const struct net_device_ops i; File size before: text data bss dec hex filename 4821 744 0 5565 15bd ethernet/moxa/moxart_ether.o File size after: text data bss dec hex filename 5373 192 0 5565 15bd ethernet/moxa/moxart_ether.o Signed-off-by: Bhumika Goyal Signed-off-by: David S. Miller --- drivers/net/ethernet/moxa/moxart_ether.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 9774b50cff6e..6a6525f7d2ea 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -436,7 +436,7 @@ static void moxart_mac_set_rx_mode(struct net_device *ndev) spin_unlock_irq(&priv->txlock); } -static struct net_device_ops moxart_netdev_ops = { +static const struct net_device_ops moxart_netdev_ops = { .ndo_open = moxart_mac_open, .ndo_stop = moxart_mac_stop, .ndo_start_xmit = moxart_mac_start_xmit, -- cgit v1.2.3 From 10eeb5e645b5332b24986a554046f6c9cc2c22d4 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sat, 21 Jan 2017 12:28:58 +0530 Subject: net: xilinx: constify net_device_ops structure Declare net_device_ops structure as const as it is only stored in the netdev_ops field of a net_device structure. 
This field is of type const, so net_device_ops structures having same properties can be made const too. Done using Coccinelle: @r1 disable optional_qualifier@ identifier i; position p; @@ static struct net_device_ops i@p={...}; @ok1@ identifier r1.i; position p; struct net_device ndev; @@ ndev.netdev_ops=&i@p @bad@ position p!={r1.p,ok1.p}; identifier r1.i; @@ i@p @depends on !bad disable optional_qualifier@ identifier r1.i; @@ +const struct net_device_ops i; File size before: text data bss dec hex filename 6201 744 0 6945 1b21 ethernet/xilinx/xilinx_emaclite.o File size after: text data bss dec hex filename 6745 192 0 6937 1b19 ethernet/xilinx/xilinx_emaclite.o Signed-off-by: Bhumika Goyal Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 97dcc0bd5a85..e3070fd88bce 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1051,7 +1051,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s) } } -static struct net_device_ops xemaclite_netdev_ops; +static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. @@ -1205,7 +1205,7 @@ xemaclite_poll_controller(struct net_device *ndev) } #endif -static struct net_device_ops xemaclite_netdev_ops = { +static const struct net_device_ops xemaclite_netdev_ops = { .ndo_open = xemaclite_open, .ndo_stop = xemaclite_close, .ndo_start_xmit = xemaclite_send, -- cgit v1.2.3 From b95a5c4db09bc7c253636cb84dc9b12c577fd5a0 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sat, 21 Jan 2017 17:26:11 +0100 Subject: bpf: add a longest prefix match trie map implementation This trie implements a longest prefix match algorithm that can be used to match IP addresses to a stored set of ranges. Internally, data is stored in an unbalanced trie of nodes that has a maximum height of n, where n is the prefixlen the trie was created with. Tries may be created with prefix lengths that are multiples of 8, in the range from 8 to 2048. The key used for lookup and update operations is a struct bpf_lpm_trie_key, and the value is a uint64_t. The code carries more information about the internal implementation. Signed-off-by: Daniel Mack Reviewed-by: David Herrmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/uapi/linux/bpf.h | 7 + kernel/bpf/Makefile | 2 +- kernel/bpf/lpm_trie.c | 503 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 511 insertions(+), 1 deletion(-) create mode 100644 kernel/bpf/lpm_trie.c diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 54a5894bb4ea..bd3068485410 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -63,6 +63,12 @@ struct bpf_insn { __s32 imm; /* signed immediate constant */ }; +/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ +struct bpf_lpm_trie_key { + __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ + __u8 data[0]; /* Arbitrary size */ +}; + /* BPF syscall commands, see bpf(2) man-page for details. 
*/ enum bpf_cmd { BPF_MAP_CREATE, @@ -89,6 +95,7 @@ enum bpf_map_type { BPF_MAP_TYPE_CGROUP_ARRAY, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, + BPF_MAP_TYPE_LPM_TRIE, }; enum bpf_prog_type { diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 1276474ac3cd..e1ce4f4fd7fd 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1,7 +1,7 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o -obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o +obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o endif diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c new file mode 100644 index 000000000000..ba19241d1979 --- /dev/null +++ b/kernel/bpf/lpm_trie.c @@ -0,0 +1,503 @@ +/* + * Longest prefix match list implementation + * + * Copyright (c) 2016,2017 Daniel Mack + * Copyright (c) 2016 David Herrmann + * + * This file is subject to the terms and conditions of version 2 of the GNU + * General Public License. See the file COPYING in the main directory of the + * Linux distribution for more details. + */ + +#include +#include +#include +#include +#include +#include + +/* Intermediate node */ +#define LPM_TREE_NODE_FLAG_IM BIT(0) + +struct lpm_trie_node; + +struct lpm_trie_node { + struct rcu_head rcu; + struct lpm_trie_node __rcu *child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node __rcu *root; + size_t n_entries; + size_t max_prefixlen; + size_t data_size; + raw_spinlock_t lock; +}; + +/* This trie implements a longest prefix match algorithm that can be used to + * match IP addresses to a stored set of ranges. + * + * Data stored in @data of struct bpf_lpm_key and struct lpm_trie_node is + * interpreted as big endian, so data[0] stores the most significant byte. + * + * Match ranges are internally stored in instances of struct lpm_trie_node + * which each contain their prefix length as well as two pointers that may + * lead to more nodes containing more specific matches. Each node also stores + * a value that is defined by and returned to userspace via the update_elem + * and lookup functions. + * + * For instance, let's start with a trie that was created with a prefix length + * of 32, so it can be used for IPv4 addresses, and one single element that + * matches 192.168.0.0/16. The data array would hence contain + * [0xc0, 0xa8, 0x00, 0x00] in big-endian notation. This documentation will + * stick to IP-address notation for readability though. + * + * As the trie is empty initially, the new node (1) will be places as root + * node, denoted as (R) in the example below. As there are no other node, both + * child pointers are %NULL. + * + * +----------------+ + * | (1) (R) | + * | 192.168.0.0/16 | + * | value: 1 | + * | [0] [1] | + * +----------------+ + * + * Next, let's add a new node (2) matching 192.168.0.0/24. As there is already + * a node with the same data and a smaller prefix (ie, a less specific one), + * node (2) will become a child of (1). 
In child index depends on the next bit + * that is outside of what (1) matches, and that bit is 0, so (2) will be + * child[0] of (1): + * + * +----------------+ + * | (1) (R) | + * | 192.168.0.0/16 | + * | value: 1 | + * | [0] [1] | + * +----------------+ + * | + * +----------------+ + * | (2) | + * | 192.168.0.0/24 | + * | value: 2 | + * | [0] [1] | + * +----------------+ + * + * The child[1] slot of (1) could be filled with another node which has bit #17 + * (the next bit after the ones that (1) matches on) set to 1. For instance, + * 192.168.128.0/24: + * + * +----------------+ + * | (1) (R) | + * | 192.168.0.0/16 | + * | value: 1 | + * | [0] [1] | + * +----------------+ + * | | + * +----------------+ +------------------+ + * | (2) | | (3) | + * | 192.168.0.0/24 | | 192.168.128.0/24 | + * | value: 2 | | value: 3 | + * | [0] [1] | | [0] [1] | + * +----------------+ +------------------+ + * + * Let's add another node (4) to the game for 192.168.1.0/24. In order to place + * it, node (1) is looked at first, and because (4) of the semantics laid out + * above (bit #17 is 0), it would normally be attached to (1) as child[0]. + * However, that slot is already allocated, so a new node is needed in between. + * That node does not have a value attached to it and it will never be + * returned to users as result of a lookup. It is only there to differentiate + * the traversal further. It will get a prefix as wide as necessary to + * distinguish its two children: + * + * +----------------+ + * | (1) (R) | + * | 192.168.0.0/16 | + * | value: 1 | + * | [0] [1] | + * +----------------+ + * | | + * +----------------+ +------------------+ + * | (4) (I) | | (3) | + * | 192.168.0.0/23 | | 192.168.128.0/24 | + * | value: --- | | value: 3 | + * | [0] [1] | | [0] [1] | + * +----------------+ +------------------+ + * | | + * +----------------+ +----------------+ + * | (2) | | (5) | + * | 192.168.0.0/24 | | 192.168.1.0/24 | + * | value: 2 | | value: 5 | + * | [0] [1] | | [0] [1] | + * +----------------+ +----------------+ + * + * 192.168.1.1/32 would be a child of (5) etc. + * + * An intermediate node will be turned into a 'real' node on demand. In the + * example above, (4) would be re-used if 192.168.0.0/23 is added to the trie. + * + * A fully populated trie would have a height of 32 nodes, as the trie was + * created with a prefix length of 32. + * + * The lookup starts at the root node. If the current node matches and if there + * is a child that can be used to become more specific, the trie is traversed + * downwards. The last node in the traversal that is a non-intermediate one is + * returned. + */ + +static inline int extract_bit(const u8 *data, size_t index) +{ + return !!(data[index / 8] & (1 << (7 - (index % 8)))); +} + +/** + * longest_prefix_match() - determine the longest prefix + * @trie: The trie to get internal sizes from + * @node: The node to operate on + * @key: The key to compare to @node + * + * Determine the longest prefix of @node that matches the bits in @key. 
+ */ +static size_t longest_prefix_match(const struct lpm_trie *trie, + const struct lpm_trie_node *node, + const struct bpf_lpm_trie_key *key) +{ + size_t prefixlen = 0; + size_t i; + + for (i = 0; i < trie->data_size; i++) { + size_t b; + + b = 8 - fls(node->data[i] ^ key->data[i]); + prefixlen += b; + + if (prefixlen >= node->prefixlen || prefixlen >= key->prefixlen) + return min(node->prefixlen, key->prefixlen); + + if (b < 8) + break; + } + + return prefixlen; +} + +/* Called from syscall or from eBPF program */ +static void *trie_lookup_elem(struct bpf_map *map, void *_key) +{ + struct lpm_trie *trie = container_of(map, struct lpm_trie, map); + struct lpm_trie_node *node, *found = NULL; + struct bpf_lpm_trie_key *key = _key; + + /* Start walking the trie from the root node ... */ + + for (node = rcu_dereference(trie->root); node;) { + unsigned int next_bit; + size_t matchlen; + + /* Determine the longest prefix of @node that matches @key. + * If it's the maximum possible prefix for this trie, we have + * an exact match and can return it directly. + */ + matchlen = longest_prefix_match(trie, node, key); + if (matchlen == trie->max_prefixlen) { + found = node; + break; + } + + /* If the number of bits that match is smaller than the prefix + * length of @node, bail out and return the node we have seen + * last in the traversal (ie, the parent). + */ + if (matchlen < node->prefixlen) + break; + + /* Consider this node as return candidate unless it is an + * artificially added intermediate one. + */ + if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) + found = node; + + /* If the node match is fully satisfied, let's see if we can + * become more specific. Determine the next bit in the key and + * traverse down. + */ + next_bit = extract_bit(key->data, node->prefixlen); + node = rcu_dereference(node->child[next_bit]); + } + + if (!found) + return NULL; + + return found->data + trie->data_size; +} + +static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie, + const void *value) +{ + struct lpm_trie_node *node; + size_t size = sizeof(struct lpm_trie_node) + trie->data_size; + + if (value) + size += trie->map.value_size; + + node = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN); + if (!node) + return NULL; + + node->flags = 0; + + if (value) + memcpy(node->data + trie->data_size, value, + trie->map.value_size); + + return node; +} + +/* Called from syscall or from eBPF program */ +static int trie_update_elem(struct bpf_map *map, + void *_key, void *value, u64 flags) +{ + struct lpm_trie *trie = container_of(map, struct lpm_trie, map); + struct lpm_trie_node *node, *im_node, *new_node = NULL; + struct lpm_trie_node __rcu **slot; + struct bpf_lpm_trie_key *key = _key; + unsigned long irq_flags; + unsigned int next_bit; + size_t matchlen = 0; + int ret = 0; + + if (unlikely(flags > BPF_EXIST)) + return -EINVAL; + + if (key->prefixlen > trie->max_prefixlen) + return -EINVAL; + + raw_spin_lock_irqsave(&trie->lock, irq_flags); + + /* Allocate and fill a new node */ + + if (trie->n_entries == trie->map.max_entries) { + ret = -ENOSPC; + goto out; + } + + new_node = lpm_trie_node_alloc(trie, value); + if (!new_node) { + ret = -ENOMEM; + goto out; + } + + trie->n_entries++; + + new_node->prefixlen = key->prefixlen; + RCU_INIT_POINTER(new_node->child[0], NULL); + RCU_INIT_POINTER(new_node->child[1], NULL); + memcpy(new_node->data, key->data, trie->data_size); + + /* Now find a slot to attach the new node. 
To do that, walk the tree + * from the root and match as many bits as possible for each node until + * we either find an empty slot or a slot that needs to be replaced by + * an intermediate node. + */ + slot = &trie->root; + + while ((node = rcu_dereference_protected(*slot, + lockdep_is_held(&trie->lock)))) { + matchlen = longest_prefix_match(trie, node, key); + + if (node->prefixlen != matchlen || + node->prefixlen == key->prefixlen || + node->prefixlen == trie->max_prefixlen) + break; + + next_bit = extract_bit(key->data, node->prefixlen); + slot = &node->child[next_bit]; + } + + /* If the slot is empty (a free child pointer or an empty root), + * simply assign the @new_node to that slot and be done. + */ + if (!node) { + rcu_assign_pointer(*slot, new_node); + goto out; + } + + /* If the slot we picked already exists, replace it with @new_node + * which already has the correct data array set. + */ + if (node->prefixlen == matchlen) { + new_node->child[0] = node->child[0]; + new_node->child[1] = node->child[1]; + + if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) + trie->n_entries--; + + rcu_assign_pointer(*slot, new_node); + kfree_rcu(node, rcu); + + goto out; + } + + /* If the new node matches the prefix completely, it must be inserted + * as an ancestor. Simply insert it between @node and *@slot. + */ + if (matchlen == key->prefixlen) { + next_bit = extract_bit(node->data, matchlen); + rcu_assign_pointer(new_node->child[next_bit], node); + rcu_assign_pointer(*slot, new_node); + goto out; + } + + im_node = lpm_trie_node_alloc(trie, NULL); + if (!im_node) { + ret = -ENOMEM; + goto out; + } + + im_node->prefixlen = matchlen; + im_node->flags |= LPM_TREE_NODE_FLAG_IM; + memcpy(im_node->data, node->data, trie->data_size); + + /* Now determine which child to install in which slot */ + if (extract_bit(key->data, matchlen)) { + rcu_assign_pointer(im_node->child[0], node); + rcu_assign_pointer(im_node->child[1], new_node); + } else { + rcu_assign_pointer(im_node->child[0], new_node); + rcu_assign_pointer(im_node->child[1], node); + } + + /* Finally, assign the intermediate node to the determined spot */ + rcu_assign_pointer(*slot, im_node); + +out: + if (ret) { + if (new_node) + trie->n_entries--; + + kfree(new_node); + kfree(im_node); + } + + raw_spin_unlock_irqrestore(&trie->lock, irq_flags); + + return ret; +} + +static int trie_delete_elem(struct bpf_map *map, void *key) +{ + /* TODO */ + return -ENOSYS; +} + +static struct bpf_map *trie_alloc(union bpf_attr *attr) +{ + size_t cost, cost_per_node; + struct lpm_trie *trie; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + /* check sanity of attributes */ + if (attr->max_entries == 0 || + attr->map_flags != BPF_F_NO_PREALLOC || + attr->key_size < sizeof(struct bpf_lpm_trie_key) + 1 || + attr->key_size > sizeof(struct bpf_lpm_trie_key) + 256 || + attr->value_size == 0) + return ERR_PTR(-EINVAL); + + trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN); + if (!trie) + return ERR_PTR(-ENOMEM); + + /* copy mandatory map attributes */ + trie->map.map_type = attr->map_type; + trie->map.key_size = attr->key_size; + trie->map.value_size = attr->value_size; + trie->map.max_entries = attr->max_entries; + trie->data_size = attr->key_size - + offsetof(struct bpf_lpm_trie_key, data); + trie->max_prefixlen = trie->data_size * 8; + + cost_per_node = sizeof(struct lpm_trie_node) + + attr->value_size + trie->data_size; + cost = sizeof(*trie) + attr->max_entries * cost_per_node; + trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; 
+ + ret = bpf_map_precharge_memlock(trie->map.pages); + if (ret) { + kfree(trie); + return ERR_PTR(ret); + } + + raw_spin_lock_init(&trie->lock); + + return &trie->map; +} + +static void trie_free(struct bpf_map *map) +{ + struct lpm_trie *trie = container_of(map, struct lpm_trie, map); + struct lpm_trie_node __rcu **slot; + struct lpm_trie_node *node; + + raw_spin_lock(&trie->lock); + + /* Always start at the root and walk down to a node that has no + * children. Then free that node, nullify its reference in the parent + * and start over. + */ + + for (;;) { + slot = &trie->root; + + for (;;) { + node = rcu_dereference_protected(*slot, + lockdep_is_held(&trie->lock)); + if (!node) + goto unlock; + + if (rcu_access_pointer(node->child[0])) { + slot = &node->child[0]; + continue; + } + + if (rcu_access_pointer(node->child[1])) { + slot = &node->child[1]; + continue; + } + + kfree(node); + RCU_INIT_POINTER(*slot, NULL); + break; + } + } + +unlock: + raw_spin_unlock(&trie->lock); +} + +static const struct bpf_map_ops trie_ops = { + .map_alloc = trie_alloc, + .map_free = trie_free, + .map_lookup_elem = trie_lookup_elem, + .map_update_elem = trie_update_elem, + .map_delete_elem = trie_delete_elem, +}; + +static struct bpf_map_type_list trie_type __read_mostly = { + .ops = &trie_ops, + .type = BPF_MAP_TYPE_LPM_TRIE, +}; + +static int __init register_trie_map(void) +{ + bpf_register_map_type(&trie_type); + return 0; +} +late_initcall(register_trie_map); -- cgit v1.2.3 From 4d3381f5a322dd5db2477e224821790478488173 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sat, 21 Jan 2017 17:26:12 +0100 Subject: bpf: Add tests for the lpm trie map The first part of this program runs randomized tests against the lpm-bpf-map. It implements a "Trivial Longest Prefix Match" (tlpm) based on simple, linear, single linked lists. The implementation should be pretty straightforward. Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies the trie-based bpf-map implementation behaves the same way as tlpm. The second part uses 'real world' IPv4 and IPv6 addresses and tests the trie with those. Signed-off-by: David Herrmann Signed-off-by: Daniel Mack Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/.gitignore | 1 + tools/testing/selftests/bpf/Makefile | 4 +- tools/testing/selftests/bpf/test_lpm_map.c | 358 +++++++++++++++++++++++++++++ 3 files changed, 361 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/bpf/test_lpm_map.c diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 071431bedde8..d3b1c9bca407 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -1,3 +1,4 @@ test_verifier test_maps test_lru_map +test_lpm_map diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 7a5f24543a5f..064a3e5f2836 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,8 +1,8 @@ CFLAGS += -Wall -O2 -I../../../../usr/include -test_objs = test_verifier test_maps test_lru_map +test_objs = test_verifier test_maps test_lru_map test_lpm_map -TEST_PROGS := test_verifier test_maps test_lru_map test_kmod.sh +TEST_PROGS := test_verifier test_maps test_lru_map test_lpm_map test_kmod.sh TEST_FILES := $(test_objs) all: $(test_objs) diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c new file mode 100644 index 000000000000..26775c00273f --- /dev/null +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -0,0 +1,358 @@ +/* + * Randomized tests for eBPF longest-prefix-match maps + * + * This program runs randomized tests against the lpm-bpf-map. It implements a + * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked + * lists. The implementation should be pretty straightforward. + * + * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies + * the trie-based bpf-map implementation behaves the same way as tlpm. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_sys.h" +#include "bpf_util.h" + +struct tlpm_node { + struct tlpm_node *next; + size_t n_bits; + uint8_t key[]; +}; + +static struct tlpm_node *tlpm_add(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits) +{ + struct tlpm_node *node; + size_t n; + + /* add new entry with @key/@n_bits to @list and return new head */ + + n = (n_bits + 7) / 8; + node = malloc(sizeof(*node) + n); + assert(node); + + node->next = list; + node->n_bits = n_bits; + memcpy(node->key, key, n); + + return node; +} + +static void tlpm_clear(struct tlpm_node *list) +{ + struct tlpm_node *node; + + /* free all entries in @list */ + + while ((node = list)) { + list = list->next; + free(node); + } +} + +static struct tlpm_node *tlpm_match(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits) +{ + struct tlpm_node *best = NULL; + size_t i; + + /* Perform longest prefix-match on @key/@n_bits. That is, iterate all + * entries and match each prefix against @key. Remember the "best" + * entry we find (i.e., the longest prefix that matches) and return it + * to the caller when done. 
+ */ + + for ( ; list; list = list->next) { + for (i = 0; i < n_bits && i < list->n_bits; ++i) { + if ((key[i / 8] & (1 << (7 - i % 8))) != + (list->key[i / 8] & (1 << (7 - i % 8)))) + break; + } + + if (i >= list->n_bits) { + if (!best || i > best->n_bits) + best = list; + } + } + + return best; +} + +static void test_lpm_basic(void) +{ + struct tlpm_node *list = NULL, *t1, *t2; + + /* very basic, static tests to verify tlpm works as expected */ + + assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + + t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16)); + assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8)); + assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8)); + assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7)); + + t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15)); + assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16)); + + tlpm_clear(list); +} + +static void test_lpm_order(void) +{ + struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL; + size_t i, j; + + /* Verify the tlpm implementation works correctly regardless of the + * order of entries. Insert a random set of entries into @l1, and copy + * the same data in reverse order into @l2. Then verify a lookup of + * random keys will yield the same result in both sets. + */ + + for (i = 0; i < (1 << 12); ++i) + l1 = tlpm_add(l1, (uint8_t[]){ + rand() % 0xff, + rand() % 0xff, + }, rand() % 16 + 1); + + for (t1 = l1; t1; t1 = t1->next) + l2 = tlpm_add(l2, t1->key, t1->n_bits); + + for (i = 0; i < (1 << 8); ++i) { + uint8_t key[] = { rand() % 0xff, rand() % 0xff }; + + t1 = tlpm_match(l1, key, 16); + t2 = tlpm_match(l2, key, 16); + + assert(!t1 == !t2); + if (t1) { + assert(t1->n_bits == t2->n_bits); + for (j = 0; j < t1->n_bits; ++j) + assert((t1->key[j / 8] & (1 << (7 - j % 8))) == + (t2->key[j / 8] & (1 << (7 - j % 8)))); + } + } + + tlpm_clear(l1); + tlpm_clear(l2); +} + +static void test_lpm_map(int keysize) +{ + size_t i, j, n_matches, n_nodes, n_lookups; + struct tlpm_node *t, *list = NULL; + struct bpf_lpm_trie_key *key; + uint8_t *data, *value; + int r, map; + + /* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of + * prefixes and insert it into both tlpm and bpf-lpm. Then run some + * randomized lookups and verify both maps return the same result. 
+ */ + + n_matches = 0; + n_nodes = 1 << 8; + n_lookups = 1 << 16; + + data = alloca(keysize); + memset(data, 0, keysize); + + value = alloca(keysize + 1); + memset(value, 0, keysize + 1); + + key = alloca(sizeof(*key) + keysize); + memset(key, 0, sizeof(*key) + keysize); + + map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, + sizeof(*key) + keysize, + keysize + 1, + 4096, + BPF_F_NO_PREALLOC); + assert(map >= 0); + + for (i = 0; i < n_nodes; ++i) { + for (j = 0; j < keysize; ++j) + value[j] = rand() & 0xff; + value[keysize] = rand() % (8 * keysize + 1); + + list = tlpm_add(list, value, value[keysize]); + + key->prefixlen = value[keysize]; + memcpy(key->data, value, keysize); + r = bpf_map_update(map, key, value, 0); + assert(!r); + } + + for (i = 0; i < n_lookups; ++i) { + for (j = 0; j < keysize; ++j) + data[j] = rand() & 0xff; + + t = tlpm_match(list, data, 8 * keysize); + + key->prefixlen = 8 * keysize; + memcpy(key->data, data, keysize); + r = bpf_map_lookup(map, key, value); + assert(!r || errno == ENOENT); + assert(!t == !!r); + + if (t) { + ++n_matches; + assert(t->n_bits == value[keysize]); + for (j = 0; j < t->n_bits; ++j) + assert((t->key[j / 8] & (1 << (7 - j % 8))) == + (value[j / 8] & (1 << (7 - j % 8)))); + } + } + + close(map); + tlpm_clear(list); + + /* With 255 random nodes in the map, we are pretty likely to match + * something on every lookup. For statistics, use this: + * + * printf(" nodes: %zu\n" + * "lookups: %zu\n" + * "matches: %zu\n", n_nodes, n_lookups, n_matches); + */ +} + +/* Test the implementation with some 'real world' examples */ + +static void test_lpm_ipaddr(void) +{ + struct bpf_lpm_trie_key *key_ipv4; + struct bpf_lpm_trie_key *key_ipv6; + size_t key_size_ipv4; + size_t key_size_ipv6; + int map_fd_ipv4; + int map_fd_ipv6; + __u64 value; + + key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32); + key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4; + key_ipv4 = alloca(key_size_ipv4); + key_ipv6 = alloca(key_size_ipv6); + + map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, + key_size_ipv4, sizeof(value), + 100, BPF_F_NO_PREALLOC); + assert(map_fd_ipv4 >= 0); + + map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, + key_size_ipv6, sizeof(value), + 100, BPF_F_NO_PREALLOC); + assert(map_fd_ipv6 >= 0); + + /* Fill data some IPv4 and IPv6 address ranges */ + value = 1; + key_ipv4->prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); + assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 2; + key_ipv4->prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); + assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 3; + key_ipv4->prefixlen = 24; + inet_pton(AF_INET, "192.168.128.0", key_ipv4->data); + assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 5; + key_ipv4->prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", key_ipv4->data); + assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 4; + key_ipv4->prefixlen = 23; + inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); + assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 0xdeadbeef; + key_ipv6->prefixlen = 64; + inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data); + assert(bpf_map_update(map_fd_ipv6, key_ipv6, &value, 0) == 0); + + /* Set tprefixlen to maximum for lookups */ + key_ipv4->prefixlen = 32; + key_ipv6->prefixlen = 128; + + /* Test some lookups that should come back with a value */ + inet_pton(AF_INET, "192.168.128.23", key_ipv4->data); + 
assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0); + assert(value == 3); + + inet_pton(AF_INET, "192.168.0.1", key_ipv4->data); + assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0); + assert(value == 2); + + inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data); + assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0); + assert(value == 0xdeadbeef); + + inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data); + assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0); + assert(value == 0xdeadbeef); + + /* Test some lookups that should not match any entry */ + inet_pton(AF_INET, "10.0.0.1", key_ipv4->data); + assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 && + errno == ENOENT); + + inet_pton(AF_INET, "11.11.11.11", key_ipv4->data); + assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 && + errno == ENOENT); + + inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data); + assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == -1 && + errno == ENOENT); + + close(map_fd_ipv4); + close(map_fd_ipv6); +} + +int main(void) +{ + struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; + int i, ret; + + /* we want predictable, pseudo random tests */ + srand(0xf00ba1); + + /* allow unlimited locked memory */ + ret = setrlimit(RLIMIT_MEMLOCK, &limit); + if (ret < 0) + perror("Unable to lift memlock rlimit"); + + test_lpm_basic(); + test_lpm_order(); + + /* Test with 8, 16, 24, 32, ... 128 bit prefix length */ + for (i = 1; i <= 16; ++i) + test_lpm_map(i); + + test_lpm_ipaddr(); + + printf("test_lpm: OK\n"); + return 0; +} -- cgit v1.2.3 From b8a943e2942296aad37a8e7adc43db493413e54b Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sat, 21 Jan 2017 17:26:13 +0100 Subject: samples/bpf: add lpm-trie benchmark Extend the map_perf_test_{user,kern}.c infrastructure to stress test lpm-trie lookups. We hook into the kprobe on sys_gettid() and measure the latency depending on trie size and lookup count. On my Intel Haswell i7-6400U, a single gettid() syscall with an empty bpf program takes roughly 6.5us on my system. Lookups in empty tries take ~1.8us on first try, ~0.9us on retries. Lookups in tries with 8192 entries take ~7.1us (on the first _and_ any subsequent try). Signed-off-by: David Herrmann Reviewed-by: Daniel Mack Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- samples/bpf/map_perf_test_kern.c | 30 ++++++++++++++++++++++++ samples/bpf/map_perf_test_user.c | 49 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c index 7ee1574c8ccf..a91872a97742 100644 --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -57,6 +57,14 @@ struct bpf_map_def SEC("maps") percpu_hash_map_alloc = { .map_flags = BPF_F_NO_PREALLOC, }; +struct bpf_map_def SEC("maps") lpm_trie_map_alloc = { + .type = BPF_MAP_TYPE_LPM_TRIE, + .key_size = 8, + .value_size = sizeof(long), + .max_entries = 10000, + .map_flags = BPF_F_NO_PREALLOC, +}; + SEC("kprobe/sys_getuid") int stress_hmap(struct pt_regs *ctx) { @@ -135,5 +143,27 @@ int stress_percpu_lru_hmap_alloc(struct pt_regs *ctx) return 0; } +SEC("kprobe/sys_gettid") +int stress_lpm_trie_map_alloc(struct pt_regs *ctx) +{ + union { + u32 b32[2]; + u8 b8[8]; + } key; + unsigned int i; + + key.b32[0] = 32; + key.b8[4] = 192; + key.b8[5] = 168; + key.b8[6] = 0; + key.b8[7] = 1; + +#pragma clang loop unroll(full) + for (i = 0; i < 32; ++i) + bpf_map_lookup_elem(&lpm_trie_map_alloc, &key); + + return 0; +} + char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c index 9505b4d112f4..680260a91f50 100644 --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -37,6 +37,7 @@ static __u64 time_get_ns(void) #define PERCPU_HASH_KMALLOC (1 << 3) #define LRU_HASH_PREALLOC (1 << 4) #define PERCPU_LRU_HASH_PREALLOC (1 << 5) +#define LPM_KMALLOC (1 << 6) static int test_flags = ~0; @@ -112,6 +113,18 @@ static void test_percpu_hash_kmalloc(int cpu) cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); } +static void test_lpm_kmalloc(int cpu) +{ + __u64 start_time; + int i; + + start_time = time_get_ns(); + for (i = 0; i < MAX_CNT; i++) + syscall(__NR_gettid); + printf("%d:lpm_perf kmalloc %lld events per sec\n", + cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); +} + static void loop(int cpu) { cpu_set_t cpuset; @@ -137,6 +150,9 @@ static void loop(int cpu) if (test_flags & PERCPU_LRU_HASH_PREALLOC) test_percpu_lru_hash_prealloc(cpu); + + if (test_flags & LPM_KMALLOC) + test_lpm_kmalloc(cpu); } static void run_perf_test(int tasks) @@ -162,6 +178,37 @@ static void run_perf_test(int tasks) } } +static void fill_lpm_trie(void) +{ + struct bpf_lpm_trie_key *key; + unsigned long value = 0; + unsigned int i; + int r; + + key = alloca(sizeof(*key) + 4); + key->prefixlen = 32; + + for (i = 0; i < 512; ++i) { + key->prefixlen = rand() % 33; + key->data[0] = rand() & 0xff; + key->data[1] = rand() & 0xff; + key->data[2] = rand() & 0xff; + key->data[3] = rand() & 0xff; + r = bpf_map_update_elem(map_fd[6], key, &value, 0); + assert(!r); + } + + key->prefixlen = 32; + key->data[0] = 192; + key->data[1] = 168; + key->data[2] = 0; + key->data[3] = 1; + value = 128; + + r = bpf_map_update_elem(map_fd[6], key, &value, 0); + assert(!r); +} + int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; @@ -182,6 +229,8 @@ int main(int argc, char **argv) return 1; } + fill_lpm_trie(); + run_perf_test(num_cpu); return 0; -- cgit v1.2.3 From 371a17ed415af2023dddf90210122241d69b6727 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 15 Aug 2016 09:26:32 +0200 Subject: iwlwifi: mvm: expose device timestamp in radiotap Set the relevant fields to export the 
32-bit device timestamp to radiotap using the new mac80211 infrastructure. This will be useful to allow synchronising monitor captures taken on different hardware simultaneously. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 45122dafe922..039cb2ad2d3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -463,6 +463,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; + /* this is the case for CCK frames, it's better (only 8) for OFDM */ + hw->radiotap_timestamp.accuracy = 22; + hw->rate_control_algorithm = "iwl-mvm-rs"; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; -- cgit v1.2.3 From bac453ab3745eaa64137ea6e77e009b45954f0ae Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 7 Oct 2016 15:16:26 +0300 Subject: iwlwifi: mvm: don't restart HW if suspend fails with unified image For unified images, we shouldn't restart the HW if suspend fails. The only reason for restarting the HW with non-unified images is to go back to the D0 image. Fixes: 23ae61282b88 ("iwlwifi: mvm: Do not switch to D3 image on suspend") Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index b88e2048ae0b..207d8ae1e116 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1262,12 +1262,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - if (mvm->restart_fw > 0) { - mvm->restart_fw--; - ieee80211_restart_hw(mvm->hw); - } iwl_mvm_free_nd(mvm); + + if (!unified_image) { + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + if (mvm->restart_fw > 0) { + mvm->restart_fw--; + ieee80211_restart_hw(mvm->hw); + } + } } out_noreset: mutex_unlock(&mvm->mutex); -- cgit v1.2.3 From 2ed1e01910825240e5d53c8d59106230d6e5c52b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 20 Oct 2016 09:41:14 +0200 Subject: iwlwifi: mvm: accept arbitrary memory dump TLVs There's no reason to be validating the memory dump types, or checking them for duplication, or anything, since we really just pass them through from the TLV to the dump. Thus, change the way we handle memory dump TLVs to let the driver just blindly use anything specified there, dumping it into the memory dump output file. This makes the system extensible without driver changes. 
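The pass-through handling amounts to growing a flat array by one entry per memory-segment TLV encountered, instead of indexing a fixed array by segment type. A minimal sketch of that krealloc-append pattern follows; struct seg_list and seg_list_append() are hypothetical stand-ins for the driver's own bookkeeping, only the pattern matches the iwl-drv.c change below:

  /* Hypothetical container; illustrative only, not an iwlwifi type. */
  struct seg_list {
          struct iwl_fw_dbg_mem_seg_tlv *segs;
          size_t n_segs;
  };

  /* Append one segment descriptor to a krealloc()-grown array,
   * without validating its type.
   */
  static int seg_list_append(struct seg_list *list,
                             const struct iwl_fw_dbg_mem_seg_tlv *seg)
  {
          size_t size = sizeof(*list->segs) * (list->n_segs + 1);
          struct iwl_fw_dbg_mem_seg_tlv *n;

          n = krealloc(list->segs, size, GFP_KERNEL);
          if (!n)
                  return -ENOMEM;

          list->segs = n;
          /* copy the TLV verbatim; the dump code interprets it later */
          list->segs[list->n_segs++] = *seg;
          return 0;
  }

The dump side then simply iterates entries 0..n_segs-1 and emits each one, which is what the fw-dbg.c hunks below do.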
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 57 +++++++++--------------- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 18 +------- drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 47 +++++++++---------- 4 files changed, 46 insertions(+), 80 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 45b2f679e4d8..431581229948 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -179,8 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.dbg_conf_tlv[i]); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) kfree(drv->fw.dbg_trigger_tlv[i]); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) - kfree(drv->fw.dbg_mem_tlv[i]); + kfree(drv->fw.dbg_mem_tlv); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -276,7 +275,8 @@ struct iwl_firmware_pieces { size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; - struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX]; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; + size_t n_dbg_mem_tlv; }; /* @@ -1009,31 +1009,25 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = (void *)tlv_data; u32 type; + size_t size; + struct iwl_fw_dbg_mem_seg_tlv *n; if (tlv_len != (sizeof(*dbg_mem))) goto invalid_tlv_len; type = le32_to_cpu(dbg_mem->data_type); - drv->fw.dbg_dynamic_mem = true; - - if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) { - IWL_ERR(drv, - "Skip unknown dbg mem segment: %u\n", - dbg_mem->data_type); - break; - } - - if (pieces->dbg_mem_tlv[type]) { - IWL_ERR(drv, - "Ignore duplicate mem segment: %u\n", - dbg_mem->data_type); - break; - } IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", dbg_mem->data_type); - pieces->dbg_mem_tlv[type] = dbg_mem; + size = sizeof(*pieces->dbg_mem_tlv) * + (pieces->n_dbg_mem_tlv + 1); + n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); + if (!n) + return -ENOMEM; + pieces->dbg_mem_tlv = n; + pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem; + pieces->n_dbg_mem_tlv++; break; } default: @@ -1345,19 +1339,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } } - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) { - if (pieces->dbg_mem_tlv[i]) { - drv->fw.dbg_mem_tlv[i] = - kmemdup(pieces->dbg_mem_tlv[i], - sizeof(*drv->fw.dbg_mem_tlv[i]), - GFP_KERNEL); - if (!drv->fw.dbg_mem_tlv[i]) - goto out_free_fw; - } - } - /* Now that we can no longer fail, copy information */ + drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv; + pieces->dbg_mem_tlv = NULL; + drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv; + /* * The (size - 16) / 12 formula is based on the information recorded * for each event, which is of mode 1 (including timestamp) for all @@ -1441,25 +1428,25 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) op->name, err); #endif } - kfree(pieces); - return; + goto free; try_again: /* try next, if any */ release_firmware(ucode_raw); if (iwl_request_firmware(drv, false)) goto out_unbind; - kfree(pieces); - return; + goto free; out_free_fw: IWL_ERR(drv, "failed to allocate pci memory\n"); iwl_dealloc_ucode(drv); release_firmware(ucode_raw); out_unbind: - kfree(pieces); 
complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); + free: + kfree(pieces->dbg_mem_tlv); + kfree(pieces); } struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 84813b550ef1..11245b9117c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -488,26 +488,10 @@ enum iwl_fw_dbg_monitor_mode { MIPI_MODE = 3, }; -/** - * enum iwl_fw_mem_seg_type - data types for dumping on error - * - * @FW_DBG_MEM_SMEM: the data type is SMEM - * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC - * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC - */ -enum iwl_fw_dbg_mem_seg_type { - FW_DBG_MEM_DCCM_LMAC = 0, - FW_DBG_MEM_DCCM_UMAC, - FW_DBG_MEM_SMEM, - - /* Must be last */ - FW_DBG_MEM_MAX, -}; - /** * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments * - * @data_type: enum %iwl_fw_mem_seg_type + * @data_type: the memory segment type to record * @ofs: the memory segment offset * @len: the memory segment length, in bytes * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 5f229556339a..710ecb490bfc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -295,8 +295,8 @@ struct iwl_fw { struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; - struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX]; - bool dbg_dynamic_mem; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; + size_t n_dbg_mem_tlv; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; struct iwl_gscan_capabilities gscan_capa; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 2e8e3e8e30a3..46ab6b9f680b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -495,11 +495,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct iwl_mvm_dump_ptrs *fw_error_dump; struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; - struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem = - mvm->fw->dbg_mem_tlv; + const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len; - u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len; + u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len; + u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 
0 : mvm->cfg->dccm2_len; bool monitor_dump_only = false; int i; @@ -624,10 +623,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; /* Make room for MEM segments */ - for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { - if (fw_dbg_mem[i]) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - le32_to_cpu(fw_dbg_mem[i]->len); + for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i].len); } /* Make room for fw's virtual image pages, if it exists */ @@ -656,7 +654,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + mvm->fw_dump_desc->len; - if (!mvm->fw->dbg_dynamic_mem) + if (!mvm->fw->n_dbg_mem_tlv) file_len += sram_len + sizeof(*dump_mem); dump_file = vzalloc(file_len); @@ -708,7 +706,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (monitor_dump_only) goto dump_trans_data; - if (!mvm->fw->dbg_dynamic_mem) { + if (!mvm->fw->n_dbg_mem_tlv) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -719,22 +717,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data = iwl_fw_error_next_data(dump_data); } - for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { - if (fw_dbg_mem[i]) { - u32 len = le32_to_cpu(fw_dbg_mem[i]->len); - u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs); - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(len + - sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = fw_dbg_mem[i]->data_type; - dump_mem->offset = cpu_to_le32(ofs); - iwl_trans_read_mem_bytes(mvm->trans, ofs, - dump_mem->data, - len); - dump_data = iwl_fw_error_next_data(dump_data); - } + for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + u32 len = le32_to_cpu(fw_dbg_mem[i].len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = fw_dbg_mem[i].data_type; + dump_mem->offset = cpu_to_le32(ofs); + iwl_trans_read_mem_bytes(mvm->trans, ofs, + dump_mem->data, + len); + dump_data = iwl_fw_error_next_data(dump_data); } if (smem_len) { -- cgit v1.2.3 From 1110f8e37d6601b19acdfa7fd0b0d63808156f6c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 20 Oct 2016 09:44:05 +0200 Subject: iwlwifi: mvm: make iwl_dump_prph() void The return value is never used, so make the function void. 
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 46ab6b9f680b..7df3c3f4749e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -406,17 +406,17 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a02400, .end = 0x00a02758 }, }; -static u32 iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - const struct iwl_prph_range *iwl_prph_dump_addr, - u32 range_len) +static void iwl_dump_prph(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + const struct iwl_prph_range *iwl_prph_dump_addr, + u32 range_len) { struct iwl_fw_error_dump_prph *prph; unsigned long flags; - u32 prph_len = 0, i; + u32 i; if (!iwl_trans_grab_nic_access(trans, &flags)) - return 0; + return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ @@ -425,8 +425,6 @@ static u32 iwl_dump_prph(struct iwl_trans *trans, int reg; __le32 *val; - prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk); @@ -444,8 +442,6 @@ static u32 iwl_dump_prph(struct iwl_trans *trans, } iwl_trans_release_nic_access(trans, &flags); - - return prph_len; } /* -- cgit v1.2.3 From d140199af510ad4749dc5e38b7922135258ba5fd Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Jan 2017 01:26:46 +0100 Subject: bpf, lpm: fix kfree of im_node in trie_update_elem We need to initialize im_node to NULL, otherwise in case of error path it gets passed to kfree() as uninitialized pointer. Fixes: b95a5c4db09b ("bpf: add a longest prefix match trie map implementation") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/lpm_trie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index ba19241d1979..144e9763102f 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -262,7 +262,7 @@ static int trie_update_elem(struct bpf_map *map, void *_key, void *value, u64 flags) { struct lpm_trie *trie = container_of(map, struct lpm_trie, map); - struct lpm_trie_node *node, *im_node, *new_node = NULL; + struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL; struct lpm_trie_node __rcu **slot; struct bpf_lpm_trie_key *key = _key; unsigned long irq_flags; -- cgit v1.2.3 From c6c94aea821640ac422c435f9d4c895af76ed6f6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 13 Jan 2017 10:02:13 +0100 Subject: cfg80211: fix a documentation warning The new restructured text parser complains about the formatting, and really this should be a definition list. In order to fix this without introducing trailing whitespace, convert to the inline kernel-doc format. 
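The conversion can be seen on a toy structure; struct foo and its member are illustrative only, not cfg80211 types. The header-style member list that the restructured text parser warns about:

  /**
   * struct foo - example structure
   * @bar: a counter.
   *     = 0 - disabled
   *     > 0 - enabled with this many retries
   */
  struct foo {
          int bar;
  };

becomes the inline kernel-doc form used by this patch, where each member carries its own comment and can hold a proper definition list:

  /**
   * struct foo - example structure
   */
  struct foo {
          /**
           * @bar:
           * a counter.
           *
           * = 0
           *     disabled
           * > 0
           *     enabled with this many retries
           */
          int bar;
  };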
Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 63 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b7aba6e1a586..870549480e9b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3178,22 +3178,6 @@ struct ieee80211_iface_limit { /** * struct ieee80211_iface_combination - possible interface combination - * @limits: limits for the given interface types - * @n_limits: number of limitations - * @num_different_channels: can use up to this many different channels - * @max_interfaces: maximum number of interfaces in total allowed in this - * group - * @beacon_int_infra_match: In this combination, the beacon intervals - * between infrastructure and AP types must match. This is required - * only in special cases. - * @radar_detect_widths: bitmap of channel widths supported for radar detection - * @radar_detect_regions: bitmap of regions supported for radar detection - * @beacon_int_min_gcd: This interface combination supports different - * beacon intervals. - * = 0 - all beacon intervals for different interface must be same. - * > 0 - any beacon interval for the interface part of this combination AND - * *GCD* of all beacon intervals from beaconing interfaces of this - * combination must be greater or equal to this value. * * With this structure the driver can describe which interface * combinations it supports concurrently. @@ -3252,13 +3236,60 @@ struct ieee80211_iface_limit { * */ struct ieee80211_iface_combination { + /** + * @limits: + * limits for the given interface types + */ const struct ieee80211_iface_limit *limits; + + /** + * @num_different_channels: + * can use up to this many different channels + */ u32 num_different_channels; + + /** + * @max_interfaces: + * maximum number of interfaces in total allowed in this group + */ u16 max_interfaces; + + /** + * @n_limits: + * number of limitations + */ u8 n_limits; + + /** + * @beacon_int_infra_match: + * In this combination, the beacon intervals between infrastructure + * and AP types must match. This is required only in special cases. + */ bool beacon_int_infra_match; + + /** + * @radar_detect_widths: + * bitmap of channel widths supported for radar detection + */ u8 radar_detect_widths; + + /** + * @radar_detect_regions: + * bitmap of regions supported for radar detection + */ u8 radar_detect_regions; + + /** + * @beacon_int_min_gcd: + * This interface combination supports different beacon intervals. + * + * = 0 + * all beacon intervals for different interface must be same. + * > 0 + * any beacon interval for the interface part of this combination AND + * GCD of all beacon intervals from beaconing interfaces of this + * combination must be greater or equal to this value. + */ u32 beacon_int_min_gcd; }; -- cgit v1.2.3 From 57eeb2086d6477968990e1790a9d8d0ec7ee8a4d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 13 Jan 2017 11:12:01 +0100 Subject: mac80211: fix documentation warnings For a few restructured text warnings in mac80211, making the documentation warning-free (for now). In order to not add trailing whitespace, but also not introduce too much noise into this change, move just the affected docs into inline comments. 
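Both this conversion and the cfg80211 one above use the inline kernel-doc form, where each member carries its own /** @name: ... */ block directly above its declaration instead of being listed in the struct's header comment; that allows blank lines and definition lists without creating trailing whitespace. The struct below is a made-up illustration of the format, not code from the tree:

  #include <linux/types.h>

  /**
   * struct example_limits - illustration of inline member documentation
   *
   * Free-form overview text about the structure as a whole goes here;
   * the per-member details live next to the members themselves.
   */
  struct example_limits {
  	/**
  	 * @max_clients:
  	 * Maximum number of associated clients.
  	 *
  	 * = 0
  	 *	no limit is enforced.
  	 * > 0
  	 *	association attempts beyond this count are rejected.
  	 */
  	u16 max_clients;

  	/** @dfs_allowed: true if operation on radar-detection channels is allowed */
  	bool dfs_allowed;
  };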
Signed-off-by: Johannes Berg --- include/net/mac80211.h | 74 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 29 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 86967b85dfd0..33624ffbd5a5 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1768,15 +1768,6 @@ struct ieee80211_sta_rates { * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single * A-MSDU. Taken from the Extended Capabilities element. 0 means * unlimited. - * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This - * field is always valid for packets with a VHT preamble. For packets - * with a HT preamble, additional limits apply: - * + If the skb is transmitted as part of a BA agreement, the - * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes. - * + If the skb is not part of a BA aggreement, the A-MSDU maximal - * size is min(max_amsdu_len, 7935) bytes. - * Both additional HT limits must be enforced by the low level driver. - * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2). * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) @@ -1799,6 +1790,22 @@ struct ieee80211_sta { bool tdls_initiator; bool mfp; u8 max_amsdu_subframes; + + /** + * @max_amsdu_len: + * indicates the maximal length of an A-MSDU in bytes. + * This field is always valid for packets with a VHT preamble. + * For packets with a HT preamble, additional limits apply: + * + * * If the skb is transmitted as part of a BA agreement, the + * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes. + * * If the skb is not part of a BA aggreement, the A-MSDU maximal + * size is min(max_amsdu_len, 7935) bytes. + * + * Both additional HT limits must be enforced by the low level + * driver. This is defined by the spec (IEEE 802.11-2012 section + * 8.3.2.2 NOTE 2). + */ u16 max_amsdu_len; bool support_p2p_ps; u16 max_rc_amsdu_len; @@ -3203,26 +3210,6 @@ enum ieee80211_reconfig_type { * Returns non-zero if this device sent the last beacon. * The callback can sleep. * - * @ampdu_action: Perform a certain A-MPDU action - * The RA/TID combination determines the destination and TID we want - * the ampdu action to be performed for. The action is defined through - * ieee80211_ampdu_mlme_action. - * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver - * may neither send aggregates containing more subframes than @buf_size - * nor send aggregates in a way that lost frames would exceed the - * buffer size. If just limiting the aggregate size, this would be - * possible with a buf_size of 8: - * - TX: 1.....7 - * - RX: 2....7 (lost frame #1) - * - TX: 8..1... - * which is invalid since #1 was now re-transmitted well past the - * buffer size of 8. Correct ways to retransmit #1 would be: - * - TX: 1 or 18 or 81 - * Even "189" would be wrong since 1 could be lost again. - * - * Returns a negative error code on failure. - * The callback can sleep. - * * @get_survey: Return per-channel survey information * * @rfkill_poll: Poll rfkill hardware state. If you need this, you also @@ -3575,6 +3562,35 @@ struct ieee80211_ops { s64 offset); void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*tx_last_beacon)(struct ieee80211_hw *hw); + + /** + * @ampdu_action: + * Perform a certain A-MPDU action. 
+ * The RA/TID combination determines the destination and TID we want + * the ampdu action to be performed for. The action is defined through + * ieee80211_ampdu_mlme_action. + * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver + * may neither send aggregates containing more subframes than @buf_size + * nor send aggregates in a way that lost frames would exceed the + * buffer size. If just limiting the aggregate size, this would be + * possible with a buf_size of 8: + * + * - ``TX: 1.....7`` + * - ``RX: 2....7`` (lost frame #1) + * - ``TX: 8..1...`` + * + * which is invalid since #1 was now re-transmitted well past the + * buffer size of 8. Correct ways to retransmit #1 would be: + * + * - ``TX: 1 or`` + * - ``TX: 18 or`` + * - ``TX: 81`` + * + * Even ``189`` would be wrong since 1 could be lost again. + * + * Returns a negative error code on failure. + * The callback can sleep. + */ int (*ampdu_action)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params); -- cgit v1.2.3 From 1331b62c97217459780e040e8a66bb609f2acd20 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 2 Jan 2017 16:01:57 +0100 Subject: rfkill: remove rfkill-regulator There are no users of this ("vrfkill") in the tree, so it's just dead code - remove it. This also isn't really how rfkill is supposed to be used - it's intended as a signalling mechanism to/from the device, which the driver (and partially cfg80211) will handle - having a separate rfkill instance for a regulator is confusing, the driver should use the regulator instead to turn off the device when requested. Signed-off-by: Johannes Berg --- include/linux/rfkill-regulator.h | 48 ------------ net/rfkill/Kconfig | 11 --- net/rfkill/Makefile | 1 - net/rfkill/rfkill-regulator.c | 154 --------------------------------------- 4 files changed, 214 deletions(-) delete mode 100644 include/linux/rfkill-regulator.h delete mode 100644 net/rfkill/rfkill-regulator.c diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h deleted file mode 100644 index aca36bc83315..000000000000 --- a/include/linux/rfkill-regulator.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * rfkill-regulator.c - Regulator consumer driver for rfkill - * - * Copyright (C) 2009 Guiming Zhuo - * Copyright (C) 2011 Antonio Ospite - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#ifndef __LINUX_RFKILL_REGULATOR_H -#define __LINUX_RFKILL_REGULATOR_H - -/* - * Use "vrfkill" as supply id when declaring the regulator consumer: - * - * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = { - * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" }, - * }; - * - * If you have several regulator driven rfkill, you can append a numerical id to - * .dev_name as done above, and use the same id when declaring the platform - * device: - * - * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = { - * .name = "ezx-bluetooth", - * .type = RFKILL_TYPE_BLUETOOTH, - * }; - * - * static struct platform_device a910_rfkill = { - * .name = "rfkill-regulator", - * .id = 0, - * .dev = { - * .platform_data = &ezx_rfkill_bt_data, - * }, - * }; - */ - -#include - -struct rfkill_regulator_platform_data { - char *name; /* the name for the rfkill switch */ - enum rfkill_type type; /* the type as specified in rfkill.h */ -}; - -#endif /* __LINUX_RFKILL_REGULATOR_H */ diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 868f1ad0415a..060600b03fad 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig @@ -23,17 +23,6 @@ config RFKILL_INPUT depends on INPUT = y || RFKILL = INPUT default y if !EXPERT -config RFKILL_REGULATOR - tristate "Generic rfkill regulator driver" - depends on RFKILL || !RFKILL - depends on REGULATOR - help - This options enable controlling radio transmitters connected to - voltage regulator using the regulator framework. - - To compile this driver as a module, choose M here: the module will - be called rfkill-regulator. - config RFKILL_GPIO tristate "GPIO RFKILL driver" depends on RFKILL diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile index 311768783f4a..87a80aded0b3 100644 --- a/net/rfkill/Makefile +++ b/net/rfkill/Makefile @@ -5,5 +5,4 @@ rfkill-y += core.o rfkill-$(CONFIG_RFKILL_INPUT) += input.o obj-$(CONFIG_RFKILL) += rfkill.o -obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c deleted file mode 100644 index 50cd26a48e87..000000000000 --- a/net/rfkill/rfkill-regulator.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * rfkill-regulator.c - Regulator consumer driver for rfkill - * - * Copyright (C) 2009 Guiming Zhuo - * Copyright (C) 2011 Antonio Ospite - * - * Implementation inspired by leds-regulator driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include - -struct rfkill_regulator_data { - struct rfkill *rf_kill; - bool reg_enabled; - - struct regulator *vcc; -}; - -static int rfkill_regulator_set_block(void *data, bool blocked) -{ - struct rfkill_regulator_data *rfkill_data = data; - int ret = 0; - - pr_debug("%s: blocked: %d\n", __func__, blocked); - - if (blocked) { - if (rfkill_data->reg_enabled) { - regulator_disable(rfkill_data->vcc); - rfkill_data->reg_enabled = false; - } - } else { - if (!rfkill_data->reg_enabled) { - ret = regulator_enable(rfkill_data->vcc); - if (!ret) - rfkill_data->reg_enabled = true; - } - } - - pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__, - regulator_is_enabled(rfkill_data->vcc)); - - return ret; -} - -static struct rfkill_ops rfkill_regulator_ops = { - .set_block = rfkill_regulator_set_block, -}; - -static int rfkill_regulator_probe(struct platform_device *pdev) -{ - struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data; - struct rfkill_regulator_data *rfkill_data; - struct regulator *vcc; - struct rfkill *rf_kill; - int ret = 0; - - if (pdata == NULL) { - dev_err(&pdev->dev, "no platform data\n"); - return -ENODEV; - } - - if (pdata->name == NULL || pdata->type == 0) { - dev_err(&pdev->dev, "invalid name or type in platform data\n"); - return -EINVAL; - } - - vcc = regulator_get_exclusive(&pdev->dev, "vrfkill"); - if (IS_ERR(vcc)) { - dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name); - ret = PTR_ERR(vcc); - goto out; - } - - rfkill_data = kzalloc(sizeof(*rfkill_data), GFP_KERNEL); - if (rfkill_data == NULL) { - ret = -ENOMEM; - goto err_data_alloc; - } - - rf_kill = rfkill_alloc(pdata->name, &pdev->dev, - pdata->type, - &rfkill_regulator_ops, rfkill_data); - if (rf_kill == NULL) { - ret = -ENOMEM; - goto err_rfkill_alloc; - } - - if (regulator_is_enabled(vcc)) { - dev_dbg(&pdev->dev, "Regulator already enabled\n"); - rfkill_data->reg_enabled = true; - } - rfkill_data->vcc = vcc; - rfkill_data->rf_kill = rf_kill; - - ret = rfkill_register(rf_kill); - if (ret) { - dev_err(&pdev->dev, "Cannot register rfkill device\n"); - goto err_rfkill_register; - } - - platform_set_drvdata(pdev, rfkill_data); - dev_info(&pdev->dev, "%s initialized\n", pdata->name); - - return 0; - -err_rfkill_register: - rfkill_destroy(rf_kill); -err_rfkill_alloc: - kfree(rfkill_data); -err_data_alloc: - regulator_put(vcc); -out: - return ret; -} - -static int rfkill_regulator_remove(struct platform_device *pdev) -{ - struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev); - struct rfkill *rf_kill = rfkill_data->rf_kill; - - rfkill_unregister(rf_kill); - rfkill_destroy(rf_kill); - regulator_put(rfkill_data->vcc); - kfree(rfkill_data); - - return 0; -} - -static struct platform_driver rfkill_regulator_driver = { - .probe = rfkill_regulator_probe, - .remove = rfkill_regulator_remove, - .driver = { - .name = "rfkill-regulator", - }, -}; - -module_platform_driver(rfkill_regulator_driver); - -MODULE_AUTHOR("Guiming Zhuo "); -MODULE_AUTHOR("Antonio Ospite "); -MODULE_DESCRIPTION("Regulator consumer driver for rfkill"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:rfkill-regulator"); -- cgit v1.2.3 From 12a6075cabc0d9ffbc0366b44daa22f278606312 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Tue, 10 Jan 2017 18:52:06 +0100 Subject: can: dev: add CAN interface termination API This patch adds a netlink interface to configure the CAN bus termination of CAN interfaces. 
Inside the driver an array of supported termination values is defined: const u16 drvname_termination[] = { 60, 120, CAN_TERMINATION_DISABLED }; struct drvname_priv *priv; priv = netdev_priv(dev); priv->termination_const = drvname_termination; priv->termination_const_cnt = ARRAY_SIZE(drvname_termination); priv->termination = CAN_TERMINATION_DISABLED; And the funtion to set the value has to be defined: priv->do_set_termination = drvname_set_termination; Signed-off-by: Oliver Hartkopp Reviewed-by: Ramesh Shanmugasundaram Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 49 +++++++++++++++++++++++++++++++++++++++- include/linux/can/dev.h | 4 ++++ include/uapi/linux/can/netlink.h | 5 ++++ 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 8d6208c0b400..fefe2cd17721 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -958,6 +958,30 @@ static int can_changelink(struct net_device *dev, } } + if (data[IFLA_CAN_TERMINATION]) { + const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); + const unsigned int num_term = priv->termination_const_cnt; + unsigned int i; + + if (!priv->do_set_termination) + return -EOPNOTSUPP; + + /* check whether given value is supported by the interface */ + for (i = 0; i < num_term; i++) { + if (termval == priv->termination_const[i]) + break; + } + if (i >= num_term) + return -EINVAL; + + /* Finally, set the termination value */ + err = priv->do_set_termination(dev, termval); + if (err) + return err; + + priv->termination = termval; + } + return 0; } @@ -980,6 +1004,11 @@ static size_t can_get_size(const struct net_device *dev) size += nla_total_size(sizeof(struct can_bittiming)); if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ size += nla_total_size(sizeof(struct can_bittiming_const)); + if (priv->termination_const) { + size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ + size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ + priv->termination_const_cnt); + } return size; } @@ -1018,7 +1047,15 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) (priv->data_bittiming_const && nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST, sizeof(*priv->data_bittiming_const), - priv->data_bittiming_const))) + priv->data_bittiming_const)) || + + (priv->termination_const && + (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || + nla_put(skb, IFLA_CAN_TERMINATION_CONST, + sizeof(*priv->termination_const) * + priv->termination_const_cnt, + priv->termination_const)))) + return -EMSGSIZE; return 0; @@ -1073,6 +1110,16 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { */ int register_candev(struct net_device *dev) { + struct can_priv *priv = netdev_priv(dev); + + /* Ensure termination_const, termination_const_cnt and + * do_set_termination consistency. All must be either set or + * unset. 
+ */ + if ((!priv->termination_const != !priv->termination_const_cnt) || + (!priv->termination_const != !priv->do_set_termination)) + return -EINVAL; + dev->rtnl_link_ops = &can_link_ops; return register_netdev(dev); } diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 5f5270941ba0..f6a57f322f00 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -38,6 +38,9 @@ struct can_priv { struct can_bittiming bittiming, data_bittiming; const struct can_bittiming_const *bittiming_const, *data_bittiming_const; + const u16 *termination_const; + unsigned int termination_const_cnt; + u16 termination; struct can_clock clock; enum can_state state; @@ -53,6 +56,7 @@ struct can_priv { int (*do_set_bittiming)(struct net_device *dev); int (*do_set_data_bittiming)(struct net_device *dev); int (*do_set_mode)(struct net_device *dev, enum can_mode mode); + int (*do_set_termination)(struct net_device *dev, u16 term); int (*do_get_state)(const struct net_device *dev, enum can_state *state); int (*do_get_berr_counter)(const struct net_device *dev, diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index 94ffe0c83ce7..7414771926fb 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -127,9 +127,14 @@ enum { IFLA_CAN_BERR_COUNTER, IFLA_CAN_DATA_BITTIMING, IFLA_CAN_DATA_BITTIMING_CONST, + IFLA_CAN_TERMINATION, + IFLA_CAN_TERMINATION_CONST, __IFLA_CAN_MAX }; #define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1) +/* u16 termination range: 1..65535 Ohms */ +#define CAN_TERMINATION_DISABLED 0 + #endif /* !_UAPI_CAN_NETLINK_H */ -- cgit v1.2.3 From c3606d438a14faa8319eccb75b9502960a97af16 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Fri, 6 Jan 2017 11:12:41 +0100 Subject: can: dev: can_changelink: allow to set bitrate on devices not providing {data_,}bittiming_const Until commit 08da7da41ea4 can: provide a separate bittiming_const parameter to bittiming functions it was possible to have devices not providing bittiming_const. This can be used for hardware that only support pre-defined fixed bitrates. Although no mainline driver is using this feature so far. This patch re-introduces this feature for the bitrate and the data bitrate (of CANFD controllers). The driver can specify the {data_,}bittiming_const (if the bittiming parameters should be calculated from the bittiming_const) as before or no {data_,}bittiming_const but implement the do_set_{data,}bittiming callback. Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index fefe2cd17721..afcf487382c9 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -284,10 +284,6 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, { int err; - /* Check if the CAN device has bit-timing parameters */ - if (!btc) - return -EOPNOTSUPP; - /* * Depending on the given can_bittiming parameter structure the CAN * timing parameters are calculated based on the provided bitrate OR @@ -872,10 +868,22 @@ static int can_changelink(struct net_device *dev, /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; + + /* Calculate bittiming parameters based on + * bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. 
+ */ + if (!priv->bittiming_const && !priv->do_set_bittiming) + return -EOPNOTSUPP; + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - err = can_get_bittiming(dev, &bt, priv->bittiming_const); - if (err) - return err; + if (priv->bittiming_const) { + err = can_get_bittiming(dev, &bt, + priv->bittiming_const); + if (err) + return err; + } memcpy(&priv->bittiming, &bt, sizeof(bt)); if (priv->do_set_bittiming) { @@ -943,11 +951,23 @@ static int can_changelink(struct net_device *dev, /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; + + /* Calculate bittiming parameters based on + * data_bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. + */ + if (!priv->data_bittiming_const && !priv->do_set_data_bittiming) + return -EOPNOTSUPP; + memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(dbt)); - err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const); - if (err) - return err; + if (priv->data_bittiming_const) { + err = can_get_bittiming(dev, &dbt, + priv->data_bittiming_const); + if (err) + return err; + } memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); if (priv->do_set_data_bittiming) { -- cgit v1.2.3 From 431af779256cd6cb8328ac23c5696bae63c33a51 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 11 Jan 2017 17:05:35 +0100 Subject: can: dev: add CAN interface API for fixed bitrates Some CAN interfaces only support fixed fixed bitrates. This patch adds a netlink interface to get the list of the CAN interface's fixed bitrates and data bitrates. Inside the driver arrays of supported data- bitrate values are defined. const u32 drvname_bitrate[] = { 20000, 50000, 100000 }; const u32 drvname_data_bitrate[] = { 200000, 500000, 1000000 }; struct drvname_priv *priv; priv = netdev_priv(dev); priv->bitrate_const = drvname_bitrate; priv->bitrate_const_cnt = ARRAY_SIZE(drvname_bitrate); priv->data_bitrate_const = drvname_data_bitrate; priv->data_bitrate_const_cnt = ARRAY_SIZE(drvname_data_bitrate); Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 81 ++++++++++++++++++++++++++++++++-------- include/linux/can/dev.h | 4 ++ include/uapi/linux/can/netlink.h | 2 + 3 files changed, 71 insertions(+), 16 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index afcf487382c9..611d16a7061d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -279,8 +279,29 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, return 0; } +/* Checks the validity of predefined bitrate settings */ +static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) +{ + struct can_priv *priv = netdev_priv(dev); + unsigned int i; + + for (i = 0; i < bitrate_const_cnt; i++) { + if (bt->bitrate == bitrate_const[i]) + break; + } + + if (i >= priv->bitrate_const_cnt) + return -EINVAL; + + return 0; +} + static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) + const struct can_bittiming_const *btc, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) { int err; @@ -290,10 +311,13 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, * alternatively the CAN timing parameters (tq, prop_seg, etc.) are * provided directly which are then checked and fixed up. 
*/ - if (!bt->tq && bt->bitrate) + if (!bt->tq && bt->bitrate && btc) err = can_calc_bittiming(dev, bt, btc); - else if (bt->tq && !bt->bitrate) + else if (bt->tq && !bt->bitrate && btc) err = can_fixup_bittiming(dev, bt, btc); + else if (!bt->tq && bt->bitrate && bitrate_const) + err = can_validate_bitrate(dev, bt, bitrate_const, + bitrate_const_cnt); else err = -EINVAL; @@ -878,12 +902,12 @@ static int can_changelink(struct net_device *dev, return -EOPNOTSUPP; memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - if (priv->bittiming_const) { - err = can_get_bittiming(dev, &bt, - priv->bittiming_const); - if (err) - return err; - } + err = can_get_bittiming(dev, &bt, + priv->bittiming_const, + priv->bitrate_const, + priv->bitrate_const_cnt); + if (err) + return err; memcpy(&priv->bittiming, &bt, sizeof(bt)); if (priv->do_set_bittiming) { @@ -962,12 +986,12 @@ static int can_changelink(struct net_device *dev, memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(dbt)); - if (priv->data_bittiming_const) { - err = can_get_bittiming(dev, &dbt, - priv->data_bittiming_const); - if (err) - return err; - } + err = can_get_bittiming(dev, &dbt, + priv->data_bittiming_const, + priv->data_bitrate_const, + priv->data_bitrate_const_cnt); + if (err) + return err; memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); if (priv->do_set_data_bittiming) { @@ -1029,6 +1053,12 @@ static size_t can_get_size(const struct net_device *dev) size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ priv->termination_const_cnt); } + if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt); + if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt); return size; } @@ -1074,7 +1104,20 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put(skb, IFLA_CAN_TERMINATION_CONST, sizeof(*priv->termination_const) * priv->termination_const_cnt, - priv->termination_const)))) + priv->termination_const))) || + + (priv->bitrate_const && + nla_put(skb, IFLA_CAN_BITRATE_CONST, + sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt, + priv->bitrate_const)) || + + (priv->data_bitrate_const && + nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, + sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt, + priv->data_bitrate_const)) + ) return -EMSGSIZE; @@ -1140,6 +1183,12 @@ int register_candev(struct net_device *dev) (!priv->termination_const != !priv->do_set_termination)) return -EINVAL; + if (!priv->bitrate_const != !priv->bitrate_const_cnt) + return -EINVAL; + + if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) + return -EINVAL; + dev->rtnl_link_ops = &can_link_ops; return register_netdev(dev); } diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index f6a57f322f00..141b05aade81 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -41,6 +41,10 @@ struct can_priv { const u16 *termination_const; unsigned int termination_const_cnt; u16 termination; + const u32 *bitrate_const; + unsigned int bitrate_const_cnt; + const u32 *data_bitrate_const; + unsigned int data_bitrate_const_cnt; struct can_clock clock; enum can_state state; diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index 7414771926fb..fdf75f74fdaf 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -129,6 +129,8 
@@ enum { IFLA_CAN_DATA_BITTIMING_CONST, IFLA_CAN_TERMINATION, IFLA_CAN_TERMINATION_CONST, + IFLA_CAN_BITRATE_CONST, + IFLA_CAN_DATA_BITRATE_CONST, __IFLA_CAN_MAX }; -- cgit v1.2.3 From 1e9bbb9bc2aa549d31fa0a0496ec33df0b3ae7a8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 Jan 2017 15:02:38 +0000 Subject: can: softing_cs: ret is never non-zero, so remove non-zero check and -ENODEV return The error return ret is never zero in the error handling path in softingcs_probe, so the check for non-zero and returning -ENODEV is logically dead code and hence redundant. Remove it and just return ret. Signed-off-by: Colin Ian King Signed-off-by: Marc Kleine-Budde --- drivers/net/can/softing/softing_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index cdc0c7433a4b..4d4492884e0b 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -310,7 +310,7 @@ pcmcia_bad: pcmcia_failed: pcmcia_disable_device(pcmcia); pcmcia->priv = NULL; - return ret ?: -ENODEV; + return ret; } static const struct pcmcia_device_id softingcs_ids[] = { -- cgit v1.2.3 From 4548b683b78137f8eadeb312b94e20bb0d4a7141 Mon Sep 17 00:00:00 2001 From: Krister Johansen Date: Fri, 20 Jan 2017 17:49:11 -0800 Subject: Introduce a sysctl that modifies the value of PROT_SOCK. Add net.ipv4.ip_unprivileged_port_start, which is a per namespace sysctl that denotes the first unprivileged inet port in the namespace. To disable all privileged ports set this to zero. It also checks for overlap with the local port range. The privileged and local range may not overlap. The use case for this change is to allow containerized processes to bind to priviliged ports, but prevent them from ever being allowed to modify their container's network configuration. The latter is accomplished by ensuring that the network namespace is not a child of the user namespace. This modification was needed to allow the container manager to disable a namespace's priviliged port restrictions without exposing control of the network namespace to processes in the user namespace. Signed-off-by: Krister Johansen Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 9 ++++++ include/net/ip.h | 10 +++++++ include/net/netns/ipv4.h | 1 + net/ipv4/af_inet.c | 5 +++- net/ipv4/sysctl_net_ipv4.c | 50 +++++++++++++++++++++++++++++++++- net/ipv6/af_inet6.c | 3 +- net/netfilter/ipvs/ip_vs_ctl.c | 7 ++--- net/sctp/socket.c | 10 ++++--- security/selinux/hooks.c | 3 +- 9 files changed, 86 insertions(+), 12 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index aa1bb49f1dc6..17f2e7791042 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -822,6 +822,15 @@ ip_local_reserved_ports - list of comma separated ranges Default: Empty +ip_unprivileged_port_start - INTEGER + This is a per-namespace sysctl. It defines the first + unprivileged port in the network namespace. Privileged ports + require root or CAP_NET_BIND_SERVICE in order to bind to them. + To disable all privileged ports, set this to 0. It may not + overlap with the ip_local_reserved_ports range. + + Default: 1024 + ip_nonlocal_bind - BOOLEAN If set, allows processes to bind() to non-local IP addresses, which can be quite useful - but may break some applications. 
diff --git a/include/net/ip.h b/include/net/ip.h index ab6761a7c883..bf264a8db1ce 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -263,11 +263,21 @@ static inline bool sysctl_dev_name_is_allowed(const char *name) return strcmp(name, "default") != 0 && strcmp(name, "all") != 0; } +static inline int inet_prot_sock(struct net *net) +{ + return net->ipv4.sysctl_ip_prot_sock; +} + #else static inline int inet_is_local_reserved_port(struct net *net, int port) { return 0; } + +static inline int inet_prot_sock(struct net *net) +{ + return PROT_SOCK; +} #endif __be32 inet_current_timestamp(void); diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 8e3f5b6f26d5..e365732b8051 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -135,6 +135,7 @@ struct netns_ipv4 { #ifdef CONFIG_SYSCTL unsigned long *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; #endif #ifdef CONFIG_IP_MROUTE diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index aae410bb655a..28fe8da4e1ac 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -479,7 +479,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) snum = ntohs(addr->sin_port); err = -EACCES; - if (snum && snum < PROT_SOCK && + if (snum && snum < inet_prot_sock(net) && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) goto out; @@ -1700,6 +1700,9 @@ static __net_init int inet_init_net(struct net *net) net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; net->ipv4.sysctl_ip_dynaddr = 0; net->ipv4.sysctl_ip_early_demux = 1; +#ifdef CONFIG_SYSCTL + net->ipv4.sysctl_ip_prot_sock = PROT_SOCK; +#endif return 0; } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index c8d283615c6f..1b861997fdc5 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -35,6 +35,8 @@ static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; static int tcp_adv_win_scale_min = -31; static int tcp_adv_win_scale_max = 31; +static int ip_privileged_port_min; +static int ip_privileged_port_max = 65535; static int ip_ttl_min = 1; static int ip_ttl_max = 255; static int tcp_syn_retries_min = 1; @@ -79,7 +81,12 @@ static int ipv4_local_port_range(struct ctl_table *table, int write, ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && ret == 0) { - if (range[1] < range[0]) + /* Ensure that the upper limit is not smaller than the lower, + * and that the lower does not encroach upon the privileged + * port limit. + */ + if ((range[1] < range[0]) || + (range[0] < net->ipv4.sysctl_ip_prot_sock)) ret = -EINVAL; else set_local_port_range(net, range); @@ -88,6 +95,40 @@ static int ipv4_local_port_range(struct ctl_table *table, int write, return ret; } +/* Validate changes from /proc interface. */ +static int ipv4_privileged_ports(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct net *net = container_of(table->data, struct net, + ipv4.sysctl_ip_prot_sock); + int ret; + int pports; + int range[2]; + struct ctl_table tmp = { + .data = &pports, + .maxlen = sizeof(pports), + .mode = table->mode, + .extra1 = &ip_privileged_port_min, + .extra2 = &ip_privileged_port_max, + }; + + pports = net->ipv4.sysctl_ip_prot_sock; + + ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); + + if (write && ret == 0) { + inet_get_local_port_range(net, &range[0], &range[1]); + /* Ensure that the local port range doesn't overlap with the + * privileged port range. 
+ */ + if (range[0] < pports) + ret = -EINVAL; + else + net->ipv4.sysctl_ip_prot_sock = pports; + } + + return ret; +} static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high) { @@ -964,6 +1005,13 @@ static struct ctl_table ipv4_net_table[] = { .extra2 = &one, }, #endif + { + .procname = "ip_unprivileged_port_start", + .maxlen = sizeof(int), + .data = &init_net.ipv4.sysctl_ip_prot_sock, + .mode = 0644, + .proc_handler = ipv4_privileged_ports, + }, { } }; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index aa42123bc301..04db40620ea6 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -302,7 +302,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return -EINVAL; snum = ntohs(addr->sin6_port); - if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) + if (snum && snum < inet_prot_sock(net) && + !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; lock_sock(sk); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 55e0169caa4c..8b7416f4e01a 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -426,10 +426,9 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol */ svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport); - if (svc == NULL - && protocol == IPPROTO_TCP - && atomic_read(&ipvs->ftpsvc_counter) - && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) { + if (!svc && protocol == IPPROTO_TCP && + atomic_read(&ipvs->ftpsvc_counter) && + (vport == FTPDATA || ntohs(vport) >= inet_prot_sock(ipvs->net))) { /* * Check if ftp service entry exists, the packet * might belong to FTP data connections. diff --git a/net/sctp/socket.c b/net/sctp/socket.c index bee4dd3feabb..d699d2cbf275 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -360,7 +360,7 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) } } - if (snum && snum < PROT_SOCK && + if (snum && snum < inet_prot_sock(net) && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; @@ -1152,8 +1152,10 @@ static int __sctp_connect(struct sock *sk, * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ - if (ep->base.bind_addr.port < PROT_SOCK && - !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { + if (ep->base.bind_addr.port < + inet_prot_sock(net) && + !ns_capable(net->user_ns, + CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } @@ -1818,7 +1820,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) * but it SHOULD NOT be permitted to open new * associations. 
*/ - if (ep->base.bind_addr.port < PROT_SOCK && + if (ep->base.bind_addr.port < inet_prot_sock(net) && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_unlock; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index c7c6619431d5..53cb6da5f1c6 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4365,7 +4365,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in inet_get_local_port_range(sock_net(sk), &low, &high); - if (snum < max(PROT_SOCK, low) || snum > high) { + if (snum < max(inet_prot_sock(sock_net(sk)), low) || + snum > high) { err = sel_netport_sid(sk->sk_protocol, snum, &sid); if (err) -- cgit v1.2.3 From 6db6f0eae6052b70885562e1733896647ec1d807 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 21 Jan 2017 21:01:32 +0100 Subject: bridge: multicast to unicast MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements an optional, per bridge port flag and feature to deliver multicast packets to any host on the according port via unicast individually. This is done by copying the packet per host and changing the multicast destination MAC to a unicast one accordingly. multicast-to-unicast works on top of the multicast snooping feature of the bridge. Which means unicast copies are only delivered to hosts which are interested in it and signalized this via IGMP/MLD reports previously. This feature is intended for interface types which have a more reliable and/or efficient way to deliver unicast packets than broadcast ones (e.g. wifi). However, it should only be enabled on interfaces where no IGMPv2/MLDv1 report suppression takes place. This feature is disabled by default. The initial patch and idea is from Felix Fietkau. Signed-off-by: Felix Fietkau [linus.luessing@c0d3.blue: various bug + style fixes, commit message] Signed-off-by: Linus Lüssing Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/linux/if_bridge.h | 1 + include/uapi/linux/if_link.h | 1 + net/bridge/br_forward.c | 39 ++++++++++++++++++- net/bridge/br_mdb.c | 2 +- net/bridge/br_multicast.c | 90 ++++++++++++++++++++++++++++++++------------ net/bridge/br_netlink.c | 5 +++ net/bridge/br_private.h | 3 +- net/bridge/br_sysfs_if.c | 2 + 8 files changed, 114 insertions(+), 29 deletions(-) diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index c6587c01d951..debc9d5904e5 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -46,6 +46,7 @@ struct br_ip_list { #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) #define BR_MCAST_FLOOD BIT(11) +#define BR_MULTICAST_TO_UNICAST BIT(12) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 184b16ed2b84..b9aa5641ebe5 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -321,6 +321,7 @@ enum { IFLA_BRPORT_MULTICAST_ROUTER, IFLA_BRPORT_PAD, IFLA_BRPORT_MCAST_FLOOD, + IFLA_BRPORT_MCAST_TO_UCAST, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 7cb41aee4c82..a0f9d0037d24 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -174,6 +174,31 @@ out: return p; } +static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, + const unsigned char *addr, bool local_orig) +{ + struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; + const unsigned char *src = eth_hdr(skb)->h_source; + + if (!should_deliver(p, skb)) + return; + + /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */ + if (skb->dev == p->dev && ether_addr_equal(src, addr)) + return; + + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) { + dev->stats.tx_dropped++; + return; + } + + if (!is_broadcast_ether_addr(addr)) + memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN); + + __br_forward(p, skb, local_orig); +} + /* called under rcu_read_lock */ void br_flood(struct net_bridge *br, struct sk_buff *skb, enum br_pkt_type pkt_type, bool local_rcv, bool local_orig) @@ -241,10 +266,20 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst, rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) : NULL; - port = (unsigned long)lport > (unsigned long)rport ? 
- lport : rport; + if ((unsigned long)lport > (unsigned long)rport) { + port = lport; + + if (port->flags & BR_MULTICAST_TO_UNICAST) { + maybe_deliver_addr(lport, skb, p->eth_addr, + local_orig); + goto delivered; + } + } else { + port = rport; + } prev = maybe_deliver(prev, port, skb, local_orig); +delivered: if (IS_ERR(prev)) goto out; if (prev == port) diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 7dbc80d01eb0..056e6ac49d8f 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -531,7 +531,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, break; } - p = br_multicast_new_port_group(port, group, *pp, state); + p = br_multicast_new_port_group(port, group, *pp, state, NULL); if (unlikely(!p)) return -ENOMEM; rcu_assign_pointer(*pp, p); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f66346122dc4..1de3438e36bf 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -43,12 +43,14 @@ static void br_multicast_add_router(struct net_bridge *br, static void br_ip4_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group, - __u16 vid); + __u16 vid, + const unsigned char *src); + #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group, - __u16 vid); + __u16 vid, const unsigned char *src); #endif unsigned int br_mdb_rehash_seq; @@ -711,7 +713,8 @@ struct net_bridge_port_group *br_multicast_new_port_group( struct net_bridge_port *port, struct br_ip *group, struct net_bridge_port_group __rcu *next, - unsigned char flags) + unsigned char flags, + const unsigned char *src) { struct net_bridge_port_group *p; @@ -726,12 +729,32 @@ struct net_bridge_port_group *br_multicast_new_port_group( hlist_add_head(&p->mglist, &port->mglist); setup_timer(&p->timer, br_multicast_port_group_expired, (unsigned long)p); + + if (src) + memcpy(p->eth_addr, src, ETH_ALEN); + else + memset(p->eth_addr, 0xff, ETH_ALEN); + return p; } +static bool br_port_group_equal(struct net_bridge_port_group *p, + struct net_bridge_port *port, + const unsigned char *src) +{ + if (p->port != port) + return false; + + if (!(port->flags & BR_MULTICAST_TO_UNICAST)) + return true; + + return ether_addr_equal(src, p->eth_addr); +} + static int br_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, - struct br_ip *group) + struct br_ip *group, + const unsigned char *src) { struct net_bridge_port_group __rcu **pp; struct net_bridge_port_group *p; @@ -758,13 +781,13 @@ static int br_multicast_add_group(struct net_bridge *br, for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { - if (p->port == port) + if (br_port_group_equal(p, port, src)) goto found; if ((unsigned long)p->port < (unsigned long)port) break; } - p = br_multicast_new_port_group(port, group, *pp, 0); + p = br_multicast_new_port_group(port, group, *pp, 0, src); if (unlikely(!p)) goto err; rcu_assign_pointer(*pp, p); @@ -783,7 +806,8 @@ err: static int br_ip4_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group, - __u16 vid) + __u16 vid, + const unsigned char *src) { struct br_ip br_group; @@ -794,14 +818,15 @@ static int br_ip4_multicast_add_group(struct net_bridge *br, br_group.proto = htons(ETH_P_IP); br_group.vid = vid; - return br_multicast_add_group(br, port, &br_group); + return br_multicast_add_group(br, port, &br_group, src); } #if IS_ENABLED(CONFIG_IPV6) static int 
br_ip6_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group, - __u16 vid) + __u16 vid, + const unsigned char *src) { struct br_ip br_group; @@ -812,7 +837,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, br_group.proto = htons(ETH_P_IPV6); br_group.vid = vid; - return br_multicast_add_group(br, port, &br_group); + return br_multicast_add_group(br, port, &br_group, src); } #endif @@ -1081,6 +1106,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, struct sk_buff *skb, u16 vid) { + const unsigned char *src; struct igmpv3_report *ih; struct igmpv3_grec *grec; int i; @@ -1121,12 +1147,14 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, continue; } + src = eth_hdr(skb)->h_source; if ((type == IGMPV3_CHANGE_TO_INCLUDE || type == IGMPV3_MODE_IS_INCLUDE) && ntohs(grec->grec_nsrcs) == 0) { - br_ip4_multicast_leave_group(br, port, group, vid); + br_ip4_multicast_leave_group(br, port, group, vid, src); } else { - err = br_ip4_multicast_add_group(br, port, group, vid); + err = br_ip4_multicast_add_group(br, port, group, vid, + src); if (err) break; } @@ -1141,6 +1169,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, struct sk_buff *skb, u16 vid) { + const unsigned char *src; struct icmp6hdr *icmp6h; struct mld2_grec *grec; int i; @@ -1188,14 +1217,16 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, continue; } + src = eth_hdr(skb)->h_source; if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || grec->grec_type == MLD2_MODE_IS_INCLUDE) && ntohs(*nsrcs) == 0) { br_ip6_multicast_leave_group(br, port, &grec->grec_mca, - vid); + vid, src); } else { err = br_ip6_multicast_add_group(br, port, - &grec->grec_mca, vid); + &grec->grec_mca, vid, + src); if (err) break; } @@ -1511,7 +1542,8 @@ br_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group, struct bridge_mcast_other_query *other_query, - struct bridge_mcast_own_query *own_query) + struct bridge_mcast_own_query *own_query, + const unsigned char *src) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; @@ -1535,7 +1567,7 @@ br_multicast_leave_group(struct net_bridge *br, for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { - if (p->port != port) + if (!br_port_group_equal(p, port, src)) continue; rcu_assign_pointer(*pp, p->next); @@ -1566,7 +1598,7 @@ br_multicast_leave_group(struct net_bridge *br, for (p = mlock_dereference(mp->ports, br); p != NULL; p = mlock_dereference(p->next, br)) { - if (p->port != port) + if (!br_port_group_equal(p, port, src)) continue; if (!hlist_unhashed(&p->mglist) && @@ -1617,7 +1649,8 @@ out: static void br_ip4_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group, - __u16 vid) + __u16 vid, + const unsigned char *src) { struct br_ip br_group; struct bridge_mcast_own_query *own_query; @@ -1632,14 +1665,15 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, br_group.vid = vid; br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query, - own_query); + own_query, src); } #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group, - __u16 vid) + __u16 vid, + const unsigned char *src) { struct br_ip br_group; struct bridge_mcast_own_query *own_query; @@ -1654,7 +1688,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, br_group.vid = vid; 
br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query, - own_query); + own_query, src); } #endif @@ -1712,6 +1746,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, u16 vid) { struct sk_buff *skb_trimmed = NULL; + const unsigned char *src; struct igmphdr *ih; int err; @@ -1731,13 +1766,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, } ih = igmp_hdr(skb); + src = eth_hdr(skb)->h_source; BR_INPUT_SKB_CB(skb)->igmp = ih->type; switch (ih->type) { case IGMP_HOST_MEMBERSHIP_REPORT: case IGMPV2_HOST_MEMBERSHIP_REPORT: BR_INPUT_SKB_CB(skb)->mrouters_only = 1; - err = br_ip4_multicast_add_group(br, port, ih->group, vid); + err = br_ip4_multicast_add_group(br, port, ih->group, vid, src); break; case IGMPV3_HOST_MEMBERSHIP_REPORT: err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid); @@ -1746,7 +1782,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, err = br_ip4_multicast_query(br, port, skb_trimmed, vid); break; case IGMP_HOST_LEAVE_MESSAGE: - br_ip4_multicast_leave_group(br, port, ih->group, vid); + br_ip4_multicast_leave_group(br, port, ih->group, vid, src); break; } @@ -1766,6 +1802,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, u16 vid) { struct sk_buff *skb_trimmed = NULL; + const unsigned char *src; struct mld_msg *mld; int err; @@ -1785,8 +1822,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, switch (mld->mld_type) { case ICMPV6_MGM_REPORT: + src = eth_hdr(skb)->h_source; BR_INPUT_SKB_CB(skb)->mrouters_only = 1; - err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid); + err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid, + src); break; case ICMPV6_MLD2_REPORT: err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid); @@ -1795,7 +1834,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, err = br_ip6_multicast_query(br, port, skb_trimmed, vid); break; case ICMPV6_MGM_REDUCTION: - br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid); + src = eth_hdr(skb)->h_source; + br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src); break; } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 71c7453268c1..6c087cd049b9 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -123,6 +123,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_GUARD */ + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + + nla_total_size(1) /* IFLA_BRPORT_MCAST_TO_UCAST */ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ @@ -173,6 +174,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, !!(p->flags & BR_ROOT_BLOCK)) || nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || + nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST, + !!(p->flags & BR_MULTICAST_TO_UNICAST)) || nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) || @@ -586,6 +589,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, [IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 }, + [IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ @@ -636,6 +640,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, 
IFLA_BRPORT_LEARNING, BR_LEARNING); br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD); + br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 8ce621e8345c..0b82a227fc34 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -177,6 +177,7 @@ struct net_bridge_port_group { struct timer_list timer; struct br_ip addr; unsigned char flags; + unsigned char eth_addr[ETH_ALEN]; }; struct net_bridge_mdb_entry @@ -599,7 +600,7 @@ void br_multicast_free_pg(struct rcu_head *head); struct net_bridge_port_group * br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group, struct net_bridge_port_group __rcu *next, - unsigned char flags); + unsigned char flags, const unsigned char *src); void br_mdb_init(void); void br_mdb_uninit(void); void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 8bd569695e76..05e8946ccc03 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -188,6 +188,7 @@ static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router, store_multicast_router); BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE); +BRPORT_ATTR_FLAG(multicast_to_unicast, BR_MULTICAST_TO_UNICAST); #endif static const struct brport_attribute *brport_attrs[] = { @@ -214,6 +215,7 @@ static const struct brport_attribute *brport_attrs[] = { #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &brport_attr_multicast_router, &brport_attr_multicast_fast_leave, + &brport_attr_multicast_to_unicast, #endif &brport_attr_proxyarp, &brport_attr_proxyarp_wifi, -- cgit v1.2.3 From 23e3d618e49eb560052fe59a3c9629d3650ba46e Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 22 Jan 2017 22:16:45 +0100 Subject: net: dsa: Fix inverted test for multiple CPU interface Remove the wrong !, otherwise we get false positives about having multiple CPU interfaces. Fixes: b22de490869d ("net: dsa: store CPU switch structure in the tree") Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/dsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 77cb78767f1d..1f3afeb673d6 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -225,7 +225,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) continue; if (!strcmp(name, "cpu")) { - if (!dst->cpu_switch) { + if (dst->cpu_switch) { netdev_err(dst->master_netdev, "multiple cpu ports?!\n"); return -EINVAL; -- cgit v1.2.3 From d5490f1f67116419ddf3b4c7b9f3d5a0fb2fd407 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Sun, 22 Jan 2017 23:02:45 +0100 Subject: net: dt-bindings: add RGMII TX delay configuration to meson8b-dwmac This allows configuring the RGMII TX clock delay. The RGMII clock is generated by underlying hardware of the the Meson 8b / GXBB DWMAC glue. The configuration depends on the actual hardware (no delay may be needed due to the design of the actual circuit, the PHY might add this delay, etc.). Signed-off-by: Martin Blumenstingl Tested-by: Neil Armstrong Acked-by: Rob Herring Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/meson-dwmac.txt | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt index 89e62ddc69ca..0703ad3f3c1e 100644 --- a/Documentation/devicetree/bindings/net/meson-dwmac.txt +++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt @@ -25,6 +25,22 @@ Required properties on Meson8b and newer: - "clkin0" - first parent clock of the internal mux - "clkin1" - second parent clock of the internal mux +Optional properties on Meson8b and newer: +- amlogic,tx-delay-ns: The internal RGMII TX clock delay (provided + by this driver) in nanoseconds. Allowed values + are: 0ns, 2ns, 4ns, 6ns. + When phy-mode is set to "rgmii" then the TX + delay should be explicitly configured. When + not configured a fallback of 2ns is used. + When the phy-mode is set to either "rgmii-id" + or "rgmii-txid" the TX clock delay is already + provided by the PHY. In that case this + property should be set to 0ns (which disables + the TX clock delay in the MAC to prevent the + clock from going off because both PHY and MAC + are adding a delay). + Any configuration is ignored when the phy-mode + is set to "rmii". Example for Meson6: -- cgit v1.2.3 From b765234e72fcba484a5fdd219e81ab526f416bf7 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Sun, 22 Jan 2017 23:02:46 +0100 Subject: net: stmmac: dwmac-meson8b: make the RGMII TX delay configurable Prior to this patch we were using a hardcoded RGMII TX clock delay of 2ns (= 1/4 cycle of the 125MHz RGMII TX clock). This value works for many boards, but unfortunately not for all (due to the way the actual circuit is designed, sometimes because the TX delay is enabled in the PHY, etc.). Making the TX delay on the MAC side configurable allows us to support all possible hardware combinations. This allows fixing a compatibility issue on some boards, where the RTL8211F PHY is configured to generate the TX delay. We can now turn off the TX delay in the MAC, because otherwise we would be applying the delay twice (which results in non-working TX traffic). Signed-off-by: Martin Blumenstingl Tested-by: Neil Armstrong Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index ffaed1f35efe..8840a360a0b7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -35,10 +35,6 @@ #define PRG_ETH0_TXDLY_SHIFT 5 #define PRG_ETH0_TXDLY_MASK GENMASK(6, 5) -#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT) /* divider for the result of m250_sel */ #define PRG_ETH0_CLK_M250_DIV_SHIFT 7 @@ -69,6 +65,8 @@ struct meson8b_dwmac { struct clk_divider m25_div; struct clk *m25_div_clk; + + u32 tx_delay_ns; }; static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, @@ -179,6 +177,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; unsigned long clk_rate; + u8 tx_dly_val; switch (dwmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: @@ -196,9 +195,13 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); - /* TX clock delay - all known boards use a 1/4 cycle delay */ + /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where + * 8ns are exactly one cycle of the 125MHz RGMII TX clock): + * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3 + */ + tx_dly_val = dwmac->tx_delay_ns >> 1; meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, - PRG_ETH0_TXDLY_QUARTER); + tx_dly_val << PRG_ETH0_TXDLY_SHIFT); break; case PHY_INTERFACE_MODE_RMII: @@ -284,6 +287,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + /* use 2ns as fallback since this value was previously hardcoded */ + if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns", + &dwmac->tx_delay_ns)) + dwmac->tx_delay_ns = 2; + ret = meson8b_init_clk(dwmac); if (ret) goto err_remove_config_dt; -- cgit v1.2.3 From b70f43a16182c7dbc0779bc5bd9f621711f13eb3 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 22 Jan 2017 21:17:32 -0800 Subject: net: phy: Fix typo for MDIO module boilerplate comment The module boilerplate macro is named mdio_module_driver and not module_mdio_driver, fix that. Fixes: a9049e0c513c ("mdio: Add support for mdio drivers.") Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/mdio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mdio.h b/include/linux/mdio.h index b6587a4b32e7..55a80d73cfc1 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -265,7 +265,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); /** - * module_mdio_driver() - Helper macro for registering mdio drivers + * mdio_module_driver() - Helper macro for registering mdio drivers * * Helper macro for MDIO drivers which do not do anything special in module * init/exit. Each module may only use this macro once, and calling it -- cgit v1.2.3 From 8a180cc79debf6302eb0afc819673b44d0fa5b21 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 22 Jan 2017 21:17:33 -0800 Subject: net: dsa: b53: Utilize mdio_module_driver Eliminate a bit of boilerplate code. 
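For context, mdio_module_driver() is assumed to be a thin module_driver() wrapper along the lines of the sketch below (the authoritative definition lives in include/linux/mdio.h; this is only an illustration of the pattern), which is why the hand-rolled module_init()/module_exit() pair in the diff that follows can simply go away:

/* Sketch of the assumed helper; see include/linux/mdio.h for the real macro. */
#define mdio_module_driver(_mdio_driver)				\
	module_driver(_mdio_driver, mdio_driver_register,		\
		      mdio_driver_unregister)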
Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_mdio.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index 477a16b5660a..fa7556f5d4fb 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -375,18 +375,7 @@ static struct mdio_driver b53_mdio_driver = { .of_match_table = b53_of_match, }, }; - -static int __init b53_mdio_driver_register(void) -{ - return mdio_driver_register(&b53_mdio_driver); -} -module_init(b53_mdio_driver_register); - -static void __exit b53_mdio_driver_unregister(void) -{ - mdio_driver_unregister(&b53_mdio_driver); -} -module_exit(b53_mdio_driver_unregister); +mdio_module_driver(b53_mdio_driver); MODULE_DESCRIPTION("B53 MDIO access driver"); MODULE_LICENSE("Dual BSD/GPL"); -- cgit v1.2.3 From 6ae0a6286171154661b74f7f550f9441c6008424 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 23 Jan 2017 11:07:08 +0100 Subject: net: Introduce psample, a new genetlink channel for packet sampling Add a general way for kernel modules to sample packets, without being tied to any specific subsystem. This netlink channel can be used by tc, iptables, etc. and allow to standardize packet sampling in the kernel. For every sampled packet, the psample module adds the following metadata fields: PSAMPLE_ATTR_IIFINDEX - the packets input ifindex, if applicable PSAMPLE_ATTR_OIFINDEX - the packet output ifindex, if applicable PSAMPLE_ATTR_ORIGSIZE - the packet's original size, in case it has been truncated during sampling PSAMPLE_ATTR_SAMPLE_GROUP - the packet's sample group, which is set by the user who initiated the sampling. This field allows the user to differentiate between several samplers working simultaneously and filter packets relevant to him PSAMPLE_ATTR_GROUP_SEQ - sequence counter of last sent packet. The sequence is kept for each group PSAMPLE_ATTR_SAMPLE_RATE - the sampling rate used for sampling the packets PSAMPLE_ATTR_DATA - the actual packet bits The sampled packets are sent to the PSAMPLE_NL_MCGRP_SAMPLE multicast group. In addition, add the GET_GROUPS netlink command which allows the user to see the current sample groups, their refcount and sequence number. This command currently supports only netlink dump mode. Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Reviewed-by: Jamal Hadi Salim Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- MAINTAINERS | 7 + include/net/psample.h | 36 ++++++ include/uapi/linux/Kbuild | 1 + include/uapi/linux/psample.h | 35 +++++ net/Kconfig | 1 + net/Makefile | 1 + net/psample/Kconfig | 15 +++ net/psample/Makefile | 5 + net/psample/psample.c | 301 +++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 402 insertions(+) create mode 100644 include/net/psample.h create mode 100644 include/uapi/linux/psample.h create mode 100644 net/psample/Kconfig create mode 100644 net/psample/Makefile create mode 100644 net/psample/psample.c diff --git a/MAINTAINERS b/MAINTAINERS index 3c84a8fecc09..d76fccd09266 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9957,6 +9957,13 @@ L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/block/ps3vram.c +PSAMPLE PACKET SAMPLING SUPPORT: +M: Yotam Gigi +S: Maintained +F: net/psample +F: include/net/psample.h +F: include/uapi/linux/psample.h + PSTORE FILESYSTEM M: Anton Vorontsov M: Colin Cross diff --git a/include/net/psample.h b/include/net/psample.h new file mode 100644 index 000000000000..8888b0e1a82e --- /dev/null +++ b/include/net/psample.h @@ -0,0 +1,36 @@ +#ifndef __NET_PSAMPLE_H +#define __NET_PSAMPLE_H + +#include +#include +#include + +struct psample_group { + struct list_head list; + struct net *net; + u32 group_num; + u32 refcount; + u32 seq; +}; + +struct psample_group *psample_group_get(struct net *net, u32 group_num); +void psample_group_put(struct psample_group *group); + +#if IS_ENABLED(CONFIG_PSAMPLE) + +void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, + u32 trunc_size, int in_ifindex, int out_ifindex, + u32 sample_rate); + +#else + +static inline void psample_sample_packet(struct psample_group *group, + struct sk_buff *skb, u32 trunc_size, + int in_ifindex, int out_ifindex, + u32 sample_rate) +{ +} + +#endif + +#endif /* __NET_PSAMPLE_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index e600b50be77e..80ad741a42fa 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -305,6 +305,7 @@ header-y += netrom.h header-y += net_namespace.h header-y += net_tstamp.h header-y += nfc.h +header-y += psample.h header-y += nfs2.h header-y += nfs3.h header-y += nfs4.h diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h new file mode 100644 index 000000000000..ed48996ec0e8 --- /dev/null +++ b/include/uapi/linux/psample.h @@ -0,0 +1,35 @@ +#ifndef __UAPI_PSAMPLE_H +#define __UAPI_PSAMPLE_H + +enum { + /* sampled packet metadata */ + PSAMPLE_ATTR_IIFINDEX, + PSAMPLE_ATTR_OIFINDEX, + PSAMPLE_ATTR_ORIGSIZE, + PSAMPLE_ATTR_SAMPLE_GROUP, + PSAMPLE_ATTR_GROUP_SEQ, + PSAMPLE_ATTR_SAMPLE_RATE, + PSAMPLE_ATTR_DATA, + + /* commands attributes */ + PSAMPLE_ATTR_GROUP_REFCOUNT, + + __PSAMPLE_ATTR_MAX +}; + +enum psample_command { + PSAMPLE_CMD_SAMPLE, + PSAMPLE_CMD_GET_GROUP, + PSAMPLE_CMD_NEW_GROUP, + PSAMPLE_CMD_DEL_GROUP, +}; + +/* Can be overridden at runtime by module option */ +#define PSAMPLE_ATTR_MAX (__PSAMPLE_ATTR_MAX - 1) + +#define PSAMPLE_NL_MCGRP_CONFIG_NAME "config" +#define PSAMPLE_NL_MCGRP_SAMPLE_NAME "packets" +#define PSAMPLE_GENL_NAME "psample" +#define PSAMPLE_GENL_VERSION 1 + +#endif diff --git a/net/Kconfig b/net/Kconfig index 92ae1500d9e1..ce4aee69fc0d 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -390,6 +390,7 @@ source "net/9p/Kconfig" source "net/caif/Kconfig" source "net/ceph/Kconfig" source "net/nfc/Kconfig" +source "net/psample/Kconfig" config LWTUNNEL bool "Network light weight tunnels" diff --git a/net/Makefile b/net/Makefile index 
5d6e0e5ff7f8..7d41de48310e 100644 --- a/net/Makefile +++ b/net/Makefile @@ -70,6 +70,7 @@ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ obj-$(CONFIG_CEPH_LIB) += ceph/ obj-$(CONFIG_BATMAN_ADV) += batman-adv/ obj-$(CONFIG_NFC) += nfc/ +obj-$(CONFIG_PSAMPLE) += psample/ obj-$(CONFIG_OPENVSWITCH) += openvswitch/ obj-$(CONFIG_VSOCKETS) += vmw_vsock/ obj-$(CONFIG_MPLS) += mpls/ diff --git a/net/psample/Kconfig b/net/psample/Kconfig new file mode 100644 index 000000000000..d850246a6059 --- /dev/null +++ b/net/psample/Kconfig @@ -0,0 +1,15 @@ +# +# psample packet sampling configuration +# + +menuconfig PSAMPLE + depends on NET + tristate "Packet-sampling netlink channel" + default n + help + Say Y here to add support for packet-sampling netlink channel + This netlink channel allows transferring packets alongside some + metadata to userspace. + + To compile this support as a module, choose M here: the module will + be called psample. diff --git a/net/psample/Makefile b/net/psample/Makefile new file mode 100644 index 000000000000..609b0a79c9f3 --- /dev/null +++ b/net/psample/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the psample netlink channel +# + +obj-$(CONFIG_PSAMPLE) += psample.o diff --git a/net/psample/psample.c b/net/psample/psample.c new file mode 100644 index 000000000000..8aa58a918783 --- /dev/null +++ b/net/psample/psample.c @@ -0,0 +1,301 @@ +/* + * net/psample/psample.c - Netlink channel for packet sampling + * Copyright (c) 2017 Yotam Gigi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PSAMPLE_MAX_PACKET_SIZE 0xffff + +static LIST_HEAD(psample_groups_list); +static DEFINE_SPINLOCK(psample_groups_lock); + +/* multicast groups */ +enum psample_nl_multicast_groups { + PSAMPLE_NL_MCGRP_CONFIG, + PSAMPLE_NL_MCGRP_SAMPLE, +}; + +static const struct genl_multicast_group psample_nl_mcgrps[] = { + [PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME }, + [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME }, +}; + +static struct genl_family psample_nl_family __ro_after_init; + +static int psample_group_nl_fill(struct sk_buff *msg, + struct psample_group *group, + enum psample_command cmd, u32 portid, u32 seq, + int flags) +{ + void *hdr; + int ret; + + hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num); + if (ret < 0) + goto error; + + ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount); + if (ret < 0) + goto error; + + ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq); + if (ret < 0) + goto error; + + genlmsg_end(msg, hdr); + return 0; + +error: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct psample_group *group; + int start = cb->args[0]; + int idx = 0; + int err; + + spin_lock(&psample_groups_lock); + list_for_each_entry(group, &psample_groups_list, list) { + if (!net_eq(group->net, sock_net(msg->sk))) + continue; + if (idx < start) { + idx++; + continue; + } + err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI); + if (err) + break; + idx++; + } + + 
spin_unlock(&psample_groups_lock); + cb->args[0] = idx; + return msg->len; +} + +static const struct genl_ops psample_nl_ops[] = { + { + .cmd = PSAMPLE_CMD_GET_GROUP, + .dumpit = psample_nl_cmd_get_group_dumpit, + /* can be retrieved by unprivileged users */ + } +}; + +static struct genl_family psample_nl_family __ro_after_init = { + .name = PSAMPLE_GENL_NAME, + .version = PSAMPLE_GENL_VERSION, + .maxattr = PSAMPLE_ATTR_MAX, + .netnsok = true, + .module = THIS_MODULE, + .mcgrps = psample_nl_mcgrps, + .ops = psample_nl_ops, + .n_ops = ARRAY_SIZE(psample_nl_ops), + .n_mcgrps = ARRAY_SIZE(psample_nl_mcgrps), +}; + +static void psample_group_notify(struct psample_group *group, + enum psample_command cmd) +{ + struct sk_buff *msg; + int err; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!msg) + return; + + err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI); + if (!err) + genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0, + PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC); + else + nlmsg_free(msg); +} + +static struct psample_group *psample_group_create(struct net *net, + u32 group_num) +{ + struct psample_group *group; + + group = kzalloc(sizeof(*group), GFP_ATOMIC); + if (!group) + return NULL; + + group->net = net; + group->group_num = group_num; + list_add_tail(&group->list, &psample_groups_list); + + psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP); + return group; +} + +static void psample_group_destroy(struct psample_group *group) +{ + psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP); + list_del(&group->list); + kfree(group); +} + +static struct psample_group * +psample_group_lookup(struct net *net, u32 group_num) +{ + struct psample_group *group; + + list_for_each_entry(group, &psample_groups_list, list) + if ((group->group_num == group_num) && (group->net == net)) + return group; + return NULL; +} + +struct psample_group *psample_group_get(struct net *net, u32 group_num) +{ + struct psample_group *group; + + spin_lock(&psample_groups_lock); + + group = psample_group_lookup(net, group_num); + if (!group) { + group = psample_group_create(net, group_num); + if (!group) + goto out; + } + group->refcount++; + +out: + spin_unlock(&psample_groups_lock); + return group; +} +EXPORT_SYMBOL_GPL(psample_group_get); + +void psample_group_put(struct psample_group *group) +{ + spin_lock(&psample_groups_lock); + + if (--group->refcount == 0) + psample_group_destroy(group); + + spin_unlock(&psample_groups_lock); +} +EXPORT_SYMBOL_GPL(psample_group_put); + +void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, + u32 trunc_size, int in_ifindex, int out_ifindex, + u32 sample_rate) +{ + struct sk_buff *nl_skb; + int data_len; + int meta_len; + void *data; + int ret; + + meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) + + (out_ifindex ? 
nla_total_size(sizeof(u16)) : 0) + + nla_total_size(sizeof(u32)) + /* sample_rate */ + nla_total_size(sizeof(u32)) + /* orig_size */ + nla_total_size(sizeof(u32)) + /* group_num */ + nla_total_size(sizeof(u32)); /* seq */ + + data_len = min(skb->len, trunc_size); + if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE) + data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN + - NLA_ALIGNTO; + + nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC); + if (unlikely(!nl_skb)) + return; + + data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0, + PSAMPLE_CMD_SAMPLE); + if (unlikely(!data)) + goto error; + + if (in_ifindex) { + ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex); + if (unlikely(ret < 0)) + goto error; + } + + if (out_ifindex) { + ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex); + if (unlikely(ret < 0)) + goto error; + } + + ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate); + if (unlikely(ret < 0)) + goto error; + + ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len); + if (unlikely(ret < 0)) + goto error; + + ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num); + if (unlikely(ret < 0)) + goto error; + + ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++); + if (unlikely(ret < 0)) + goto error; + + if (data_len) { + int nla_len = nla_total_size(data_len); + struct nlattr *nla; + + nla = (struct nlattr *)skb_put(nl_skb, nla_len); + nla->nla_type = PSAMPLE_ATTR_DATA; + nla->nla_len = nla_attr_size(data_len); + + if (skb_copy_bits(skb, 0, nla_data(nla), data_len)) + goto error; + } + + genlmsg_end(nl_skb, data); + genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0, + PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC); + + return; +error: + pr_err_ratelimited("Could not create psample log message\n"); + nlmsg_free(nl_skb); +} +EXPORT_SYMBOL_GPL(psample_sample_packet); + +static int __init psample_module_init(void) +{ + return genl_register_family(&psample_nl_family); +} + +static void __exit psample_module_exit(void) +{ + genl_unregister_family(&psample_nl_family); +} + +module_init(psample_module_init); +module_exit(psample_module_exit); + +MODULE_AUTHOR("Yotam Gigi "); +MODULE_DESCRIPTION("netlink channel for packet sampling"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 5c5670fae43027778e84b9d9ff3b9d91a10a8131 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 23 Jan 2017 11:07:09 +0100 Subject: net/sched: Introduce sample tc action This action allows the user to sample traffic matched by tc classifier. The sampling consists of choosing packets randomly and sampling them using the psample module. The user can configure the psample group number, the sampling rate and the packet's truncation (to save kernel-user traffic). Example: To sample ingress traffic from interface eth1, one may use the commands: tc qdisc add dev eth1 handle ffff: ingress tc filter add dev eth1 parent ffff: \ matchall action sample rate 12 group 4 Where the first command adds an ingress qdisc and the second starts sampling randomly with an average of one sampled packet per 12 packets on dev eth1 to psample group 4. Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- include/net/tc_act/tc_sample.h | 50 +++++++ include/uapi/linux/tc_act/Kbuild | 1 + include/uapi/linux/tc_act/tc_sample.h | 26 ++++ net/sched/Kconfig | 12 ++ net/sched/Makefile | 1 + net/sched/act_sample.c | 274 ++++++++++++++++++++++++++++++++++ 6 files changed, 364 insertions(+) create mode 100644 include/net/tc_act/tc_sample.h create mode 100644 include/uapi/linux/tc_act/tc_sample.h create mode 100644 net/sched/act_sample.c diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h new file mode 100644 index 000000000000..89e9305be880 --- /dev/null +++ b/include/net/tc_act/tc_sample.h @@ -0,0 +1,50 @@ +#ifndef __NET_TC_SAMPLE_H +#define __NET_TC_SAMPLE_H + +#include +#include +#include + +struct tcf_sample { + struct tc_action common; + u32 rate; + bool truncate; + u32 trunc_size; + struct psample_group __rcu *psample_group; + u32 psample_group_num; + struct list_head tcfm_list; + struct rcu_head rcu; +}; +#define to_sample(a) ((struct tcf_sample *)a) + +static inline bool is_tcf_sample(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + return a->ops && a->ops->type == TCA_ACT_SAMPLE; +#else + return false; +#endif +} + +static inline __u32 tcf_sample_rate(const struct tc_action *a) +{ + return to_sample(a)->rate; +} + +static inline bool tcf_sample_truncate(const struct tc_action *a) +{ + return to_sample(a)->truncate; +} + +static inline int tcf_sample_trunc_size(const struct tc_action *a) +{ + return to_sample(a)->trunc_size; +} + +static inline struct psample_group * +tcf_sample_psample_group(const struct tc_action *a) +{ + return rcu_dereference(to_sample(a)->psample_group); +} + +#endif /* __NET_TC_SAMPLE_H */ diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index e3db7403296f..ba62ddf0e58a 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -4,6 +4,7 @@ header-y += tc_defact.h header-y += tc_gact.h header-y += tc_ipt.h header-y += tc_mirred.h +header-y += tc_sample.h header-y += tc_nat.h header-y += tc_pedit.h header-y += tc_skbedit.h diff --git a/include/uapi/linux/tc_act/tc_sample.h b/include/uapi/linux/tc_act/tc_sample.h new file mode 100644 index 000000000000..edc9058bb30d --- /dev/null +++ b/include/uapi/linux/tc_act/tc_sample.h @@ -0,0 +1,26 @@ +#ifndef __LINUX_TC_SAMPLE_H +#define __LINUX_TC_SAMPLE_H + +#include +#include +#include + +#define TCA_ACT_SAMPLE 26 + +struct tc_sample { + tc_gen; +}; + +enum { + TCA_SAMPLE_UNSPEC, + TCA_SAMPLE_TM, + TCA_SAMPLE_PARMS, + TCA_SAMPLE_RATE, + TCA_SAMPLE_TRUNC_SIZE, + TCA_SAMPLE_PSAMPLE_GROUP, + TCA_SAMPLE_PAD, + __TCA_SAMPLE_MAX +}; +#define TCA_SAMPLE_MAX (__TCA_SAMPLE_MAX - 1) + +#endif diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a9aa38d43fa7..72cfa3a6bac0 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -650,6 +650,18 @@ config NET_ACT_MIRRED To compile this code as a module, choose M here: the module will be called act_mirred. +config NET_ACT_SAMPLE + tristate "Traffic Sampling" + depends on NET_CLS_ACT + select PSAMPLE + ---help--- + Say Y here to allow packet sampling tc action. The packet sample + action consists of statistically choosing packets and sampling + them using the psample module. + + To compile this code as a module, choose M here: the + module will be called act_sample. 
+ config NET_ACT_IPT tristate "IPtables targets" depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES diff --git a/net/sched/Makefile b/net/sched/Makefile index 4bdda3634e0b..7b915d226de7 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_NET_CLS_ACT) += act_api.o obj-$(CONFIG_NET_ACT_POLICE) += act_police.o obj-$(CONFIG_NET_ACT_GACT) += act_gact.o obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o +obj-$(CONFIG_NET_ACT_SAMPLE) += act_sample.o obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o obj-$(CONFIG_NET_ACT_NAT) += act_nat.o obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c new file mode 100644 index 000000000000..39229756de07 --- /dev/null +++ b/net/sched/act_sample.c @@ -0,0 +1,274 @@ +/* + * net/sched/act_sample.c - Packet sampling tc action + * Copyright (c) 2017 Yotam Gigi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define SAMPLE_TAB_MASK 7 +static unsigned int sample_net_id; +static struct tc_action_ops act_sample_ops; + +static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = { + [TCA_SAMPLE_PARMS] = { .len = sizeof(struct tc_sample) }, + [TCA_SAMPLE_RATE] = { .type = NLA_U32 }, + [TCA_SAMPLE_TRUNC_SIZE] = { .type = NLA_U32 }, + [TCA_SAMPLE_PSAMPLE_GROUP] = { .type = NLA_U32 }, +}; + +static int tcf_sample_init(struct net *net, struct nlattr *nla, + struct nlattr *est, struct tc_action **a, int ovr, + int bind) +{ + struct tc_action_net *tn = net_generic(net, sample_net_id); + struct nlattr *tb[TCA_SAMPLE_MAX + 1]; + struct psample_group *psample_group; + struct tc_sample *parm; + struct tcf_sample *s; + bool exists = false; + int ret; + + if (!nla) + return -EINVAL; + ret = nla_parse_nested(tb, TCA_SAMPLE_MAX, nla, sample_policy); + if (ret < 0) + return ret; + if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] || + !tb[TCA_SAMPLE_PSAMPLE_GROUP]) + return -EINVAL; + + parm = nla_data(tb[TCA_SAMPLE_PARMS]); + + exists = tcf_hash_check(tn, parm->index, a, bind); + if (exists && bind) + return 0; + + if (!exists) { + ret = tcf_hash_create(tn, parm->index, est, a, + &act_sample_ops, bind, false); + if (ret) + return ret; + ret = ACT_P_CREATED; + } else { + tcf_hash_release(*a, bind); + if (!ovr) + return -EEXIST; + } + s = to_sample(*a); + + ASSERT_RTNL(); + s->tcf_action = parm->action; + s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); + s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); + psample_group = psample_group_get(net, s->psample_group_num); + if (!psample_group) + return -ENOMEM; + RCU_INIT_POINTER(s->psample_group, psample_group); + + if (tb[TCA_SAMPLE_TRUNC_SIZE]) { + s->truncate = true; + s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); + } + + if (ret == ACT_P_CREATED) + tcf_hash_insert(tn, *a); + return ret; +} + +static void tcf_sample_cleanup_rcu(struct rcu_head *rcu) +{ + struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu); + struct psample_group *psample_group; + + psample_group = rcu_dereference_protected(s->psample_group, 1); + RCU_INIT_POINTER(s->psample_group, NULL); + psample_group_put(psample_group); +} + +static void tcf_sample_cleanup(struct tc_action *a, int bind) +{ + struct tcf_sample *s = to_sample(a); + + 
call_rcu(&s->rcu, tcf_sample_cleanup_rcu); +} + +static bool tcf_sample_dev_ok_push(struct net_device *dev) +{ + switch (dev->type) { + case ARPHRD_TUNNEL: + case ARPHRD_TUNNEL6: + case ARPHRD_SIT: + case ARPHRD_IPGRE: + case ARPHRD_VOID: + case ARPHRD_NONE: + return false; + default: + return true; + } +} + +static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) +{ + struct tcf_sample *s = to_sample(a); + struct psample_group *psample_group; + int retval; + int size; + int iif; + int oif; + + tcf_lastuse_update(&s->tcf_tm); + bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb); + retval = READ_ONCE(s->tcf_action); + + rcu_read_lock(); + psample_group = rcu_dereference(s->psample_group); + + /* randomly sample packets according to rate */ + if (psample_group && (prandom_u32() % s->rate == 0)) { + if (!skb_at_tc_ingress(skb)) { + iif = skb->skb_iif; + oif = skb->dev->ifindex; + } else { + iif = skb->dev->ifindex; + oif = 0; + } + + /* on ingress, the mac header gets popped, so push it back */ + if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev)) + skb_push(skb, skb->mac_len); + + size = s->truncate ? s->trunc_size : skb->len; + psample_sample_packet(psample_group, skb, size, iif, oif, + s->rate); + + if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev)) + skb_pull(skb, skb->mac_len); + } + + rcu_read_unlock(); + return retval; +} + +static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a, + int bind, int ref) +{ + unsigned char *b = skb_tail_pointer(skb); + struct tcf_sample *s = to_sample(a); + struct tc_sample opt = { + .index = s->tcf_index, + .action = s->tcf_action, + .refcnt = s->tcf_refcnt - ref, + .bindcnt = s->tcf_bindcnt - bind, + }; + struct tcf_t t; + + if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; + + tcf_tm_dump(&t, &s->tcf_tm); + if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate)) + goto nla_put_failure; + + if (s->truncate) + if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num)) + goto nla_put_failure; + return skb->len; + +nla_put_failure: + nlmsg_trim(skb, b); + return -1; +} + +static int tcf_sample_walker(struct net *net, struct sk_buff *skb, + struct netlink_callback *cb, int type, + const struct tc_action_ops *ops) +{ + struct tc_action_net *tn = net_generic(net, sample_net_id); + + return tcf_generic_walker(tn, skb, cb, type, ops); +} + +static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index) +{ + struct tc_action_net *tn = net_generic(net, sample_net_id); + + return tcf_hash_search(tn, a, index); +} + +static struct tc_action_ops act_sample_ops = { + .kind = "sample", + .type = TCA_ACT_SAMPLE, + .owner = THIS_MODULE, + .act = tcf_sample_act, + .dump = tcf_sample_dump, + .init = tcf_sample_init, + .cleanup = tcf_sample_cleanup, + .walk = tcf_sample_walker, + .lookup = tcf_sample_search, + .size = sizeof(struct tcf_sample), +}; + +static __net_init int sample_init_net(struct net *net) +{ + struct tc_action_net *tn = net_generic(net, sample_net_id); + + return tc_action_net_init(tn, &act_sample_ops, SAMPLE_TAB_MASK); +} + +static void __net_exit sample_exit_net(struct net *net) +{ + struct tc_action_net *tn = net_generic(net, sample_net_id); + + tc_action_net_exit(tn); +} + +static struct pernet_operations sample_net_ops = { + .init = 
sample_init_net, + .exit = sample_exit_net, + .id = &sample_net_id, + .size = sizeof(struct tc_action_net), +}; + +static int __init sample_init_module(void) +{ + return tcf_register_action(&act_sample_ops, &sample_net_ops); +} + +static void __exit sample_cleanup_module(void) +{ + tcf_unregister_action(&act_sample_ops, &sample_net_ops); +} + +module_init(sample_init_module); +module_exit(sample_cleanup_module); + +MODULE_AUTHOR("Yotam Gigi "); +MODULE_DESCRIPTION("Packet sampling action"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 0677d6828b039bdccc6161a4ea6ae715489dca7e Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 23 Jan 2017 11:07:10 +0100 Subject: mlxsw: reg: add the Monitoring Packet Sampling Configuration Register The MPSC register allows to configure ingress packet sampling on specific port of the mlxsw device. The sampled packets are then trapped via PKT_SAMPLE trap. Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 41 +++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1357fe04391b..9fb0316b3a30 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4965,6 +4965,46 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, MLXSW_REG_MLCR_DURATION_MAX : 0); } +/* MPSC - Monitoring Packet Sampling Configuration Register + * -------------------------------------------------------- + * MPSC Register is used to configure the Packet Sampling mechanism. + */ +#define MLXSW_REG_MPSC_ID 0x9080 +#define MLXSW_REG_MPSC_LEN 0x1C + +MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN); + +/* reg_mpsc_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8); + +/* reg_mpsc_e + * Enable sampling on port local_port + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1); + +#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL + +/* reg_mpsc_rate + * Sampling rate = 1 out of rate packets (with randomization around + * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32); + +static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, + u32 rate) +{ + MLXSW_REG_ZERO(mpsc, payload); + mlxsw_reg_mpsc_local_port_set(payload, local_port); + mlxsw_reg_mpsc_e_set(payload, e); + mlxsw_reg_mpsc_rate_set(payload, rate); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. @@ -5429,6 +5469,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mpat), MLXSW_REG(mpar), MLXSW_REG(mlcr), + MLXSW_REG(mpsc), MLXSW_REG(sbpr), MLXSW_REG(sbcm), MLXSW_REG(sbpm), -- cgit v1.2.3 From 98d0f7b9acda0cf15259f83ca1758ad6017c0942 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 23 Jan 2017 11:07:11 +0100 Subject: mlxsw: spectrum: Add packet sample offloading support Using the MPSC register, add the functions that configure port-based packet sampling in hardware and the necessary datatypes in the mlxsw_sp_port struct. In addition, add the necessary trap for sampled packets and integrate with matchall offloading to allow offloading of the sample tc action. 
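Once the action is offloaded, the hardware-sampled frames are still delivered to user space through the psample generic netlink family introduced earlier in this series. A rough libnl-3 consumer that joins the "packets" multicast group might look like the sketch below (libnl-genl function names as commonly used, error handling stripped; this is an assumption-laden illustration, not code from this series):

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/psample.h>

static int show_sample(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[PSAMPLE_ATTR_MAX + 1];

	/* Pull the psample attributes out of the multicast notification. */
	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, PSAMPLE_ATTR_MAX, NULL))
		return NL_SKIP;

	if (attrs[PSAMPLE_ATTR_SAMPLE_GROUP] && attrs[PSAMPLE_ATTR_ORIGSIZE])
		printf("group %u: sampled packet, original size %u bytes\n",
		       nla_get_u32(attrs[PSAMPLE_ATTR_SAMPLE_GROUP]),
		       nla_get_u32(attrs[PSAMPLE_ATTR_ORIGSIZE]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	/* Join the PSAMPLE_NL_MCGRP_SAMPLE_NAME ("packets") group. */
	grp = genl_ctrl_resolve_grp(sk, PSAMPLE_GENL_NAME,
				    PSAMPLE_NL_MCGRP_SAMPLE_NAME);
	nl_socket_add_membership(sk, grp);

	/* Multicast notifications are unsolicited, so drop sequence checks. */
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, show_sample, NULL);

	for (;;)
		nl_recvmsgs_default(sk);
	return 0;
}

Such a listener would be built against libnl-genl-3.0; the group number it prints corresponds to the one passed to the sample action in the tc command below.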
The current offload support is for the tc command: tc filter add dev parent ffff: \ matchall skip_sw \ action sample rate group [trunc ] Where only ingress qdiscs are supported, and only a combination of matchall classifier and sample action will lead to activating hardware packet sampling. Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 111 +++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 10 +++ drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 + 3 files changed, 122 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3dbd82e636f8..467aa5288935 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -57,6 +57,7 @@ #include #include #include +#include #include "spectrum.h" #include "pci.h" @@ -469,6 +470,16 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); } +static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable, u32 rate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mpsc_pl[MLXSW_REG_MPSC_LEN]; + + mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); +} + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up) { @@ -1218,6 +1229,51 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); } +static int +mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls, + const struct tc_action *a, + bool ingress) +{ + int err; + + if (!mlxsw_sp_port->sample) + return -EOPNOTSUPP; + if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { + netdev_err(mlxsw_sp_port->dev, "sample already active\n"); + return -EEXIST; + } + if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) { + netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); + return -EOPNOTSUPP; + } + + rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, + tcf_sample_psample_group(a)); + mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a); + mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a); + mlxsw_sp_port->sample->rate = tcf_sample_rate(a); + + err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a)); + if (err) + goto err_port_sample_set; + return 0; + +err_port_sample_set: + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); + return err; +} + +static void +mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) +{ + if (!mlxsw_sp_port->sample) + return; + + mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); +} + static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, __be16 protocol, struct tc_cls_matchall_offload *cls, @@ -1248,6 +1304,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mirror = &mall_tc_entry->mirror; err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, mirror, a, ingress); + } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { + mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; + err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls, + a, 
ingress); } else { err = -EOPNOTSUPP; } @@ -1281,6 +1341,9 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, &mall_tc_entry->mirror); break; + case MLXSW_SP_PORT_MALL_SAMPLE: + mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); + break; default: WARN_ON(1); } @@ -2259,6 +2322,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_alloc_stats; } + mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), + GFP_KERNEL); + if (!mlxsw_sp_port->sample) { + err = -ENOMEM; + goto err_alloc_sample; + } + mlxsw_sp_port->hw_stats.cache = kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); @@ -2387,6 +2457,8 @@ err_dev_addr_init: err_port_swid_set: kfree(mlxsw_sp_port->hw_stats.cache); err_alloc_hw_stats: + kfree(mlxsw_sp_port->sample); +err_alloc_sample: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: kfree(mlxsw_sp_port->untagged_vlans); @@ -2433,6 +2505,7 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); kfree(mlxsw_sp_port->hw_stats.cache); + kfree(mlxsw_sp_port->sample); free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); @@ -2734,6 +2807,41 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); } +static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct psample_group *psample_group; + u32 size; + + if (unlikely(!mlxsw_sp_port)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", + local_port); + goto out; + } + if (unlikely(!mlxsw_sp_port->sample)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", + local_port); + goto out; + } + + size = mlxsw_sp_port->sample->truncate ? 
+ mlxsw_sp_port->sample->trunc_size : skb->len; + + rcu_read_lock(); + psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); + if (!psample_group) + goto out_unlock; + psample_sample_packet(psample_group, skb, size, + mlxsw_sp_port->dev->ifindex, 0, + mlxsw_sp_port->sample->rate); +out_unlock: + rcu_read_unlock(); +out: + consume_skb(skb); +} + #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) @@ -2769,6 +2877,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + /* PKT Sample trap */ + MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, + false, SP_IP2ME, DISCARD) }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index cc1af19d699a..bc3efe1629c4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -46,6 +46,7 @@ #include #include #include +#include #include "port.h" #include "core.h" @@ -229,6 +230,7 @@ struct mlxsw_sp_span_entry { enum mlxsw_sp_port_mall_action_type { MLXSW_SP_PORT_MALL_MIRROR, + MLXSW_SP_PORT_MALL_SAMPLE, }; struct mlxsw_sp_port_mall_mirror_tc_entry { @@ -315,6 +317,13 @@ struct mlxsw_sp_port_pcpu_stats { u32 tx_dropped; }; +struct mlxsw_sp_port_sample { + struct psample_group __rcu *psample_group; + u32 trunc_size; + u32 rate; + bool truncate; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; @@ -361,6 +370,7 @@ struct mlxsw_sp_port { struct rtnl_link_stats64 *cache; struct delayed_work update_dw; } hw_stats; + struct mlxsw_sp_port_sample *sample; }; struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 7ab275deacac..02ea48b15eb5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -54,6 +54,7 @@ enum { MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, -- cgit v1.2.3 From 8b86b2c1b83e3a3a9ac5192b4e9d4ee23d88b218 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 21 Jan 2017 14:43:16 +0100 Subject: net: broadcom: bnx2x: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Acked-by: Yuval Mintz Signed-off-by: David S. 
Miller --- .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 199 +++++++++++---------- 1 file changed, 109 insertions(+), 90 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 5f19427c7b27..43423744fdfa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp) return port_type; } -static int bnx2x_get_vf_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int bnx2x_get_vf_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (bp->state == BNX2X_STATE_OPEN) { if (test_bit(BNX2X_LINK_REPORT_FD, &bp->vf_link_vars.link_report_flags)) - cmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; - ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); + cmd->base.speed = bp->vf_link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = PORT_OTHER; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + cmd->base.port = PORT_OTHER; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); int cfg_idx = bnx2x_get_link_cfg_idx(bp); u32 media_type; + u32 supported, advertising, lp_advertising; + + ethtool_convert_link_mode_to_legacy_u32(&lp_advertising, + cmd->link_modes.lp_advertising); /* Dual Media boards present all available port types */ - cmd->supported = bp->port.supported[cfg_idx] | + supported = bp->port.supported[cfg_idx] | (bp->port.supported[cfg_idx ^ 1] & (SUPPORTED_TP | SUPPORTED_FIBRE)); - cmd->advertising = bp->port.advertising[cfg_idx]; + advertising = bp->port.advertising[cfg_idx]; media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type; if (media_type == ETH_PHY_SFP_1G_FIBER) { - cmd->supported &= ~(SUPPORTED_10000baseT_Full); - cmd->advertising &= ~(ADVERTISED_10000baseT_Full); + supported &= ~(SUPPORTED_10000baseT_Full); + advertising &= ~(ADVERTISED_10000baseT_Full); } if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && !(bp->flags & MF_FUNC_DIS)) { - cmd->duplex = bp->link_vars.duplex; + cmd->base.duplex = bp->link_vars.duplex; if (IS_MF(bp) && !BP_NOMCP(bp)) - 
ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); + cmd->base.speed = bnx2x_get_mf_speed(bp); else - ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); + cmd->base.speed = bp->link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = bnx2x_get_port_type(bp); + cmd->base.port = bnx2x_get_port_type(bp); - cmd->phy_address = bp->mdio.prtad; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.phy_address = bp->mdio.prtad; if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) - cmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; else - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; /* Publish LP advertised speeds and FC */ if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { u32 status = bp->link_vars.link_status; - cmd->lp_advertising |= ADVERTISED_Autoneg; + lp_advertising |= ADVERTISED_Autoneg; if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Pause; + lp_advertising |= ADVERTISED_Pause; if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Asym_Pause; + lp_advertising |= ADVERTISED_Asym_Pause; if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Half; + lp_advertising |= ADVERTISED_10baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Full; + lp_advertising |= ADVERTISED_10baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Half; + lp_advertising |= ADVERTISED_100baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Full; + lp_advertising |= ADVERTISED_100baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_1000baseT_Half; + lp_advertising |= ADVERTISED_1000baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseKX_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_2500baseX_Full; + lp_advertising |= ADVERTISED_2500baseX_Full; if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseKR_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; + lp_advertising |= ADVERTISED_20000baseKR2_Full; } - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port 
%d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; u32 speed, phy_idx; + u32 supported; + u8 duplex = cmd->base.duplex; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (IS_MF_SD(bp)) return 0; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; + if (duplex == DUPLEX_UNKNOWN) + duplex = DUPLEX_FULL; if (IS_MF_SI(bp)) { u32 part; @@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cfg_idx = bnx2x_get_link_cfg_idx(bp); old_multi_phy_config = bp->link_params.multi_phy_config; - if (cmd->port != bnx2x_get_port_type(bp)) { - switch (cmd->port) { + if (cmd->base.port != bnx2x_get_port_type(bp)) { + switch (cmd->base.port) { case PORT_TP: if (!(bp->port.supported[0] & SUPPORTED_TP || bp->port.supported[1] & SUPPORTED_TP)) { @@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->link_params.multi_phy_config = old_multi_phy_config; DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx); - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 an_supported_speed = bp->port.supported[cfg_idx]; if (bp->link_params.phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) @@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } /* advertise the requested speed and duplex if supported */ - if (cmd->advertising & ~an_supported_speed) { + if (advertising & ~an_supported_speed) { DP(BNX2X_MSG_ETHTOOL, "Advertisement parameters are not supported\n"); return -EINVAL; } bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | - cmd->advertising); - if (cmd->advertising) { + advertising); + if (advertising) { bp->link_params.speed_cap_mask[cfg_idx] = 0; - if (cmd->advertising & ADVERTISED_10baseT_Half) { + if (advertising & ADVERTISED_10baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; } - if (cmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; - if (cmd->advertising & 
ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; - if (cmd->advertising & ADVERTISED_100baseT_Half) { + if (advertising & ADVERTISED_100baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; } - if (cmd->advertising & ADVERTISED_1000baseT_Half) { + if (advertising & ADVERTISED_1000baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; } - if (cmd->advertising & (ADVERTISED_1000baseT_Full | + if (advertising & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseKX_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; - if (cmd->advertising & (ADVERTISED_10000baseT_Full | + if (advertising & (ADVERTISED_10000baseT_Full | ADVERTISED_10000baseKX4_Full | ADVERTISED_10000baseKR_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; - if (cmd->advertising & ADVERTISED_20000baseKR2_Full) + if (advertising & ADVERTISED_20000baseKR2_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_20G; } @@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) /* advertise the requested speed and duplex if supported */ switch (speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_1000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "1G half not supported\n"); return -EINVAL; @@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_2500: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "2.5G half not supported\n"); return -EINVAL; @@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_10000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "10G half not supported\n"); return -EINVAL; @@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } bp->link_params.req_line_speed[cfg_idx] = speed; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = advertising; } @@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev, } static const struct ethtool_ops bnx2x_ethtool_ops = { - .get_settings = bnx2x_get_settings, - .set_settings = bnx2x_set_settings, .get_drvinfo = bnx2x_get_drvinfo, .get_regs_len = bnx2x_get_regs_len, .get_regs = bnx2x_get_regs, @@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, .get_ts_info = bnx2x_get_ts_info, + .get_link_ksettings = bnx2x_get_link_ksettings, + .set_link_ksettings = bnx2x_set_link_ksettings, }; static const struct ethtool_ops bnx2x_vf_ethtool_ops = { - .get_settings = bnx2x_get_vf_settings, .get_drvinfo = 
bnx2x_get_drvinfo, .get_msglevel = bnx2x_get_msglevel, .set_msglevel = bnx2x_set_msglevel, @@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = { .set_rxfh = bnx2x_set_rxfh, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, + .get_link_ksettings = bnx2x_get_vf_link_ksettings, }; void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) -- cgit v1.2.3 From e1636836e0a080b71da1954f8604b1238e53d3b9 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Mon, 23 Jan 2017 12:17:33 +0100 Subject: net: dummy: Introduce dummy virtual functions The idea for this was born when testing VF support in iproute2 which was impeded by hardware requirements. In fact, not every VF-capable hardware driver implements all netdev ops, so testing the interface is still hard to do even with a well-sorted hardware shelf. To overcome this and allow for testing the user-kernel interface, this patch allows to turn dummy into a PF with a configurable amount of VFs. Since my patch series 'bus-agnostic-num-vf' has been accepted, implementing the required interfaces is pretty straightforward: Iff 'num_vfs' module parameter was given a value >0, a dummy bus type is being registered which implements the 'num_vf()' callback. Additionally, a dummy parent device common to all dummy devices is registered which sits on the above dummy bus. Joint work with Sabrina Dubroca. Signed-off-by: Sabrina Dubroca Signed-off-by: Phil Sutter Signed-off-by: David S. Miller --- drivers/net/dummy.c | 217 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 215 insertions(+), 2 deletions(-) diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 1f2de4e8207c..2c80611b94ae 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -41,7 +41,48 @@ #define DRV_NAME "dummy" #define DRV_VERSION "1.0" +#undef pr_fmt +#define pr_fmt(fmt) DRV_NAME ": " fmt + static int numdummies = 1; +static int num_vfs; + +struct vf_data_storage { + u8 vf_mac[ETH_ALEN]; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + __be16 vlan_proto; + u16 min_tx_rate; + u16 max_tx_rate; + u8 spoofchk_enabled; + bool rss_query_enabled; + u8 trusted; + int link_state; +}; + +struct dummy_priv { + struct vf_data_storage *vfinfo; +}; + +static int dummy_num_vf(struct device *dev) +{ + return num_vfs; +} + +static struct bus_type dummy_bus = { + .name = "dummy", + .num_vf = dummy_num_vf, +}; + +static void release_dummy_parent(struct device *dev) +{ +} + +static struct device dummy_parent = { + .init_name = "dummy", + .bus = &dummy_bus, + .release = release_dummy_parent, +}; /* fake multicast ability */ static void set_multicast_list(struct net_device *dev) @@ -90,10 +131,25 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) static int dummy_dev_init(struct net_device *dev) { + struct dummy_priv *priv = netdev_priv(dev); + dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); if (!dev->dstats) return -ENOMEM; + priv->vfinfo = NULL; + + if (!num_vfs) + return 0; + + dev->dev.parent = &dummy_parent; + priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!priv->vfinfo) { + free_percpu(dev->dstats); + return -ENOMEM; + } + return 0; } @@ -111,6 +167,117 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier) return 0; } +static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) + return -EINVAL; + + memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN); + + return 0; +} + +static int dummy_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + priv->vfinfo[vf].pf_vlan = vlan; + priv->vfinfo[vf].pf_qos = qos; + priv->vfinfo[vf].vlan_proto = vlan_proto; + + return 0; +} + +static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].min_tx_rate = min; + priv->vfinfo[vf].max_tx_rate = max; + + return 0; +} + +static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].spoofchk_enabled = val; + + return 0; +} + +static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].rss_query_enabled = val; + + return 0; +} + +static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].trusted = val; + + return 0; +} + +static int dummy_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + ivi->vf = vf; + memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN); + ivi->vlan = priv->vfinfo[vf].pf_vlan; + ivi->qos = priv->vfinfo[vf].pf_qos; + ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled; + ivi->linkstate = priv->vfinfo[vf].link_state; + ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate; + ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate; + ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled; + ivi->trusted = priv->vfinfo[vf].trusted; + ivi->vlan_proto = 
priv->vfinfo[vf].vlan_proto; + + return 0; +} + +static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].link_state = state; + + return 0; +} + static const struct net_device_ops dummy_netdev_ops = { .ndo_init = dummy_dev_init, .ndo_uninit = dummy_dev_uninit, @@ -120,6 +287,14 @@ static const struct net_device_ops dummy_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = dummy_get_stats64, .ndo_change_carrier = dummy_change_carrier, + .ndo_set_vf_mac = dummy_set_vf_mac, + .ndo_set_vf_vlan = dummy_set_vf_vlan, + .ndo_set_vf_rate = dummy_set_vf_rate, + .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk, + .ndo_set_vf_trust = dummy_set_vf_trust, + .ndo_get_vf_config = dummy_get_vf_config, + .ndo_set_vf_link_state = dummy_set_vf_link_state, + .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en, }; static void dummy_get_drvinfo(struct net_device *dev, @@ -133,6 +308,14 @@ static const struct ethtool_ops dummy_ethtool_ops = { .get_drvinfo = dummy_get_drvinfo, }; +static void dummy_free_netdev(struct net_device *dev) +{ + struct dummy_priv *priv = netdev_priv(dev); + + kfree(priv->vfinfo); + free_netdev(dev); +} + static void dummy_setup(struct net_device *dev) { ether_setup(dev); @@ -140,7 +323,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; dev->ethtool_ops = &dummy_ethtool_ops; - dev->destructor = free_netdev; + dev->destructor = dummy_free_netdev; /* Fill in device structure with ethernet-generic values. */ dev->flags |= IFF_NOARP; @@ -171,6 +354,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) static struct rtnl_link_ops dummy_link_ops __read_mostly = { .kind = DRV_NAME, + .priv_size = sizeof(struct dummy_priv), .setup = dummy_setup, .validate = dummy_validate, }; @@ -179,12 +363,16 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = { module_param(numdummies, int, 0); MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices"); +module_param(num_vfs, int, 0); +MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device"); + static int __init dummy_init_one(void) { struct net_device *dev_dummy; int err; - dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup); + dev_dummy = alloc_netdev(sizeof(struct dummy_priv), + "dummy%d", NET_NAME_UNKNOWN, dummy_setup); if (!dev_dummy) return -ENOMEM; @@ -203,6 +391,21 @@ static int __init dummy_init_module(void) { int i, err = 0; + if (num_vfs) { + err = bus_register(&dummy_bus); + if (err < 0) { + pr_err("registering dummy bus failed\n"); + return err; + } + + err = device_register(&dummy_parent); + if (err < 0) { + pr_err("registering dummy parent device failed\n"); + bus_unregister(&dummy_bus); + return err; + } + } + rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); if (err < 0) @@ -218,12 +421,22 @@ static int __init dummy_init_module(void) out: rtnl_unlock(); + if (err && num_vfs) { + device_unregister(&dummy_parent); + bus_unregister(&dummy_bus); + } + return err; } static void __exit dummy_cleanup_module(void) { rtnl_link_unregister(&dummy_link_ops); + + if (num_vfs) { + device_unregister(&dummy_parent); + bus_unregister(&dummy_bus); + } } module_init(dummy_init_module); -- cgit v1.2.3 From 187024144c6e0c82bc32ebb46f3cd0585264833e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 23 Jan 2017 13:18:41 +0100 Subject: phy: marvell: remove conflicting 
initializer One line was apparently pasted incorrectly during a new feature patch: drivers/net/phy/marvell.c:2090:15: error: initialized field overwritten [-Werror=override-init] .features = PHY_GBIT_FEATURES, I'm removing the extraneous line here to avoid the W=1 warning and restore the previous flags value, and I'm slightly reordering the lines for consistency to make it less likely to happen again in the future. The ordering in the array is still not the same as in the structure definition, instead I picked the order that is most common in this file and that seems to make more sense here. Fixes: 0b04680fdae4 ("phy: marvell: Add support for temperature sensor") Signed-off-by: Arnd Bergmann Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 64229976ace1..b5b73ff4329a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1900,8 +1900,8 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", .features = PHY_GBIT_FEATURES, - .probe = marvell_probe, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &marvell_config_aneg, .read_status = &genphy_read_status, @@ -1971,10 +1971,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1121R, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", - .probe = &m88e1121_probe, - .remove = &marvell_remove, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = &m88e1121_probe, + .remove = &marvell_remove, .config_init = &m88e1121_config_init, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, @@ -2085,10 +2085,9 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1510", .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE, + .flags = PHY_HAS_INTERRUPT, .probe = &m88e1510_probe, .remove = &marvell_remove, - .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -2105,10 +2104,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1540, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1540", - .probe = m88e1510_probe, - .remove = &marvell_remove, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .probe = m88e1510_probe, + .remove = &marvell_remove, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, -- cgit v1.2.3 From 073ff3c8e6acdd6bae91a037e6f2d0edeed4165d Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 11 Dec 2016 12:15:08 +0200 Subject: net/mlx5: Use exact encap header size for the FW input buffer The current code is allocating the max encap size supported by the firmware and not the size requested by the caller, fix that. Also, spare a warning when the size of the encapsulation headers is bigger from what is supported by the firmware. 
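To make the intent concrete, the shape of the fix is: validate the caller's length against the firmware cap first, then size the command buffer from that length instead of from the maximum. The following is only a rough standalone C sketch of that pattern; CMD_HDR_BYTES and fw_max are invented stand-ins for MLX5_ST_SZ_BYTES(alloc_encap_header_in) and MLX5_CAP_ESW(dev, max_encap_header_size), not driver symbols.

#include <stdio.h>
#include <stdlib.h>

#define CMD_HDR_BYTES 64   /* stand-in for MLX5_ST_SZ_BYTES(alloc_encap_header_in) */

/* Size the buffer for the requested header, not for the hardware maximum. */
void *alloc_encap_cmd(size_t size, size_t fw_max)
{
        if (size > fw_max) {
                fprintf(stderr, "encap size %zu too big, max supported is %zu\n",
                        size, fw_max);
                return NULL;                    /* the driver returns -EINVAL here */
        }
        return calloc(1, CMD_HDR_BYTES + size); /* exact size, like kzalloc(... + size) */
}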
Signed-off-by: Or Gerlitz Reviewed-by: Hadar Hen Zion Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index c4478ecd8056..b5253b59e8fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev, int err; u32 *in; - if (size > MLX5_CAP_ESW(dev, max_encap_header_size)) + if (size > max_encap_size) { + mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", + size, max_encap_size); return -EINVAL; + } - in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size, + in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size, GFP_KERNEL); if (!in) return -ENOMEM; -- cgit v1.2.3 From 19f4440141af8cd9b2f280ec38476baa86dc87f9 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 11 Dec 2016 12:20:53 +0200 Subject: net/mlx5e: Add TC offloads matching on IPv6 encapsulation headers Enhance the parsing of offloaded TC rules to set HW matching on outer IPv6 encapsulation headers. This effectively adds support for TC tunnel key release action (decapsulation) of SRIOV offloads over IPv6 tunnels. Signed-off-by: Or Gerlitz Reviewed-by: Hadar Hen Zion Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 30 ++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d4af5507679f..e9f0854e9509 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -298,6 +298,32 @@ vxlan_match_offload_err: MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); + } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); } /* Enforce DMAC when offloading incoming tunneled flows. 
@@ -358,12 +384,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, f->key); switch (key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: if (parse_tunnel_attr(priv, spec, f)) return -EOPNOTSUPP; break; - case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - netdev_warn(priv->netdev, - "IPv6 tunnel decap offload isn't supported\n"); default: return -EOPNOTSUPP; } -- cgit v1.2.3 From 75c33da827365afa6f3d708ad1f7abe18e0ba4a3 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 21 Dec 2016 17:31:18 +0200 Subject: net/mlx5e: TC ipv4 tunnel encap offload cosmetic changes Move around some settings of variables as pre-step to make things more robust and clear for the ipv6 case in down-stream patch. This patch doesn't change any functionality. Signed-off-by: Or Gerlitz Reviewed-by: Hadar Hen Zion Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index e9f0854e9509..a515444d2489 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -689,7 +689,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, { struct rtable *rt; struct neighbour *n = NULL; - int ttl; #if IS_ENABLED(CONFIG_INET) int ret; @@ -708,7 +707,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - ttl = ip4_dst_hoplimit(&rt->dst); + *out_ttl = ip4_dst_hoplimit(&rt->dst); n = dst_neigh_lookup(&rt->dst, &fl4->daddr); ip_rt_put(rt); if (!n) @@ -716,7 +715,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, *out_n = n; *saddr = fl4->saddr; - *out_ttl = ttl; *out_dev = rt->dst.dev; return 0; @@ -792,15 +790,15 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, if (err) goto out; - e->n = n; - e->out_dev = *out_dev; - if (!(n->nud_state & NUD_VALID)) { pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); err = -EOPNOTSUPP; goto out; } + e->n = n; + e->out_dev = *out_dev; + neigh_ha_snapshot(e->h_dest, n, *out_dev); switch (e->tunnel_type) { -- cgit v1.2.3 From 76f7444dd5a4349af40e4c67e4b995d4ae3c5c92 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 5 Jan 2017 16:43:29 +0200 Subject: net/mlx5e: Use the full tunnel key info for encapsulation offload house-keeping Currently we use subset of the input tunnel key fields (id, ip daddr, dst port) which are provided by upper layers to indentify flows that should go through the same encapsulation and maintain the HW encapsulation table. This is redundant and can get us wrong. Instead, keep a copy of the ip tunnel info provided by the user through TC and have the tunnel key part as the key to our internal hash. 
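Viewed from a distance, the new scheme treats the whole tunnel key as an opaque blob: equality is a memcmp() over the key and the hash bucket comes from a hash over the same bytes, so nothing is lost by dropping the hand-picked subset. A standalone C approximation of that idea follows; struct tunnel_key and the FNV-1a hash are stand-ins for struct ip_tunnel_key and jhash(), and the comparison assumes keys are zero-initialized so padding bytes match.

#include <stdint.h>
#include <string.h>

struct tunnel_key {                     /* stand-in for struct ip_tunnel_key */
        uint64_t tun_id;
        uint32_t dst_ip, src_ip;
        uint16_t tp_dst;
        uint8_t  tos, ttl;
};

/* Byte-wise equality over the whole key, as cmp_encap_info() does with memcmp(). */
int key_equal(const struct tunnel_key *a, const struct tunnel_key *b)
{
        return memcmp(a, b, sizeof(*a)) == 0;
}

/* FNV-1a over the key bytes, standing in for jhash() in hash_encap_info(). */
uint32_t key_hash(const struct tunnel_key *k)
{
        const uint8_t *p = (const uint8_t *)k;
        uint32_t h = 2166136261u;
        size_t i;

        for (i = 0; i < sizeof(*k); i++)
                h = (h ^ p[i]) * 16777619u;
        return h;
}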
Signed-off-by: Or Gerlitz Reviewed-by: Hadar Hen Zion Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 38 +++++++++-------------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 9 ++---- 2 files changed, 17 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a515444d2489..477b2796c12e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -668,15 +668,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } -static inline int cmp_encap_info(struct mlx5_encap_info *a, - struct mlx5_encap_info *b) +static inline int cmp_encap_info(struct ip_tunnel_key *a, + struct ip_tunnel_key *b) { return memcmp(a, b, sizeof(*a)); } -static inline int hash_encap_info(struct mlx5_encap_info *info) +static inline int hash_encap_info(struct ip_tunnel_key *key) { - return jhash(info, sizeof(*info), 0); + return jhash(key, sizeof(*key), 0); } static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, @@ -762,6 +762,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev) { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; struct neighbour *n = NULL; struct flowi4 fl4 = {}; char *encap_header; @@ -777,13 +778,13 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: fl4.flowi4_proto = IPPROTO_UDP; - fl4.fl4_dport = e->tun_info.tp_dst; + fl4.fl4_dport = tun_key->tp_dst; break; default: err = -EOPNOTSUPP; goto out; } - fl4.daddr = e->tun_info.daddr; + fl4.daddr = tun_key->u.ipv4.dst; err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, &fl4, &n, &saddr, &ttl); @@ -805,9 +806,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, case MLX5_HEADER_TYPE_VXLAN: encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, e->h_dest, ttl, - e->tun_info.daddr, - saddr, e->tun_info.tp_dst, - e->tun_info.tun_id); + tun_key->u.ipv4.dst, + saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); break; default: err = -EOPNOTSUPP; @@ -831,13 +832,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); struct ip_tunnel_key *key = &tun_info->key; - struct mlx5_encap_info info; struct mlx5_encap_entry *e; struct net_device *out_dev; + int tunnel_type, err; uintptr_t hash_key; bool found = false; - int tunnel_type; - int err; /* udp dst port must be set */ if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) @@ -853,8 +852,6 @@ vxlan_encap_offload_err: if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { - info.tp_dst = key->tp_dst; - info.tun_id = tunnel_id_to_key32(key->tun_id); tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { netdev_warn(priv->netdev, @@ -862,22 +859,17 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - switch (family) { - case AF_INET: - info.daddr = key->u.ipv4.dst; - break; - case AF_INET6: + if (family == AF_INET6) { netdev_warn(priv->netdev, "IPv6 tunnel encap offload isn't supported\n"); - default: return -EOPNOTSUPP; } - hash_key = hash_encap_info(&info); + hash_key = hash_encap_info(key); hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, encap_hlist, hash_key) { - if 
(!cmp_encap_info(&e->tun_info, &info)) { + if (!cmp_encap_info(&e->tun_info.key, key)) { found = true; break; } @@ -892,7 +884,7 @@ vxlan_encap_offload_err: if (!e) return -ENOMEM; - e->tun_info = info; + e->tun_info = *tun_info; e->tunnel_type = tunnel_type; INIT_LIST_HEAD(&e->flows); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 8661dd3f542c..cea1660d59ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -274,18 +275,12 @@ enum { #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80 -struct mlx5_encap_info { - __be32 daddr; - __be32 tun_id; - __be16 tp_dst; -}; - struct mlx5_encap_entry { struct hlist_node encap_hlist; struct list_head flows; u32 encap_id; struct neighbour *n; - struct mlx5_encap_info tun_info; + struct ip_tunnel_info tun_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; -- cgit v1.2.3 From 9a941117fb761dcfb4f698f1f67340484b781b90 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 3 Jan 2017 19:03:00 +0200 Subject: net/mlx5e: Maximize ip tunnel key usage on the TC offloading path Use more fields out of the tunnel key (e.g the tunnel source IP address) provided by upper layers for the route lookup done on the encap offload path. Signed-off-by: Or Gerlitz Reviewed-by: Hadar Hen Zion Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 477b2796c12e..3d0dbfb018ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -684,7 +684,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi4 *fl4, struct neighbour **out_n, - __be32 *saddr, int *out_ttl) { struct rtable *rt; @@ -714,7 +713,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, return -ENOMEM; *out_n = n; - *saddr = fl4->saddr; *out_dev = rt->dst.dev; return 0; @@ -763,13 +761,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, ttl, err; struct neighbour *n = NULL; struct flowi4 fl4 = {}; char *encap_header; - int encap_size; - __be32 saddr; - int ttl; - int err; encap_header = kzalloc(max_encap_size, GFP_KERNEL); if (!encap_header) @@ -784,10 +779,12 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, err = -EOPNOTSUPP; goto out; } + fl4.flowi4_tos = tun_key->tos; fl4.daddr = tun_key->u.ipv4.dst; + fl4.saddr = tun_key->u.ipv4.src; err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, - &fl4, &n, &saddr, &ttl); + &fl4, &n, &ttl); if (err) goto out; @@ -806,8 +803,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, case MLX5_HEADER_TYPE_VXLAN: encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, e->h_dest, ttl, - tun_key->u.ipv4.dst, - saddr, tun_key->tp_dst, + fl4.daddr, + fl4.saddr, tun_key->tp_dst, tunnel_id_to_key32(tun_key->tun_id)); break; default: -- cgit v1.2.3 From ce99f6b97fcdcb4e7f6f7e2fe5e5fe6c65585cab Mon Sep 17 00:00:00 2001 From: Or 
Gerlitz Date: Sun, 11 Dec 2016 21:28:28 +0200 Subject: net/mlx5e: Support SRIOV TC encapsulation offloads for IPv6 tunnels Add the missing parts for offloading IPv6 tunnels. This includes route and neigh lookups and construnction of the IPv6 tunnel headers. Signed-off-by: Or Gerlitz Reviewed-by: Hadar Hen Zion Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 159 ++++++++++++++++++++++-- 1 file changed, 151 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3d0dbfb018ca..640f10f2e994 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -718,6 +718,47 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, return 0; } +static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct net_device **out_dev, + struct flowi6 *fl6, + struct neighbour **out_n, + int *out_ttl) +{ + struct neighbour *n = NULL; + struct dst_entry *dst; + +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int ret; + + dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); + if (dst->error) { + ret = dst->error; + dst_release(dst); + return ret; + } + + *out_ttl = ip6_dst_hoplimit(dst); + + /* if the egress device isn't on the same HW e-switch, we use the uplink */ + if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) + *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + else + *out_dev = dst->dev; +#else + return -EOPNOTSUPP; +#endif + + n = dst_neigh_lookup(dst, &fl6->daddr); + dst_release(dst); + if (!n) + return -ENOMEM; + + *out_n = n; + return 0; +} + static int gen_vxlan_header_ipv4(struct net_device *out_dev, char buf[], unsigned char h_dest[ETH_ALEN], @@ -754,6 +795,41 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev, return encap_size; } +static int gen_vxlan_header_ipv6(struct net_device *out_dev, + char buf[], + unsigned char h_dest[ETH_ALEN], + int ttl, + struct in6_addr *daddr, + struct in6_addr *saddr, + __be16 udp_dst_port, + __be32 vx_vni) +{ + int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN; + struct ethhdr *eth = (struct ethhdr *)buf; + struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); + struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); + struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); + + memset(buf, 0, encap_size); + + ether_addr_copy(eth->h_dest, h_dest); + ether_addr_copy(eth->h_source, out_dev->dev_addr); + eth->h_proto = htons(ETH_P_IPV6); + + ip6_flow_hdr(ip6h, 0, 0); + /* the HW fills up ipv6 payload len */ + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + udp->dest = udp_dst_port; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(vx_vni); + + return encap_size; +} + static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5_encap_entry *e, @@ -821,6 +897,75 @@ out: return err; } +static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5_encap_entry *e, + struct net_device **out_dev) + +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, err, ttl = 0; + struct neighbour *n = NULL; + struct flowi6 fl6 = 
{}; + char *encap_header; + + encap_header = kzalloc(max_encap_size, GFP_KERNEL); + if (!encap_header) + return -ENOMEM; + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_dport = tun_key->tp_dst; + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + fl6.daddr = tun_key->u.ipv6.dst; + fl6.saddr = tun_key->u.ipv6.src; + + err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev, + &fl6, &n, &ttl); + if (err) + goto out; + + if (!(n->nud_state & NUD_VALID)) { + pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr); + err = -EOPNOTSUPP; + goto out; + } + + e->n = n; + e->out_dev = *out_dev; + + neigh_ha_snapshot(e->h_dest, n, *out_dev); + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header, + e->h_dest, ttl, + &fl6.daddr, + &fl6.saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, + encap_size, encap_header, &e->encap_id); +out: + if (err && n) + neigh_release(n); + kfree(encap_header); + return err; +} + static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, @@ -831,7 +976,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_key *key = &tun_info->key; struct mlx5_encap_entry *e; struct net_device *out_dev; - int tunnel_type, err; + int tunnel_type, err = -EOPNOTSUPP; uintptr_t hash_key; bool found = false; @@ -856,12 +1001,6 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - if (family == AF_INET6) { - netdev_warn(priv->netdev, - "IPv6 tunnel encap offload isn't supported\n"); - return -EOPNOTSUPP; - } - hash_key = hash_encap_info(key); hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, @@ -885,7 +1024,11 @@ vxlan_encap_offload_err: e->tunnel_type = tunnel_type; INIT_LIST_HEAD(&e->flows); - err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + if (family == AF_INET) + err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + else if (family == AF_INET6) + err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev); + if (err) goto out_err; -- cgit v1.2.3 From 264d7bf3c1cfd3a128d621b367f57b81d038ba10 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 20 Dec 2016 12:38:05 +0200 Subject: net/mlx5: E-Switch, Enlarge the FDB size for the switchdev mode The E-Switch FDB size was hard coded to 8k. Change it to be min(max eswitch table size, max flow counters * num flow groups) where the max values are read from the firmware and the number of flow groups is hard-coded as before this change. We don't know upfront the division of flows to group. This setup allows each group to be of size up to the where we want to support (we mandate pairing of flows with counters for offloading). Thus, we don't expect multiple occurences for a group which in turn adds steering hops. 
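As a worked example of that sizing rule (numbers invented for illustration, not read from any device): with log_max_ft_size = 16 and max_flow_counter = 4096, the counter budget wins, so the FDB gets 4096 * 4 = 16384 entries rather than 2^16 = 65536. A minimal C sketch of the calculation:

#include <stdio.h>

#define ESW_OFFLOADS_NUM_GROUPS 4

/* min(max eswitch table size, max flow counters * num flow groups) */
int offloads_fdb_size(int log_max_ft_size, int max_flow_counter)
{
        int by_counters = max_flow_counter * ESW_OFFLOADS_NUM_GROUPS;
        int by_table    = 1 << log_max_ft_size;

        return by_counters < by_table ? by_counters : by_table;
}

int main(void)
{
        /* hypothetical capability values, just to show the arithmetic */
        printf("%d\n", offloads_fdb_size(16, 4096));    /* prints 16384 */
        return 0;
}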
Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Tested-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 03293ed1cc22..348182599294 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -402,19 +402,18 @@ out: } #define MAX_PF_SQ 256 -#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */ #define ESW_OFFLOADS_NUM_GROUPS 4 static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int table_size, ix, esw_size, err = 0; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; struct mlx5_flow_group *g; u32 *flow_group_in; void *match_criteria; - int table_size, ix, err = 0; u32 flags = 0; flow_group_in = mlx5_vzalloc(inlen); @@ -427,15 +426,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) goto ns_err; } - esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), + MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS); + + esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, + 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, - ESW_OFFLOADS_NUM_ENTRIES, + esw_size, ESW_OFFLOADS_NUM_GROUPS, 0, flags); if (IS_ERR(fdb)) { -- cgit v1.2.3 From c9497c98901c689bf6c357f812bf864ed8f50ace Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Thu, 15 Dec 2016 14:02:53 +0200 Subject: net/mlx5: Add support for setting VF min rate Add support for SRIOV VF min rate guarantee by using the TSAR BW share weights mechanism. The TSAR BW share vport attribute represents the weight of that vport among the other vports weights which means that the actual vport BW percentage is the same vport weight percentage among the total vports weights sum. 
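In other words, a guarantee becomes a relative weight. A toy calculation (made-up rates, plain C, not driver code) shows the effect: three VFs asking for minimum rates of 1000, 3000 and 6000 end up with 10%, 30% and 60% of whatever link bandwidth is actually present, because each VF's share is its weight divided by the sum of all weights.

#include <stdio.h>

int main(void)
{
        /* hypothetical per-VF min-rate requests, all in the same unit */
        unsigned int min_rate[] = { 1000, 3000, 6000 };
        unsigned int sum = 0, i;

        for (i = 0; i < 3; i++)
                sum += min_rate[i];

        /* each vport's share of the link is its weight over the sum of weights */
        for (i = 0; i < 3; i++)
                printf("vf%u: %.0f%%\n", i, 100.0 * min_rate[i] / sum);
        return 0;
}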
Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 97 +++++++++++++++++++++-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 11 ++- include/linux/mlx5/mlx5_ifc.h | 4 +- 4 files changed, 104 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3a06c81ef85e..c819d07fbdb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3021,11 +3021,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - if (min_tx_rate) - return -EOPNOTSUPP; - return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, - max_tx_rate); + max_tx_rate, min_tx_rate); } static int mlx5_vport_link2ifla(u8 esw_link) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 4b3b60be319d..efa1a7a76d8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) } static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, - u32 initial_max_rate) + u32 initial_max_rate, u32 initial_bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, initial_max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) } static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, - u32 max_rate) + u32 max_rate, u32 bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share); bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw_apply_vport_conf(esw, vport); /* Attach vport to the eswitch rate limiter */ - if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate)) + if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate, + vport->qos.bw_share)) esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); /* Sync with current vport context */ @@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; + ivi->min_tx_rate = evport->info.min_rate; ivi->max_tx_rate = evport->info.max_rate; mutex_unlock(&esw->state_lock); @@ -2046,23 +2051,103 @@ int 
mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, return 0; } -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32 max_rate) +static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) { + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); struct mlx5_vport *evport; + u32 max_guarantee = 0; + int i; + + for (i = 0; i <= esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled || evport->info.min_rate < max_guarantee) + continue; + max_guarantee = evport->info.min_rate; + } + + return max_t(u32, max_guarantee / fw_max_bw_share, 1); +} + +static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_vport *evport; + u32 vport_max_rate; + u32 vport_min_rate; + u32 bw_share; + int err; + int i; + + for (i = 0; i <= esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled) + continue; + vport_min_rate = evport->info.min_rate; + vport_max_rate = evport->info.max_rate; + bw_share = MLX5_MIN_BW_SHARE; + + if (vport_min_rate) + bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, + divider, + fw_max_bw_share); + + if (bw_share == evport->qos.bw_share) + continue; + + err = esw_vport_qos_config(esw, i, vport_max_rate, + bw_share); + if (!err) + evport->qos.bw_share = bw_share; + else + return err; + } + + return 0; +} + +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + fw_max_bw_share >= MLX5_MIN_BW_SHARE; + bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + struct mlx5_vport *evport; + u32 previous_min_rate; + u32 divider; int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) + return -EOPNOTSUPP; mutex_lock(&esw->state_lock); evport = &esw->vports[vport]; - err = esw_vport_qos_config(esw, vport, max_rate); + + if (min_rate == evport->info.min_rate) + goto set_max_rate; + + previous_min_rate = evport->info.min_rate; + evport->info.min_rate = min_rate; + divider = calculate_vports_min_rate_divider(esw); + err = normalize_vports_min_rate(esw, divider); + if (err) { + evport->info.min_rate = previous_min_rate; + goto unlock; + } + +set_max_rate: + if (max_rate == evport->info.max_rate) + goto unlock; + + err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share); if (!err) evport->info.max_rate = max_rate; +unlock: mutex_unlock(&esw->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index cea1660d59ac..5b78883d5654 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -50,6 +50,11 @@ #define FDB_UPLINK_VPORT 0xffff +#define MLX5_MIN_BW_SHARE 1 + +#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ + min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) + /* L2 -mac address based- hash helpers */ struct l2addr_node { struct hlist_node hlist; @@ -116,6 +121,7 @@ struct mlx5_vport_info { u8 qos; u64 node_guid; int link_state; + u32 min_rate; u32 max_rate; bool spoofchk; bool trusted; @@ -138,6 +144,7 @@ struct mlx5_vport { struct { bool enabled; u32 esw_tsar_ix; + u32 bw_share; } qos; bool enabled; @@ -249,8 
+256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, int vport, bool spoofchk); int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport_num, bool setting); -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32 max_rate); +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index d96ebc319d63..a919dfb920ae 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -547,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits { struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; u8 esw_scheduling[0x1]; - u8 reserved_at_2[0x1e]; + u8 esw_bw_share[0x1]; + u8 esw_rate_limit[0x1]; + u8 reserved_at_4[0x1c]; u8 reserved_at_20[0x20]; -- cgit v1.2.3 From 8c7245a60ef8f8c4a427349690c5a141cfed6217 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 30 Nov 2016 20:23:51 +0200 Subject: net/mlx5: Push min-inline mode resolution helper into the core So we can use that from the IB driver too in downstream patches. This patch doesn't change any functionality. Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 18 +----------------- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 17 +++++++++++++++++ include/linux/mlx5/vport.h | 1 + 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c819d07fbdb3..636372873e64 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3458,22 +3458,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; } -static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, - u8 *min_inline_mode) -{ - switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { - case MLX5_CAP_INLINE_MODE_L2: - *min_inline_mode = MLX5_INLINE_MODE_L2; - break; - case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); - break; - case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: - *min_inline_mode = MLX5_INLINE_MODE_NONE; - break; - } -} - u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -3533,7 +3517,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode); + mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 269e4401c342..b49cfc895c10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); +void mlx5_query_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_L2: + *min_inline_mode 
= MLX5_INLINE_MODE_L2; + break; + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); + break; + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = MLX5_INLINE_MODE_NONE; + break; + } +} +EXPORT_SYMBOL_GPL(mlx5_query_min_inline); + int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline) { diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index ec35157ea725..656c70b65dd2 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 *min_inline); +void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, -- cgit v1.2.3 From 7898489880f55a9c3a954cd5660a0fb4fd81b625 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 30 Nov 2016 20:33:33 +0200 Subject: IB/mlx5: Enable Eth VFs to query their min-inline value for user-space For some mlx5 HW models (CX4, CX4Lx), the VF driver needs to put part of the packet headers on the TX descriptor so the e-switch can do proper matching and steering. This is called "min-inline", it's advertized to the VF by the FW and also enforced on them by the HW, such that if they don't obey, their packets are dropped. SRIOV VF libmlx5 instances should take into account the min-inline value of their vports. For that end, we provide this value through the vendor response part of init_ucontext command. The min inline value is reported in a way which will let newer libmlx5 instances realize that they are running over an older kernel and act accordingly (e.g apply some educated guess). Signed-off-by: Or Gerlitz Reviewed-by: Matan Barak Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/main.c | 9 +++++++++ include/uapi/rdma/mlx5-abi.h | 14 +++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8727116a4cab..9d8535385bb8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -53,6 +53,7 @@ #include #include #include +#include #include "mlx5_ib.h" #define DRIVER_NAME "mlx5_ib" @@ -1202,6 +1203,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, resp.response_length += sizeof(resp.cmds_supp_uhw); } + if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) { + if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { + mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline); + resp.eth_min_inline++; + } + resp.response_length += sizeof(resp.eth_min_inline); + } + /* * We don't want to expose information from the PCI bar that is located * after 4096 bytes, so if the arch only supports larger pages, let's diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 85dc966ea70b..da7cd62bace7 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -90,6 +90,17 @@ enum mlx5_user_cmds_supp_uhw { MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, }; +/* The eth_min_inline response value is set to off-by-one vs the FW + * returned value to allow user-space to deal with older kernels. 
+ */ +enum mlx5_user_inline_mode { + MLX5_USER_INLINE_MODE_NA, + MLX5_USER_INLINE_MODE_NONE, + MLX5_USER_INLINE_MODE_L2, + MLX5_USER_INLINE_MODE_IP, + MLX5_USER_INLINE_MODE_TCP_UDP, +}; + struct mlx5_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 bf_reg_size; @@ -106,7 +117,8 @@ struct mlx5_ib_alloc_ucontext_resp { __u32 response_length; __u8 cqe_version; __u8 cmds_supp_uhw; - __u16 reserved2; + __u8 eth_min_inline; + __u8 reserved2; __u64 hca_core_clock_offset; __u32 log_uar_size; __u32 num_uars_per_page; -- cgit v1.2.3 From b4e029da29ce8244879b607fbf0514d1087dc3f5 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Tue, 22 Nov 2016 11:03:32 +0200 Subject: net/mlx5e: Reduce memory consumption on kdump kernel Reduce memory consumption on kdump kernel by decreasing the number of channels to 1 and the size of RQs and SQs to the minimal values. Signed-off-by: Kamal Heib Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 7 +------ .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21 ++++++++++++++++++--- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d603a30ad639..8d660a018bf8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -101,6 +101,7 @@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) +#define MLX5E_MIN_NUM_CHANNELS 0x1 #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 @@ -847,12 +848,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); } -static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) -{ - return min_t(int, mdev->priv.eq_table.num_comp_vectors, - MLX5E_MAX_NUM_CHANNELS); -} - extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 4021863e3840..9d520f8d6d1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -552,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - ch->max_combined = mlx5e_get_max_num_channels(priv->mdev); + ch->max_combined = priv->profile->max_nch(priv->mdev); ch->combined_count = priv->params.num_channels; } @@ -560,7 +560,7 @@ static int mlx5e_set_channels(struct net_device *dev, struct ethtool_channels *ch) { struct mlx5e_priv *priv = netdev_priv(dev); - int ncv = mlx5e_get_max_num_channels(priv->mdev); + int ncv = priv->profile->max_nch(priv->mdev); unsigned int count = ch->combined_count; bool arfs_enabled; bool was_opened; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 636372873e64..e829143efc14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -31,6 +31,7 @@ */ #include +#include #include #include #include @@ -83,7 +84,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) priv->params.rq_wq_type = rq_type; switch 
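The mechanism is just a parameter clamp at init time: whenever the capture kernel is detected, the default sizes collapse to their minimum counterparts. A standalone sketch of the pattern follows; the constants and the booted_as_kdump flag are invented stand-ins for the MLX5E_PARAMS_* macros and is_kdump_kernel().

#include <stdbool.h>

#define LOG_RQ_SIZE_DEFAULT  10   /* stand-in for MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE */
#define LOG_RQ_SIZE_MINIMUM  1    /* stand-in for MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE */
#define NUM_CHANNELS_MINIMUM 1

bool booted_as_kdump;             /* stand-in for is_kdump_kernel() */

int pick_log_rq_size(void)
{
        return booted_as_kdump ? LOG_RQ_SIZE_MINIMUM : LOG_RQ_SIZE_DEFAULT;
}

int pick_num_channels(int num_comp_vectors, int max_channels)
{
        if (booted_as_kdump)
                return NUM_CHANNELS_MINIMUM;
        return num_comp_vectors < max_channels ? num_comp_vectors : max_channels;
}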
(priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; + priv->params.log_rq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; priv->params.mpwqe_log_stride_sz = MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : @@ -92,7 +95,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) priv->params.mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.log_rq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; } priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); @@ -1508,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) +{ + return is_kdump_kernel() ? + MLX5E_MIN_NUM_CHANNELS : + min_t(int, mdev->priv.eq_table.num_comp_vectors, + MLX5E_MAX_NUM_CHANNELS); +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) @@ -3491,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); - priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + priv->params.log_sq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; /* set CQE compression */ priv->params.rx_cqe_compress_def = false; -- cgit v1.2.3 From 5eb0249b4352c813f0a3c31b967f6cb4b9869a50 Mon Sep 17 00:00:00 2001 From: Shaker Daibes Date: Sat, 10 Dec 2016 18:45:55 +0200 Subject: net/mlx5e: CQE compression control code reuse This patch is intended for code reuse of mlx5e_modify_rx_cqe_compression function. 
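The rename to a _locked suffix follows the usual convention: the helper no longer takes the state lock itself, it documents that the caller must already hold it, and every call site wraps the call in the mutex. A generic sketch of that convention in plain C with pthreads (not the driver code):

#include <pthread.h>

pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
int cqe_compress_enabled;

/* Caller must hold state_lock, hence the "_locked" suffix. */
void modify_rx_cqe_compression_locked(int val)
{
        if (cqe_compress_enabled == val)
                return;                         /* nothing to change */
        /* close channels, flip the setting, reopen channels */
        cqe_compress_enabled = val;
}

void hwstamp_set(int val)
{
        pthread_mutex_lock(&state_lock);        /* locking moved out to the caller */
        modify_rx_cqe_compression_locked(val);
        pthread_mutex_unlock(&state_lock);
}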
Signed-off-by: Shaker Daibes Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 7 +++++-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 13 ++----------- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 8 ++------ 4 files changed, 10 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8d660a018bf8..46f728de9e76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -787,7 +787,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv, struct ptp_clock_event *event); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 349dc72cd2b6..37e66eef6fb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -106,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + mutex_lock(&priv->state_lock); /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def); + mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -128,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Disable CQE compression */ netdev_warn(dev, "Disabling cqe compression"); - mlx5e_modify_rx_cqe_compression(priv, false); + mlx5e_modify_rx_cqe_compression_locked(priv, false); config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: + mutex_unlock(&priv->state_lock); return -ERANGE; } memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); + mutex_unlock(&priv->state_lock); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 9d520f8d6d1a..6c1a5cb43f8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1476,8 +1476,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - int err = 0; - bool reset; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -ENOTSUPP; @@ -1487,17 +1485,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - - if (reset) - mlx5e_close_locked(netdev); - - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable); + mlx5e_modify_rx_cqe_compression_locked(priv, enable); priv->params.rx_cqe_compress_def = enable; - if (reset) - err = mlx5e_open_locked(netdev); - return err; + return 0; } static int mlx5e_handle_pflag(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 20f116f8c457..ba50583ea3ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -155,17 +155,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val) { bool was_opened; if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) return; - mutex_lock(&priv->state_lock); - if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) - goto unlock; + return; was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) @@ -176,8 +174,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) if (was_opened) mlx5e_open_locked(priv->netdev); -unlock: - mutex_unlock(&priv->state_lock); } #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) -- cgit v1.2.3 From 4faf940dd869c36436ff6f0a0b20369fdf5da68b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Jan 2017 01:06:26 +0100 Subject: bpf: simplify __is_valid_access test on cb The __is_valid_access() test for cb[] from 62c7989b24db ("bpf: allow b/h/w/dw access for bpf's cb in ctx") was done unnecessarily complex, we can just simplify it the same way as recent fix from 2d071c643f1c ("bpf, trace: make ctx access checks more robust") did. Overflow can never happen as size is 1/2/4/8 depending on access. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/core/filter.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 90383860e224..883975fa4ed1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2784,19 +2784,8 @@ static bool __is_valid_access(int off, int size) switch (off) { case offsetof(struct __sk_buff, cb[0]) ... 
offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1: - if (size == sizeof(__u16) && - off > offsetof(struct __sk_buff, cb[4]) + sizeof(__u16)) - return false; - if (size == sizeof(__u32) && - off > offsetof(struct __sk_buff, cb[4])) - return false; - if (size == sizeof(__u64) && - off > offsetof(struct __sk_buff, cb[2])) - return false; - if (size != sizeof(__u8) && - size != sizeof(__u16) && - size != sizeof(__u32) && - size != sizeof(__u64)) + if (off + size > + offsetof(struct __sk_buff, cb[4]) + sizeof(__u32)) return false; break; default: -- cgit v1.2.3 From 2492d3b867043f6880708d095a7a5d65debcfc32 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Jan 2017 01:06:27 +0100 Subject: bpf: enable load bytes helper for filter/reuseport progs BPF_PROG_TYPE_SOCKET_FILTER are used in various facilities such as for SO_REUSEPORT and packet fanout demuxing, packet filtering, kcm, etc, and yet the only facility they can use is BPF_LD with {BPF_ABS, BPF_IND} for single byte/half/word access. Direct packet access is only restricted to tc programs right now, but we can still facilitate usage by allowing skb_load_bytes() helper added back then in 05c74e5e53f6 ("bpf: add bpf_skb_load_bytes helper") that calls skb_header_pointer() similarly to bpf_load_pointer(), but for stack buffers with larger access size. Name the previous sk_filter_func_proto() as bpf_base_func_proto() since this is used everywhere else as well, similarly for the ctx converter, that is, bpf_convert_ctx_access(). Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/core/filter.c | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 883975fa4ed1..e2263da505be 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2598,7 +2598,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { }; static const struct bpf_func_proto * -sk_filter_func_proto(enum bpf_func_id func_id) +bpf_base_func_proto(enum bpf_func_id func_id) { switch (func_id) { case BPF_FUNC_map_lookup_elem: @@ -2625,6 +2625,17 @@ sk_filter_func_proto(enum bpf_func_id func_id) } } +static const struct bpf_func_proto * +sk_filter_func_proto(enum bpf_func_id func_id) +{ + switch (func_id) { + case BPF_FUNC_skb_load_bytes: + return &bpf_skb_load_bytes_proto; + default: + return bpf_base_func_proto(func_id); + } +} + static const struct bpf_func_proto * tc_cls_act_func_proto(enum bpf_func_id func_id) { @@ -2680,7 +2691,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; default: - return sk_filter_func_proto(func_id); + return bpf_base_func_proto(func_id); } } @@ -2695,7 +2706,7 @@ xdp_func_proto(enum bpf_func_id func_id) case BPF_FUNC_xdp_adjust_head: return &bpf_xdp_adjust_head_proto; default: - return sk_filter_func_proto(func_id); + return bpf_base_func_proto(func_id); } } @@ -2706,7 +2717,7 @@ cg_skb_func_proto(enum bpf_func_id func_id) case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; default: - return sk_filter_func_proto(func_id); + return bpf_base_func_proto(func_id); } } @@ -2733,7 +2744,7 @@ lwt_inout_func_proto(enum bpf_func_id func_id) case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; default: - return sk_filter_func_proto(func_id); + return bpf_base_func_proto(func_id); } } @@ -2983,10 +2994,10 @@ void bpf_warn_invalid_xdp_action(u32 act) } EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); 
-static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog) +static u32 bpf_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog) { struct bpf_insn *insn = insn_buf; int off; @@ -3210,7 +3221,7 @@ static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, offsetof(struct net_device, ifindex)); break; default: - return sk_filter_convert_ctx_access(type, si, insn_buf, prog); + return bpf_convert_ctx_access(type, si, insn_buf, prog); } return insn - insn_buf; @@ -3242,7 +3253,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, static const struct bpf_verifier_ops sk_filter_ops = { .get_func_proto = sk_filter_func_proto, .is_valid_access = sk_filter_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .convert_ctx_access = bpf_convert_ctx_access, }; static const struct bpf_verifier_ops tc_cls_act_ops = { @@ -3261,24 +3272,24 @@ static const struct bpf_verifier_ops xdp_ops = { static const struct bpf_verifier_ops cg_skb_ops = { .get_func_proto = cg_skb_func_proto, .is_valid_access = sk_filter_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .convert_ctx_access = bpf_convert_ctx_access, }; static const struct bpf_verifier_ops lwt_inout_ops = { .get_func_proto = lwt_inout_func_proto, .is_valid_access = lwt_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .convert_ctx_access = bpf_convert_ctx_access, }; static const struct bpf_verifier_ops lwt_xmit_ops = { .get_func_proto = lwt_xmit_func_proto, .is_valid_access = lwt_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .convert_ctx_access = bpf_convert_ctx_access, .gen_prologue = tc_cls_act_prologue, }; static const struct bpf_verifier_ops cg_sock_ops = { - .get_func_proto = sk_filter_func_proto, + .get_func_proto = bpf_base_func_proto, .is_valid_access = sock_filter_is_valid_access, .convert_ctx_access = sock_filter_convert_ctx_access, }; -- cgit v1.2.3 From d1b662adcdb87944b7f6f7bd2f95cbb1404dbf18 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Jan 2017 01:06:28 +0100 Subject: bpf: allow option for setting bpf_l4_csum_replace from scratch When programs need to calculate the csum from scratch for small UDP packets and use bpf_l4_csum_replace() to feed the result from helpers like bpf_csum_diff(), then we need a flag besides BPF_F_MARK_MANGLED_0 that would ignore the case of current csum being 0, and which would still allow for the helper to set the csum and transform when needed to CSUM_MANGLED_0. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/uapi/linux/bpf.h | 1 + net/core/filter.c | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index bd3068485410..e07fd5a324e6 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -522,6 +522,7 @@ enum bpf_func_id { /* BPF_FUNC_l4_csum_replace flags. */ #define BPF_F_PSEUDO_HDR (1ULL << 4) #define BPF_F_MARK_MANGLED_0 (1ULL << 5) +#define BPF_F_MARK_ENFORCE (1ULL << 6) /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. 
*/ #define BPF_F_INGRESS (1ULL << 0) diff --git a/net/core/filter.c b/net/core/filter.c index e2263da505be..1e00737e3bc3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1522,10 +1522,11 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, { bool is_pseudo = flags & BPF_F_PSEUDO_HDR; bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; + bool do_mforce = flags & BPF_F_MARK_ENFORCE; __sum16 *ptr; - if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR | - BPF_F_HDR_FIELD_MASK))) + if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | + BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) return -EINVAL; if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; @@ -1533,7 +1534,7 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, return -EFAULT; ptr = (__sum16 *)(skb->data + offset); - if (is_mmzero && !*ptr) + if (is_mmzero && !do_mforce && !*ptr) return 0; switch (flags & BPF_F_HDR_FIELD_MASK) { -- cgit v1.2.3 From 62b64660262ab512cb428c9dd6e5a5586a0beb53 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Jan 2017 01:06:29 +0100 Subject: bpf: add prog tag test case to bpf selftests Add the test case used to compare the results from fdinfo with af_alg's output on the tag. Tests are from min to max sized programs, with and without maps included. # ./test_tag test_tag: OK (40945 tests) Tested on x86_64 and s390x. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/Makefile | 4 +- tools/testing/selftests/bpf/test_tag.c | 202 +++++++++++++++++++++++++++++++++ 2 files changed, 204 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/bpf/test_tag.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 064a3e5f2836..769a6cb42b4b 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,8 +1,8 @@ CFLAGS += -Wall -O2 -I../../../../usr/include -test_objs = test_verifier test_maps test_lru_map test_lpm_map +test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map -TEST_PROGS := test_verifier test_maps test_lru_map test_lpm_map test_kmod.sh +TEST_PROGS := $(test_objs) test_kmod.sh TEST_FILES := $(test_objs) all: $(test_objs) diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c new file mode 100644 index 000000000000..6ab4793b3d16 --- /dev/null +++ b/tools/testing/selftests/bpf/test_tag.c @@ -0,0 +1,202 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include "../../../include/linux/filter.h" + +#include "bpf_sys.h" + +static struct bpf_insn prog[BPF_MAXINSNS]; + +static void bpf_gen_imm_prog(unsigned int insns, int fd_map) +{ + int i; + + srand(time(NULL)); + for (i = 0; i < insns; i++) + prog[i] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, rand()); + prog[i - 1] = BPF_EXIT_INSN(); +} + +static void bpf_gen_map_prog(unsigned int insns, int fd_map) +{ + int i, j = 0; + + for (i = 0; i + 1 < insns; i += 2) { + struct bpf_insn tmp[] = { + BPF_LD_MAP_FD(j++ % BPF_REG_10, fd_map) + }; + + memcpy(&prog[i], tmp, sizeof(tmp)); + } + if (insns % 2 == 0) + prog[insns - 2] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, 42); + prog[insns - 1] = BPF_EXIT_INSN(); +} + +static int bpf_try_load_prog(int insns, int fd_map, + void (*bpf_filler)(unsigned int insns, + int fd_map)) +{ + int fd_prog; + + bpf_filler(insns, fd_map); + 
fd_prog = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, prog, insns * + sizeof(struct bpf_insn), "", NULL, 0); + assert(fd_prog > 0); + if (fd_map > 0) + bpf_filler(insns, 0); + return fd_prog; +} + +static int __hex2bin(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + ch = tolower(ch); + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + return -1; +} + +static int hex2bin(uint8_t *dst, const char *src, size_t count) +{ + while (count--) { + int hi = __hex2bin(*src++); + int lo = __hex2bin(*src++); + + if ((hi < 0) || (lo < 0)) + return -1; + *dst++ = (hi << 4) | lo; + } + return 0; +} + +static void tag_from_fdinfo(int fd_prog, uint8_t *tag, uint32_t len) +{ + const int prefix_len = sizeof("prog_tag:\t") - 1; + char buff[256]; + int ret = -1; + FILE *fp; + + snprintf(buff, sizeof(buff), "/proc/%d/fdinfo/%d", getpid(), + fd_prog); + fp = fopen(buff, "r"); + assert(fp); + + while (fgets(buff, sizeof(buff), fp)) { + if (strncmp(buff, "prog_tag:\t", len)) + continue; + ret = hex2bin(tag, buff + prefix_len, len); + break; + } + + fclose(fp); + assert(!ret); +} + +static void tag_from_alg(int insns, uint8_t *tag, uint32_t len) +{ + static const struct sockaddr_alg alg = { + .salg_family = AF_ALG, + .salg_type = "hash", + .salg_name = "sha1", + }; + int fd_base, fd_alg, ret; + ssize_t size; + + fd_base = socket(AF_ALG, SOCK_SEQPACKET, 0); + assert(fd_base > 0); + + ret = bind(fd_base, (struct sockaddr *)&alg, sizeof(alg)); + assert(!ret); + + fd_alg = accept(fd_base, NULL, 0); + assert(fd_alg > 0); + + insns *= sizeof(struct bpf_insn); + size = write(fd_alg, prog, insns); + assert(size == insns); + + size = read(fd_alg, tag, len); + assert(size == len); + + close(fd_alg); + close(fd_base); +} + +static void tag_dump(const char *prefix, uint8_t *tag, uint32_t len) +{ + int i; + + printf("%s", prefix); + for (i = 0; i < len; i++) + printf("%02x", tag[i]); + printf("\n"); +} + +static void tag_exit_report(int insns, int fd_map, uint8_t *ftag, + uint8_t *atag, uint32_t len) +{ + printf("Program tag mismatch for %d insns%s!\n", insns, + fd_map < 0 ? "" : " with map"); + + tag_dump(" fdinfo result: ", ftag, len); + tag_dump(" af_alg result: ", atag, len); + exit(1); +} + +static void do_test(uint32_t *tests, int start_insns, int fd_map, + void (*bpf_filler)(unsigned int insns, int fd)) +{ + int i, fd_prog; + + for (i = start_insns; i <= BPF_MAXINSNS; i++) { + uint8_t ftag[8], atag[sizeof(ftag)]; + + fd_prog = bpf_try_load_prog(i, fd_map, bpf_filler); + tag_from_fdinfo(fd_prog, ftag, sizeof(ftag)); + tag_from_alg(i, atag, sizeof(atag)); + if (memcmp(ftag, atag, sizeof(ftag))) + tag_exit_report(i, fd_map, ftag, atag, sizeof(ftag)); + + close(fd_prog); + sched_yield(); + (*tests)++; + } +} + +int main(void) +{ + struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; + uint32_t tests = 0; + int i, fd_map; + + setrlimit(RLIMIT_MEMLOCK, &rinf); + fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(int), + sizeof(int), 1, BPF_F_NO_PREALLOC); + assert(fd_map > 0); + + for (i = 0; i < 5; i++) { + do_test(&tests, 2, -1, bpf_gen_imm_prog); + do_test(&tests, 3, fd_map, bpf_gen_map_prog); + } + + printf("test_tag: OK (%u tests)\n", tests); + close(fd_map); + return 0; +} -- cgit v1.2.3 From 3fadc80115837b86f989d17c4aa92bb5cb7bc1b6 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Jan 2017 01:06:30 +0100 Subject: bpf: enable verifier to better track const alu ops William reported couple of issues in relation to direct packet access. 
Typical scheme is to check for data + [off] <= data_end, where [off] can be either immediate or coming from a tracked register that contains an immediate, depending on the branch, we can then access the data. However, in case of calculating [off] for either the mentioned test itself or for access after the test in a more "complex" way, then the verifier will stop tracking the CONST_IMM marked register and will mark it as UNKNOWN_VALUE one. Adding that UNKNOWN_VALUE typed register to a pkt() marked register, the verifier then bails out in check_packet_ptr_add() as it finds the registers imm value below 48. In the first below example, that is due to evaluate_reg_imm_alu() not handling right shifts and thus marking the register as UNKNOWN_VALUE via helper __mark_reg_unknown_value() that resets imm to 0. In the second case the same happens at the time when r4 is set to r4 &= r5, where it transitions to UNKNOWN_VALUE from evaluate_reg_imm_alu(). Later on r4 we shift right by 3 inside evaluate_reg_alu(), where the register's imm turns into 3. That is, for registers with type UNKNOWN_VALUE, imm of 0 means that we don't know what value the register has, and for imm > 0 it means that the value has [imm] upper zero bits. F.e. when shifting an UNKNOWN_VALUE register by 3 to the right, no matter what value it had, we know that the 3 upper most bits must be zero now. This is to make sure that ALU operations with unknown registers don't overflow. Meaning, once we know that we have more than 48 upper zero bits, or, in other words cannot go beyond 0xffff offset with ALU ops, such an addition will track the target register as a new pkt() register with a new id, but 0 offset and 0 range, so for that a new data/data_end test will be required. Is the source register a CONST_IMM one that is to be added to the pkt() register, or the source instruction is an add instruction with immediate value, then it will get added if it stays within max 0xffff bounds. >From there, pkt() type, can be accessed should reg->off + imm be within the access range of pkt(). [...] from 28 to 30: R0=imm1,min_value=1,max_value=1 R1=pkt(id=0,off=0,r=22) R2=pkt_end R3=imm144,min_value=144,max_value=144 R4=imm0,min_value=0,max_value=0 R5=inv48,min_value=2054,max_value=2054 R10=fp 30: (bf) r5 = r3 31: (07) r5 += 23 32: (77) r5 >>= 3 33: (bf) r6 = r1 34: (0f) r6 += r5 cannot add integer value with 0 upper zero bits to ptr_to_packet [...] from 52 to 80: R0=imm1,min_value=1,max_value=1 R1=pkt(id=0,off=0,r=34) R2=pkt_end R3=inv R4=imm272 R5=inv56,min_value=17,max_value=17 R6=pkt(id=0,off=26,r=34) R10=fp 80: (07) r4 += 71 81: (18) r5 = 0xfffffff8 83: (5f) r4 &= r5 84: (77) r4 >>= 3 85: (0f) r1 += r4 cannot add integer value with 3 upper zero bits to ptr_to_packet Thus to get above use-cases working, evaluate_reg_imm_alu() has been extended for further ALU ops. This is fine, because we only operate strictly within realm of CONST_IMM types, so here we don't care about overflows as they will happen in the simulated but also real execution and interaction with pkt() in check_packet_ptr_add() will check actual imm value once added to pkt(), but it's irrelevant before. With regards to 06c1c049721a ("bpf: allow helpers access to variable memory") that works on UNKNOWN_VALUE registers, the verifier becomes now a bit smarter as it can better resolve ALU ops, so we need to adapt two test cases there, as min/max bound tracking only becomes necessary when registers were spilled to stack. 
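As a rough worked illustration of the first log excerpt above (values taken from that trace, nothing new): with r3 tracked as CONST_IMM 144, the extended evaluate_reg_imm_alu() now keeps the whole sequence constant,

    r5 = r3          /* CONST_IMM 144 */
    r5 += 23         /* CONST_IMM 167 */
    r5 >>= 3         /* CONST_IMM 20, instead of UNKNOWN_VALUE with imm 0 */
    r6 = r1          /* r1 is pkt(id=0,off=0,r=22) */
    r6 += r5         /* accepted: constant 20 stays within the 0xffff bound */

so check_packet_ptr_add() sees a plain constant of 20 rather than an unknown value with 0 upper zero bits, and the addition to the packet pointer is allowed.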
So while mask was set before to track upper bound for UNKNOWN_VALUE case, it's now resolved directly as CONST_IMM, and such contructs are only necessary when f.e. registers are spilled. For commit 6b17387307ba ("bpf: recognize 64bit immediate loads as consts") that initially enabled dw load tracking only for nfp jit/ analyzer, I did couple of tests on large, complex programs and we don't increase complexity badly (my tests were in ~3% range on avg). I've added a couple of tests similar to affected code above, and it works fine with verifier now. Reported-by: William Tu Signed-off-by: Daniel Borkmann Cc: Gianluca Borello Cc: William Tu Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 64 +++++++++++++++------- tools/testing/selftests/bpf/test_verifier.c | 82 +++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 19 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8f69df7e8167..fb3513b35c0b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1566,22 +1566,54 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; struct bpf_reg_state *src_reg = ®s[insn->src_reg]; u8 opcode = BPF_OP(insn->code); + u64 dst_imm = dst_reg->imm; - /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or' - * insn. Don't care about overflow or negative values, just add them + /* dst_reg->type == CONST_IMM here. Simulate execution of insns + * containing ALU ops. Don't care about overflow or negative + * values, just add/sub/... them; registers are in u64. */ - if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) - dst_reg->imm += insn->imm; - else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) - dst_reg->imm += src_reg->imm; - else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) - dst_reg->imm |= insn->imm; - else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) - dst_reg->imm |= src_reg->imm; - else + if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) { + dst_imm += insn->imm; + } else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) { + dst_imm += src_reg->imm; + } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) { + dst_imm -= insn->imm; + } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) { + dst_imm -= src_reg->imm; + } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) { + dst_imm *= insn->imm; + } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) { + dst_imm *= src_reg->imm; + } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) { + dst_imm |= insn->imm; + } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) { + dst_imm |= src_reg->imm; + } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) { + dst_imm &= insn->imm; + } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) { + dst_imm &= src_reg->imm; + } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) { + dst_imm >>= insn->imm; + } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) { + dst_imm >>= src_reg->imm; + } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) { + dst_imm <<= insn->imm; + } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) { + dst_imm <<= 
src_reg->imm; + } else { mark_reg_unknown_value(regs, insn->dst_reg); + goto out; + } + + dst_reg->imm = dst_imm; +out: return 0; } @@ -2225,14 +2257,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) return err; if (insn->src_reg == 0) { - /* generic move 64-bit immediate into a register, - * only analyzer needs to collect the ld_imm value. - */ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; - if (!env->analyzer_ops) - return 0; - regs[insn->dst_reg].type = CONST_IMM; regs[insn->dst_reg].imm = imm; return 0; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 1aa73241c999..0d0912c7f03c 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -2325,6 +2325,84 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "direct packet access: test11 (shift, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_3, 144), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "direct packet access: test12 (and, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_3, 144), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), + BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "direct packet access: test13 (branches, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_MOV64_IMM(BPF_REG_4, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2), + BPF_MOV64_IMM(BPF_REG_3, 14), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 24), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), + BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "helper access to packet: test1, valid packet_ptr range", .insns = { @@ -4208,6 
+4286,8 @@ static struct bpf_test tests[] = { .insns = { BPF_MOV64_IMM(BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), BPF_MOV64_IMM(BPF_REG_3, 0), BPF_MOV64_IMM(BPF_REG_4, 0), @@ -4251,6 +4331,8 @@ static struct bpf_test tests[] = { BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), BPF_MOV64_IMM(BPF_REG_3, 0), -- cgit v1.2.3 From 3ebe8344eb8a4214c09424bf8094025c8bf61823 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 23 Jan 2017 17:49:20 -0800 Subject: net: ks8851: Drop eeprom_size structure member After commit 51b7b1c34e19 (KSZ8851-SNL: Add ethtool support for EEPROM via eeprom_93cx6, 2011-11-21) this structure member is unused. Delete it. Signed-off-by: Stephen Boyd Signed-off-by: David S. Miller --- drivers/net/ethernet/micrel/ks8851.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index e7e1aff40bd9..955d69a8e8d3 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -84,7 +84,6 @@ union ks8851_tx_hdr { * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. - * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. * @vdd_reg: Optional regulator supplying the chip * @vdd_io: Optional digital power supply for IO @@ -120,7 +119,6 @@ struct ks8851_net { u16 rc_ier; u16 rc_rxqcr; u16 rc_ccr; - u16 eeprom_size; struct mii_if_info mii; struct ks8851_rxctrl rxctrl; @@ -1533,11 +1531,6 @@ static int ks8851_probe(struct spi_device *spi) /* cache the contents of the CCR register for EEPROM, etc. */ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR); - if (ks->rc_ccr & CCR_EEPROM) - ks->eeprom_size = 128; - else - ks->eeprom_size = 0; - ks8851_read_selftest(ks); ks8851_init_mac(ks); -- cgit v1.2.3 From 82272db84d874ebe8b4ae24d4e64d770882dbdd1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 23 Jan 2017 19:19:07 -0800 Subject: net: dsa: Drop WARN() in tag_brcm.c We may be able to see invalid Broadcom tags when the hardware and drivers are misconfigured, or just while exercising the error path. Instead of flooding the console with messages, flat out drop the packet. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/tag_brcm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index af82927674e0..cb5a2b7a0118 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -121,7 +121,8 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, /* We should never see a reserved reason code without knowing how to * handle it */ - WARN_ON(brcm_tag[2] & BRCM_EG_RC_RSVD); + if (unlikely(brcm_tag[2] & BRCM_EG_RC_RSVD)) + goto out_drop; /* Locate which port this is coming from */ source_port = brcm_tag[3] & BRCM_EG_PID_MASK; -- cgit v1.2.3 From 5015024ddfe5efccf1b964f14f078c2152b3b335 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:08 -0800 Subject: net: ethernet: aquantia: Make and configuration files. Patches to create the make and configuration files. 
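As a quick orientation only (symbol names taken from the Kconfig hunk below, defaults illustrative): once these files are in place the driver is expected to be selected roughly as

    CONFIG_NET_VENDOR_AQUANTIA=y
    CONFIG_AQTION=m

with AQTION depending on PCI && X86_64, and the atlantic/ Makefile pulling in the individual aq_*.o and hw_atl/*.o objects.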
Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/Kconfig | 24 ++++++++++++++ drivers/net/ethernet/aquantia/Makefile | 5 +++ drivers/net/ethernet/aquantia/atlantic/Makefile | 42 +++++++++++++++++++++++++ drivers/net/ethernet/aquantia/atlantic/ver.h | 18 +++++++++++ 4 files changed, 89 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/Kconfig create mode 100644 drivers/net/ethernet/aquantia/Makefile create mode 100644 drivers/net/ethernet/aquantia/atlantic/Makefile create mode 100644 drivers/net/ethernet/aquantia/atlantic/ver.h diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig new file mode 100644 index 000000000000..cdf78e069a39 --- /dev/null +++ b/drivers/net/ethernet/aquantia/Kconfig @@ -0,0 +1,24 @@ +# +# aQuantia device configuration +# + +config NET_VENDOR_AQUANTIA + bool "aQuantia devices" + default y + ---help--- + Set this to y if you have an Ethernet network cards that uses the aQuantia + AQC107/AQC108 chipset. + + This option does not build any drivers; it casues the aQuantia + drivers that can be built to appear in the list of Ethernet drivers. + + +if NET_VENDOR_AQUANTIA + +config AQTION + tristate "aQuantia AQtion(tm) Support" + depends on PCI && X86_64 + ---help--- + This enables the support for the aQuantia AQtion(tm) Ethernet card. + +endif # NET_VENDOR_AQUANTIA diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile new file mode 100644 index 000000000000..4f4897b689b2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the aQuantia device drivers. +# + +obj-$(CONFIG_AQTION) += atlantic/ diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile new file mode 100644 index 000000000000..e4ae696920ef --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -0,0 +1,42 @@ +################################################################################ +# +# aQuantia Ethernet Controller AQtion Linux Driver +# Copyright(c) 2014-2017 aQuantia Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# aQuantia Corporation, 105 E. Tasman Dr. 
San Jose, CA 95134, USA +# +################################################################################ + +# +# Makefile for the AQtion(tm) Ethernet driver +# + +obj-$(CONFIG_AQTION) += atlantic.o + +atlantic-objs := aq_main.o \ + aq_nic.o \ + aq_pci_func.o \ + aq_vec.o \ + aq_ring.o \ + aq_hw_utils.o \ + aq_ethtool.o \ + hw_atl/hw_atl_a0.o \ + hw_atl/hw_atl_b0.o \ + hw_atl/hw_atl_utils.o \ + hw_atl/hw_atl_llh.o diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h new file mode 100644 index 000000000000..0de858d215c2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -0,0 +1,18 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef VER_H +#define VER_H + +#define NIC_MAJOR_DRIVER_VERSION 1 +#define NIC_MINOR_DRIVER_VERSION 5 +#define NIC_BUILD_DRIVER_VERSION 345 +#define NIC_REVISION_DRIVER_VERSION 0 + +#endif /* VER_H */ -- cgit v1.2.3 From 3a35780f31679724732738a7eb94f78bb97e0f0c Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:09 -0800 Subject: net: ethernet: aquantia: Common functions and definitions Add files containing the functions and definitions used in common in different functional areas. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 77 ++++++++++++++++++++++ drivers/net/ethernet/aquantia/atlantic/aq_common.h | 23 +++++++ drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 50 ++++++++++++++ 3 files changed, 150 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_cfg.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_common.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_utils.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h new file mode 100644 index 000000000000..5f99237a9d52 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -0,0 +1,77 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_cfg.h: Definition of configuration parameters and constants. 
*/ + +#ifndef AQ_CFG_H +#define AQ_CFG_H + +#define AQ_CFG_VECS_DEF 4U +#define AQ_CFG_TCS_DEF 1U + +#define AQ_CFG_TXDS_DEF 4096U +#define AQ_CFG_RXDS_DEF 1024U + +#define AQ_CFG_IS_POLLING_DEF 0U + +#define AQ_CFG_FORCE_LEGACY_INT 0U + +#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U +#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU +#define AQ_CFG_IRQ_MASK 0x1FFU + +#define AQ_CFG_VECS_MAX 8U +#define AQ_CFG_TCS_MAX 8U + +#define AQ_CFG_TX_FRAME_MAX (16U * 1024U) +#define AQ_CFG_RX_FRAME_MAX (4U * 1024U) + +/* LRO */ +#define AQ_CFG_IS_LRO_DEF 1U + +/* RSS */ +#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U +#define AQ_CFG_RSS_HASHKEY_SIZE 320U + +#define AQ_CFG_IS_RSS_DEF 1U +#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF +#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U + +#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U +#define AQ_CFG_PCI_FUNC_PORTS 2U + +#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) +#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) + +#define AQ_CFG_SKB_FRAGS_MAX 32U + +#define AQ_CFG_NAPI_WEIGHT 64U + +#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U + +/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ + +#define AQ_CFG_FC_MODE 3U + +#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */ + +#define AQ_CFG_IS_AUTONEG_DEF 1U +#define AQ_CFG_MTU_DEF 1514U + +#define AQ_CFG_LOCK_TRYS 100U + +#define AQ_CFG_DRV_AUTHOR "aQuantia" +#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver" +#define AQ_CFG_DRV_NAME "aquantia" +#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ + __stringify(NIC_MINOR_DRIVER_VERSION)"."\ + __stringify(NIC_BUILD_DRIVER_VERSION)"."\ + __stringify(NIC_REVISION_DRIVER_VERSION) + +#endif /* AQ_CFG_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h new file mode 100644 index 000000000000..9eb5e222a234 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -0,0 +1,23 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_common.h: Basic includes for all files in project. */ + +#ifndef AQ_COMMON_H +#define AQ_COMMON_H + +#include +#include + +#include "ver.h" +#include "aq_nic.h" +#include "aq_cfg.h" +#include "aq_utils.h" + +#endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h new file mode 100644 index 000000000000..4446bd90fd86 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h @@ -0,0 +1,50 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_utils.h: Useful macro and structures used in all layers of driver. 
*/ + +#ifndef AQ_UTILS_H +#define AQ_UTILS_H + +#include "aq_common.h" + +#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) + +struct aq_obj_s { + spinlock_t lock; /* spinlock for nic/rings processing */ + atomic_t flags; + atomic_t busy_count; +}; + +static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old | (mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old & ~(mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask) +{ + return atomic_read(flags) & mask; +} + +#endif /* AQ_UTILS_H */ -- cgit v1.2.3 From 018423e90bee8978105eaaa265a26e70637f9f1e Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:10 -0800 Subject: net: ethernet: aquantia: Add ring support code Add code to support the transmit and receive ring buffers. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 376 +++++++++++++++++++++++ drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 157 ++++++++++ 2 files changed, 533 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_ring.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_ring.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c new file mode 100644 index 000000000000..b517b2670a71 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -0,0 +1,376 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.c: Definition of functions for Rx/Tx rings. 
*/ + +#include "aq_ring.h" +#include "aq_nic.h" +#include "aq_hw.h" + +#include +#include + +static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic) +{ + int err = 0; + + self->buff_ring = + kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL); + + if (!self->buff_ring) { + err = -ENOMEM; + goto err_exit; + } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), + self->size * self->dx_size, + &self->dx_ring_pa, GFP_KERNEL); + if (!self->dx_ring) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->txds; + self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->rxds; + self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +int aq_ring_init(struct aq_ring_s *self) +{ + self->hw_head = 0; + self->sw_head = 0; + self->sw_tail = 0; + return 0; +} + +void aq_ring_tx_append_buffs(struct aq_ring_s *self, + struct aq_ring_buff_s *buffer, + unsigned int buffers) +{ + if (likely(self->sw_tail + buffers < self->size)) { + memcpy(&self->buff_ring[self->sw_tail], buffer, + sizeof(buffer[0]) * buffers); + } else { + unsigned int first_part = self->size - self->sw_tail; + unsigned int second_part = buffers - first_part; + + memcpy(&self->buff_ring[self->sw_tail], buffer, + sizeof(buffer[0]) * first_part); + + memcpy(&self->buff_ring[0], &buffer[first_part], + sizeof(buffer[0]) * second_part); + } +} + +int aq_ring_tx_clean(struct aq_ring_s *self) +{ + struct device *dev = aq_nic_get_dev(self->aq_nic); + + for (; self->sw_head != self->hw_head; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + + if (likely(buff->is_mapped)) { + if (unlikely(buff->is_sop)) + dma_unmap_single(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + } + + if (unlikely(buff->is_eop)) + dev_kfree_skb_any(buff->skb); + } + + if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX) + aq_nic_ndev_queue_start(self->aq_nic, self->idx); + + return 0; +} + +static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, + unsigned int t) +{ + return (h < t) ? 
((h < i) && (i < t)) : ((h < i) || (i < t)); +} + +#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) +{ + struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); + int err = 0; + bool is_rsc_completed = true; + + for (; (self->sw_head != self->hw_head) && budget; + self->sw_head = aq_ring_next_dx(self, self->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + unsigned int i = 0U; + struct aq_ring_buff_s *buff_ = NULL; + + if (buff->is_error) { + __free_pages(buff->page, 0); + continue; + } + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + for (next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_]) { + is_rsc_completed = + aq_ring_dx_in_range(self->sw_head, + next_, + self->hw_head); + + if (unlikely(!is_rsc_completed)) { + is_rsc_completed = false; + break; + } + + if (buff_->is_eop) + break; + } + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + } + + /* for single fragment packets use build_skb() */ + if (buff->is_eop) { + skb = build_skb(page_address(buff->page), + buff->len + AQ_SKB_ALIGN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + + skb->dev = ndev; + skb_put(skb, buff->len); + } else { + skb = netdev_alloc_skb(ndev, ETH_HLEN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + skb_put(skb, ETH_HLEN); + memcpy(skb->data, page_address(buff->page), ETH_HLEN); + + skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN, + buff->len - ETH_HLEN, + SKB_TRUESIZE(buff->len - ETH_HLEN)); + + for (i = 1U, next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_], ++i) { + skb_add_rx_frag(skb, i, buff_->page, 0, + buff_->len, + SKB_TRUESIZE(buff->len - + ETH_HLEN)); + buff_->is_cleaned = 1; + + if (buff_->is_eop) + break; + } + } + + skb->protocol = eth_type_trans(skb, ndev); + if (unlikely(buff->is_cso_err)) { + ++self->stats.rx.errors; + __skb_mark_checksum_bad(skb); + } else { + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } + } + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? 
PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + + skb_record_rx_queue(skb, self->idx); + + netif_receive_skb(skb); + + ++self->stats.rx.packets; + self->stats.rx.bytes += skb->len; + } + +err_exit: + return err; +} + +int aq_ring_rx_fill(struct aq_ring_s *self) +{ + struct aq_ring_buff_s *buff = NULL; + int err = 0; + int i = 0; + + for (i = aq_ring_avail_dx(self); i--; + self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { + buff = &self->buff_ring[self->sw_tail]; + + buff->flags = 0U; + buff->len = AQ_CFG_RX_FRAME_MAX; + + buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | + __GFP_COMP, 0); + if (!buff->page) { + err = -ENOMEM; + goto err_exit; + } + + buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic), + buff->page, 0, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + err = dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa); + if (err < 0) + goto err_exit; + + buff = NULL; + } + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) { + if (buff && buff->page) + __free_pages(buff->page, 0); + } + + return err; +} + +void aq_ring_rx_deinit(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + for (; self->sw_head != self->sw_tail; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + + dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + __free_pages(buff->page, 0); + } + +err_exit:; +} + +void aq_ring_tx_deinit(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + for (; self->sw_head != self->sw_tail; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct device *ndev = aq_nic_get_dev(self->aq_nic); + + if (likely(buff->is_mapped)) { + if (unlikely(buff->is_sop)) { + dma_unmap_single(ndev, buff->pa, buff->len, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ndev, buff->pa, buff->len, + DMA_TO_DEVICE); + } + } + + if (unlikely(buff->is_eop)) + dev_kfree_skb_any(buff->skb); + } +err_exit:; +} + +void aq_ring_free(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + kfree(self->buff_ring); + + if (self->dx_ring) + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size, self->dx_ring, + self->dx_ring_pa); + +err_exit:; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h new file mode 100644 index 000000000000..0ac3f9e7bee6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -0,0 +1,157 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */ + +#ifndef AQ_RING_H +#define AQ_RING_H + +#include "aq_common.h" + +struct page; + +/* TxC SOP DX EOP + * +----------+----------+----------+----------- + * 8bytes|len l3,l4 | pa | pa | pa + * +----------+----------+----------+----------- + * 4/8bytes|len pkt |len pkt | | skb + * +----------+----------+----------+----------- + * 4/8bytes|is_txc |len,flags |len |len,is_eop + * +----------+----------+----------+----------- + * + * This aq_ring_buff_s doesn't have endianness dependency. + * It is __packed for cache line optimizations. 
+ */ +struct __packed aq_ring_buff_s { + union { + /* RX */ + struct { + u32 rss_hash; + u16 next; + u8 is_hash_l4; + u8 rsvd1; + struct page *page; + }; + /* EOP */ + struct { + dma_addr_t pa_eop; + struct sk_buff *skb; + }; + /* DX */ + struct { + dma_addr_t pa; + }; + /* SOP */ + struct { + dma_addr_t pa_sop; + u32 len_pkt_sop; + }; + /* TxC */ + struct { + u32 mss; + u8 len_l2; + u8 len_l3; + u8 len_l4; + u8 rsvd2; + u32 len_pkt; + }; + }; + union { + struct { + u32 len:16; + u32 is_ip_cso:1; + u32 is_udp_cso:1; + u32 is_tcp_cso:1; + u32 is_cso_err:1; + u32 is_sop:1; + u32 is_eop:1; + u32 is_txc:1; + u32 is_mapped:1; + u32 is_cleaned:1; + u32 is_error:1; + u32 rsvd3:6; + }; + u32 flags; + }; +}; + +struct aq_ring_stats_rx_s { + u64 errors; + u64 packets; + u64 bytes; + u64 lro_packets; + u64 jumbo_packets; +}; + +struct aq_ring_stats_tx_s { + u64 errors; + u64 packets; + u64 bytes; +}; + +union aq_ring_stats_s { + struct aq_ring_stats_rx_s rx; + struct aq_ring_stats_tx_s tx; +}; + +struct aq_ring_s { + struct aq_obj_s header; + struct aq_ring_buff_s *buff_ring; + u8 *dx_ring; /* descriptors ring, dma shared mem */ + struct aq_nic_s *aq_nic; + unsigned int idx; /* for HW layer registers operations */ + unsigned int hw_head; + unsigned int sw_head; + unsigned int sw_tail; + unsigned int size; /* descriptors number */ + unsigned int dx_size; /* TX or RX descriptor size, */ + /* stored here for fater math */ + union aq_ring_stats_s stats; + dma_addr_t dx_ring_pa; +}; + +struct aq_ring_param_s { + unsigned int vec_idx; + unsigned int cpu; + cpumask_t affinity_mask; +}; + +static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self, + unsigned int dx) +{ + return (++dx >= self->size) ? 0U : dx; +} + +static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self) +{ + return (((self->sw_tail >= self->sw_head)) ? + (self->size - 1) - self->sw_tail + self->sw_head : + self->sw_head - self->sw_tail - 1); +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_ring_init(struct aq_ring_s *self); +void aq_ring_tx_deinit(struct aq_ring_s *self); +void aq_ring_rx_deinit(struct aq_ring_s *self); +void aq_ring_free(struct aq_ring_s *self); +void aq_ring_tx_append_buffs(struct aq_ring_s *ring, + struct aq_ring_buff_s *buffer, + unsigned int buffers); +int aq_ring_tx_clean(struct aq_ring_s *self); +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); +int aq_ring_rx_fill(struct aq_ring_s *self); + +#endif /* AQ_RING_H */ -- cgit v1.2.3 From ef8115356aeef87969124d3bfe92daed7c137666 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:11 -0800 Subject: net: ethernet: aquantia: Low-level hardware interfaces Add definitions of functions that interface directly with the hardware. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel.Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 1394 ++++++++++++ .../ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 677 ++++++ .../aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 2375 ++++++++++++++++++++ 3 files changed, 4446 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c new file mode 100644 index 000000000000..3de651afa8c7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -0,0 +1,1394 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.c: Definitions of bitfield and register access functions for + * Atlantic registers. + */ + +#include "hw_atl_llh.h" +#include "hw_atl_llh_internal.h" +#include "../aq_hw_utils.h" + +/* global */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore) +{ + aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem); +} + +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore) +{ + return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore)); +} + +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr, + glb_reg_res_dis_msk, + glb_reg_res_dis_shift, + glb_reg_res_dis); +} + +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res) +{ + aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk, + glb_soft_res_shift, soft_res); +} + +u32 glb_soft_res_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr, + glb_soft_res_msk, + glb_soft_res_shift); +} + +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr); +} + +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, glb_mif_id_adr); +} + +/* stats */ +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr); +} + +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr); +} + +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr); +} + +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr); +} + +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr); +} + +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr); +} + +u32 
stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr); +} + +/* interrupt */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw) +{ + aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw); +} + +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_msk[32] = { + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U + }; + +/* lower bit position of bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_shift[32] = { + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx], + itr_imr_rxren_msk[rx], + itr_imr_rxren_shift[rx], + irq_map_en_rx); +} + +void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_msk[32] = { + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U + }; + +/* lower bit position of bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_shift[32] = { + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx], + itr_imr_txten_msk[tx], + itr_imr_txten_shift[tx], + irq_map_en_tx); +} + +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 
0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_msk[32] = { + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU + }; + +/* lower bit position of bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_shift[32] = { + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx], + itr_imr_rxr_msk[rx], + itr_imr_rxr_shift[rx], + irq_map_rx); +} + +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_msk[32] = { + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U + }; + +/* lower bit position of bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_shift[32] = { + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx], + itr_imr_txt_msk[tx], + itr_imr_txt_shift[tx], + irq_map_tx); +} + +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw); +} + +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw) +{ + aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw); +} + +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr, + itr_reg_res_dsbl_msk, + itr_reg_res_dsbl_shift, irq_reg_res_dis); +} + +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw); +} + +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, itr_isrlsw_adr); +} + +u32 itr_res_irq_get(struct aq_hw_s *aq_hw) +{ + 
return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift); +} + +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) +{ + aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift, res_irq); +} + +/* rdm */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca), + rdm_dcadcpuid_msk, + rdm_dcadcpuid_shift, cpuid); +} + +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk, + rdm_dca_en_shift, rx_dca_en); +} + +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk, + rdm_dca_mode_shift, rx_dca_mode); +} + +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor), + rdm_descddata_size_msk, + rdm_descddata_size_shift, + rx_desc_data_buff_size); +} + +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca), + rdm_dcaddesc_en_msk, + rdm_dcaddesc_en_shift, + rx_desc_dca_en); +} + +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor), + rdm_descden_msk, + rdm_descden_shift, + rx_desc_en); +} + +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor), + rdm_descdhdr_size_msk, + rdm_descdhdr_size_shift, + rx_desc_head_buff_size); +} + +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor), + rdm_descdhdr_split_msk, + rdm_descdhdr_split_shift, + rx_desc_head_splitting); +} + +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor), + rdm_descdhd_msk, rdm_descdhd_shift); +} + +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor), + rdm_descdlen_msk, rdm_descdlen_shift, + rx_desc_len); +} + +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor), + rdm_descdreset_msk, rdm_descdreset_shift, + rx_desc_res); +} + +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr, + rdm_int_desc_wrb_en_msk, + rdm_int_desc_wrb_en_shift, + rx_desc_wr_wb_irq_en); +} + +void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca), + rdm_dcadhdr_en_msk, + rdm_dcadhdr_en_shift, + rx_head_dca_en); +} + +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca), + rdm_dcadpay_en_msk, rdm_dcadpay_en_shift, + rx_pld_dca_en); +} + +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr, + rdm_int_rim_en_msk, + rdm_int_rim_en_shift, + rdm_intr_moder_en); +} + +/* reg */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx) +{ + aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map); +} + +u32 
reg_gen_irq_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, gen_intr_stat_adr); +} + +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl) +{ + aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl); +} + +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle) +{ + aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr); +} + +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor), + rx_dma_desc_base_addrlsw); +} + +void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor), + rx_dma_desc_base_addrmsw); +} + +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor)); +} + +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor), + rx_dma_desc_tail_ptr); +} + +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk); +} + +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr); +} + +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1) +{ + aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1); +} + +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2) +{ + aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2); +} + +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue), + rx_intr_moderation_ctl); +} + +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl) +{ + aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl); +} + +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor), + tx_dma_desc_base_addrlsw); +} + +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor), + tx_dma_desc_base_addrmsw); +} + +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor), + tx_dma_desc_tail_ptr); +} + +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue), + tx_intr_moderation_ctl); +} + +/* RPB: rx packet buffer */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr, + rpb_dma_sys_lbk_msk, + rpb_dma_sys_lbk_shift, dma_sys_lbk); +} + +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr, + rpb_rpf_rx_tc_mode_msk, + rpb_rpf_rx_tc_mode_shift, + rx_traf_class_mode); +} + +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk, + 
rpb_rx_buf_en_shift, rx_buff_en); +} + +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer), + rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift, + rx_buff_hi_threshold_per_tc); +} + +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer), + rpb_rxblo_thresh_msk, + rpb_rxblo_thresh_shift, + rx_buff_lo_threshold_per_tc); +} + +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr, + rpb_rx_fc_mode_msk, + rpb_rx_fc_mode_shift, rx_flow_ctl_mode); +} + +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer), + rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift, + rx_pkt_buff_size_per_tc); +} + +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer), + rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift, + rx_xoff_en_per_tc); +} + +/* rpf */ + +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr, + rpfl2bc_thresh_msk, + rpfl2bc_thresh_shift, + l2broadcast_count_threshold); +} + +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk, + rpfl2bc_en_shift, l2broadcast_en); +} + +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk, + rpfl2bc_act_shift, l2broadcast_flr_act); +} + +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter), + rpfl2mc_enf_msk, + rpfl2mc_enf_shift, l2multicast_flr_en); +} + +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr, + rpfl2promis_mode_msk, + rpfl2promis_mode_shift, + l2promiscuous_mode_en); +} + +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter), + rpfl2uc_actf_msk, rpfl2uc_actf_shift, + l2unicast_flr_act); +} + +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter), + rpfl2uc_enf_msk, + rpfl2uc_enf_shift, l2unicast_flr_en); +} + +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter), + l2unicast_dest_addresslsw); +} + +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter), + rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift, + l2unicast_dest_addressmsw); +} + +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr, + rpfl2mc_accept_all_msk, + rpfl2mc_accept_all_shift, + l2_accept_all_mc_packets); +} + +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc) +{ +/* register address for bitfield rx_tc_up{t}[2:0] */ + 
static u32 rpf_rpb_rx_tc_upt_adr[8] = { + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U, + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U + }; + +/* bitmask for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_msk[8] = { + 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U, + 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U + }; + +/* lower bit position of bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_shft[8] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc], + rpf_rpb_rx_tc_upt_msk[tc], + rpf_rpb_rx_tc_upt_shft[tc], + user_priority_tc_map); +} + +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr, + rpf_rss_key_addr_msk, + rpf_rss_key_addr_shift, + rss_key_addr); +} + +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data) +{ + aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr, + rss_key_wr_data); +} + +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift); +} + +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift, + rss_key_wr_en); +} + +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr, + rpf_rss_redir_addr_msk, + rpf_rss_redir_addr_shift, rss_redir_tbl_addr); +} + +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr, + rpf_rss_redir_wr_data_msk, + rpf_rss_redir_wr_data_shift, + rss_redir_tbl_wr_data); +} + +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift); +} + +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift, rss_redir_wr_en); +} + +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr, + rpf_tpo_rpf_sys_lbk_msk, + rpf_tpo_rpf_sys_lbk_shift, + tpo_to_rpf_sys_lbk); +} + +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr, + rpf_vl_inner_tpid_msk, + rpf_vl_inner_tpid_shift, + vlan_inner_etht); +} + +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr, + rpf_vl_outer_tpid_msk, + rpf_vl_outer_tpid_shift, + vlan_outer_etht); +} + +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr, + rpf_vl_promis_mode_msk, + rpf_vl_promis_mode_shift, + vlan_prom_mode_en); +} + +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr, + rpf_vl_accept_untagged_mode_msk, + rpf_vl_accept_untagged_mode_shift, + vlan_accept_untagged_packets); +} + +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr, + rpf_vl_untagged_act_msk, + rpf_vl_untagged_act_shift, + vlan_untagged_act); +} + 
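[Editor's note: every setter in this file follows the same shape — a register address macro (optionally parameterized by a filter, queue, DCA or descriptor index), a bitmask and a shift are handed to aq_hw_write_reg_bit(). That helper is provided by ../aq_hw_utils.h, which this file includes; the snippet below is only a sketch of the read-modify-write semantics these calls assume, using the hypothetical name example_write_reg_bit(), and is not the driver's actual implementation.]

/* Illustrative only: assumed read-modify-write behaviour behind the
 * bitfield setters above.  The real helper lives in aq_hw_utils.c and
 * may differ in details (write skipping, masking policy, etc.).
 */
static inline void example_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr,
					 u32 msk, u32 shift, u32 val)
{
	u32 reg = aq_hw_read_reg(aq_hw, addr);	/* read current register */

	reg &= ~msk;				/* clear the bitfield */
	reg |= (val << shift) & msk;		/* place the new value */
	aq_hw_write_reg(aq_hw, addr, reg);	/* write it back */
}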
+void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter), + rpf_vl_en_f_msk, + rpf_vl_en_f_shift, + vlan_flr_en); +} + +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter), + rpf_vl_act_f_msk, + rpf_vl_act_f_shift, + vlan_flr_act); +} + +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter), + rpf_vl_id_f_msk, + rpf_vl_id_f_shift, + vlan_id_flr); +} + +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter), + rpf_et_enf_msk, + rpf_et_enf_shift, etht_flr_en); +} + +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter), + rpf_et_upfen_msk, rpf_et_upfen_shift, + etht_user_priority_en); +} + +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter), + rpf_et_rxqfen_msk, rpf_et_rxqfen_shift, + etht_rx_queue_en); +} + +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter), + rpf_et_upf_msk, + rpf_et_upf_shift, etht_user_priority); +} + +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter), + rpf_et_rxqf_msk, + rpf_et_rxqf_shift, etht_rx_queue); +} + +void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter), + rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, + etht_mgt_queue); +} + +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter), + rpf_et_actf_msk, + rpf_et_actf_shift, etht_flr_act); +} + +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter), + rpf_et_valf_msk, + rpf_et_valf_shift, etht_flr); +} + +/* RPO: rx packet offload */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr, + rpo_ipv4chk_en_msk, + rpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor), + rpo_descdvl_strip_msk, + rpo_descdvl_strip_shift, + rx_desc_vlan_stripping); +} + +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk, + rpol4chk_en_shift, tcp_udp_crc_offload_en); +} + +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en) +{ + aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en); +} + +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr, + rpo_lro_ptopt_en_msk, + rpo_lro_ptopt_en_shift, + lro_patch_optimization_en); +} + +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr, + rpo_lro_qses_lmt_msk, + rpo_lro_qses_lmt_shift, + lro_qsessions_lim); +} + +void 
rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr, + rpo_lro_tot_dsc_lmt_msk, + rpo_lro_tot_dsc_lmt_shift, + lro_total_desc_lim); +} + +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr, + rpo_lro_pkt_min_msk, + rpo_lro_pkt_min_shift, + lro_min_pld_of_first_pkt); +} + +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim) +{ + aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim); +} + +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_number_of_descriptors, + u32 lro) +{ +/* Register address for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_adr[32] = { + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU + }; + +/* Bitmask for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_msk[32] = { + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U + }; + +/* Lower bit position of bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_shift[32] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro], + rpo_lro_ldes_max_msk[lro], + rpo_lro_ldes_max_shift[lro], + lro_max_number_of_descriptors); +} + +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr, + rpo_lro_tb_div_msk, + rpo_lro_tb_div_shift, + lro_time_base_divider); +} + +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr, + rpo_lro_ina_ival_msk, + rpo_lro_ina_ival_shift, + lro_inactive_interval); +} + +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr, + rpo_lro_max_ival_msk, + rpo_lro_max_ival_shift, + lro_max_coalescing_interval); +} + +/* rx */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr, + rx_reg_res_dsbl_msk, + rx_reg_res_dsbl_shift, + rx_reg_res_dis); +} + +/* tdm */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca), + tdm_dcadcpuid_msk, + tdm_dcadcpuid_shift, cpuid); +} + +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en) +{ + aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en); +} + +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk, + 
tdm_dca_en_shift, tx_dca_en); +} + +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk, + tdm_dca_mode_shift, tx_dca_mode); +} + +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca), + tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift, + tx_desc_dca_en); +} + +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor), + tdm_descden_msk, + tdm_descden_shift, + tx_desc_en); +} + +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor), + tdm_descdhd_msk, tdm_descdhd_shift); +} + +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor), + tdm_descdlen_msk, + tdm_descdlen_shift, + tx_desc_len); +} + +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr, + tdm_int_desc_wrb_en_msk, + tdm_int_desc_wrb_en_shift, + tx_desc_wr_wb_irq_en); +} + +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor), + tdm_descdwrb_thresh_msk, + tdm_descdwrb_thresh_shift, + tx_desc_wr_wb_threshold); +} + +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr, + tdm_int_mod_en_msk, + tdm_int_mod_en_shift, + tdm_irq_moderation_en); +} + +/* thm */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr, + thm_lso_tcp_flag_first_msk, + thm_lso_tcp_flag_first_shift, + lso_tcp_flag_of_first_pkt); +} + +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr, + thm_lso_tcp_flag_last_msk, + thm_lso_tcp_flag_last_shift, + lso_tcp_flag_of_last_pkt); +} + +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr, + thm_lso_tcp_flag_mid_msk, + thm_lso_tcp_flag_mid_shift, + lso_tcp_flag_of_middle_pkt); +} + +/* TPB: tx packet buffer */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk, + tpb_tx_buf_en_shift, tx_buff_en); +} + +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer), + tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift, + tx_buff_hi_threshold_per_tc); +} + +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer), + tpb_txblo_thresh_msk, tpb_txblo_thresh_shift, + tx_buff_lo_threshold_per_tc); +} + +void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr, + tpb_dma_sys_lbk_msk, + tpb_dma_sys_lbk_shift, + tx_dma_sys_lbk_en); +} + +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, 
tpb_txbbuf_size_adr(buffer), + tpb_txbbuf_size_msk, + tpb_txbbuf_size_shift, + tx_pkt_buff_size_per_tc); +} + +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr, + tpb_tx_scp_ins_en_msk, + tpb_tx_scp_ins_en_shift, + tx_path_scp_ins_en); +} + +/* TPO: tx packet offload */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr, + tpo_ipv4chk_en_msk, + tpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr, + tpol4chk_en_msk, + tpol4chk_en_shift, + tcp_udp_crc_offload_en); +} + +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr, + tpo_pkt_sys_lbk_msk, + tpo_pkt_sys_lbk_shift, + tx_pkt_sys_lbk_en); +} + +/* TPS: tx packet scheduler */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr, + tps_data_tc_arb_mode_msk, + tps_data_tc_arb_mode_shift, + tx_pkt_shed_data_arb_mode); +} + +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr, + tps_desc_rate_ta_rst_msk, + tps_desc_rate_ta_rst_shift, + curr_time_res); +} + +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr, + tps_desc_rate_lim_msk, + tps_desc_rate_lim_shift, + tx_pkt_shed_desc_rate_lim); +} + +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr, + tps_desc_tc_arb_mode_msk, + tps_desc_tc_arb_mode_shift, + tx_pkt_shed_desc_tc_arb_mode); +} + +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc), + tps_desc_tctcredit_max_msk, + tps_desc_tctcredit_max_shift, + tx_pkt_shed_desc_tc_max_credit); +} + +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc), + tps_desc_tctweight_msk, + tps_desc_tctweight_shift, + tx_pkt_shed_desc_tc_weight); +} + +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr, + tps_desc_vm_arb_mode_msk, + tps_desc_vm_arb_mode_shift, + tx_pkt_shed_desc_vm_arb_mode); +} + +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc), + tps_data_tctcredit_max_msk, + tps_data_tctcredit_max_shift, + tx_pkt_shed_tc_data_max_credit); +} + +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc), + tps_data_tctweight_msk, + tps_data_tctweight_shift, + tx_pkt_shed_tc_data_weight); +} + +/* tx */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr, + tx_reg_res_dsbl_msk, + tx_reg_res_dsbl_shift, tx_reg_res_dis); +} + +/* msm */ 
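[Editor's note: the msm accessors that follow expose an indirect register window — an address register, read/write data registers, read/write strobes and a busy flag. One plausible caller-side read sequence is sketched below; the strobe ordering, polling policy and timeout are assumptions for illustration (and <linux/errno.h> is assumed included for -ETIME), not taken from this patch.]

/* Sketch of an indirect MSM register read built on the accessors below.
 * example_msm_read() is a hypothetical caller, shown only to make the
 * address -> strobe -> poll-busy -> read-data pattern explicit.
 */
static int example_msm_read(struct aq_hw_s *aq_hw, u32 addr, u32 *val)
{
	int busy_wait = 100;

	msm_reg_addr_for_indirect_addr_set(aq_hw, addr);	/* select register */
	msm_reg_rd_strobe_set(aq_hw, 1U);			/* start the read */

	while (msm_reg_access_status_get(aq_hw)) {		/* wait until not busy */
		if (--busy_wait == 0)
			return -ETIME;
	}

	*val = msm_reg_rd_data_get(aq_hw);			/* fetch the result */
	return 0;
}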
+u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr, + msm_reg_access_busy_msk, + msm_reg_access_busy_shift); +} + +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr, + msm_reg_addr_msk, + msm_reg_addr_shift, + reg_addr_for_indirect_addr); +} + +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr, + msm_reg_rd_strobe_msk, + msm_reg_rd_strobe_shift, + reg_rd_strobe); +} + +u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr); +} + +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data) +{ + aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data); +} + +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr, + msm_reg_wr_strobe_msk, + msm_reg_wr_strobe_shift, + reg_wr_strobe); +} + +/* pci */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr, + pci_reg_res_dsbl_msk, + pci_reg_res_dsbl_shift, + pci_reg_res_dis); +} + +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp, + u32 scratch_scp) +{ + aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp), + glb_cpu_scratch_scp); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h new file mode 100644 index 000000000000..ed1085b95adb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -0,0 +1,677 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.h: Declarations of bitfield and register access functions for + * Atlantic registers. 
+ */ + +#ifndef HW_ATL_LLH_H +#define HW_ATL_LLH_H + +#include + +struct aq_hw_s; + +/* global */ + +/* set global microprocessor semaphore */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore); + +/* get global microprocessor semaphore */ +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore); + +/* set global register reset disable */ +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis); + +/* set soft reset */ +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res); + +/* get soft reset */ +u32 glb_soft_res_get(struct aq_hw_s *aq_hw); + +/* stats */ + +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter lsw */ +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter lsw */ +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter lsw */ +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter lsw */ +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter msw */ +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter msw */ +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter msw */ +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter msw */ +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get msm rx errors counter register */ +u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast frames counter register */ +u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx multicast frames counter register */ +u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast frames counter register */ +u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast octets counter register 1 */ +u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast octets counter register 0 */ +u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* get rx dma statistics counter 7 */ +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); + +/* get msm tx errors counter register */ +u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast frames counter register */ +u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast frames counter register */ +u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast frames counter register */ +u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast octets counter register 1 */ +u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast octets counter register 1 */ +u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast octets counter register 0 */ +u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* get global mif identification */ +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw); + +/* interrupt */ + +/* set interrupt auto mask lsw */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw); + +/* set interrupt mapping enable rx */ +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx); + +/* set interrupt mapping enable tx */ 
+void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx); + +/* set interrupt mapping rx */ +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx); + +/* set interrupt mapping tx */ +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx); + +/* set interrupt mask clear lsw */ +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw); + +/* set interrupt mask set lsw */ +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw); + +/* set interrupt register reset disable */ +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis); + +/* set interrupt status clear lsw */ +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw); + +/* get interrupt status lsw */ +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw); + +/* get reset interrupt */ +u32 itr_res_irq_get(struct aq_hw_s *aq_hw); + +/* set reset interrupt */ +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); + +/* rdm */ + +/* set cpu id */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set rx dca enable */ +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en); + +/* set rx dca mode */ +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode); + +/* set rx descriptor data buffer size */ +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor); + +/* set rx descriptor dca enable */ +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca); + +/* set rx descriptor enable */ +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor); + +/* set rx descriptor header splitting */ +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor); + +/* get rx descriptor head pointer */ +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx descriptor length */ +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor); + +/* set rx descriptor write-back interrupt enable */ +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en); + +/* set rx header dca enable */ +void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca); + +/* set rx payload dca enable */ +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca); + +/* set rx descriptor header buffer size */ +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor); + +/* set rx descriptor reset */ +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor); + +/* Set RDM Interrupt Moderation Enable */ +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en); + +/* reg */ + +/* set general interrupt mapping register */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx); + +/* get general interrupt status register */ +u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw); + +/* set interrupt global control register */ +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl); + +/* set interrupt throttle register */ +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle); + +/* set rx dma descriptor base address lsw */ +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set rx dma descriptor base address msw */ +void 
reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor); + +/* get rx dma descriptor status register */ +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx dma descriptor tail pointer register */ +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor); + +/* set rx filter multicast filter mask register */ +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk); + +/* set rx filter multicast filter register */ +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter); + +/* set rx filter rss control register 1 */ +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1); + +/* Set RX Filter Control Register 2 */ +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2); + +/* Set RX Interrupt Moderation Control Register */ +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue); + +/* set tx dma debug control */ +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl); + +/* set tx dma descriptor base address lsw */ +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set tx dma descriptor base address msw */ +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor); + +/* set tx dma descriptor tail pointer register */ +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor); + +/* Set TX Interrupt Moderation Control Register */ +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); + +/* set global microprocessor scratch pad */ +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, u32 scratch_scp); + +/* rpb */ + +/* set dma system loopback */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); + +/* set rx traffic class mode */ +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode); + +/* set rx buffer enable */ +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en); + +/* set rx buffer high threshold (per tc) */ +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set rx buffer low threshold (per tc) */ +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set rx flow control mode */ +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode); + +/* set rx packet buffer size (per tc) */ +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, + u32 buffer); + +/* set rx xoff enable (per tc) */ +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer); + +/* rpf */ + +/* set l2 broadcast count threshold */ +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold); + +/* set l2 broadcast enable */ +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en); + +/* set l2 broadcast filter action */ +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act); + +/* set l2 multicast filter enable */ +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter); + +/* set l2 promiscuous mode 
enable */ +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en); + +/* set l2 unicast filter action */ +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter); + +/* set l2 unicast filter enable */ +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter); + +/* set l2 unicast destination address lsw */ +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter); + +/* set l2 unicast destination address msw */ +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter); + +/* Set L2 Accept all Multicast packets */ +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets); + +/* set user-priority tc mapping */ +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc); + +/* set rss key address */ +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr); + +/* set rss key write data */ +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data); + +/* get rss key write enable */ +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss key write enable */ +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en); + +/* set rss redirection table address */ +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_addr); + +/* set rss redirection table write data */ +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data); + +/* get rss redirection write enable */ +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss redirection write enable */ +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en); + +/* set tpo to rpf system loopback */ +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk); + +/* set vlan inner ethertype */ +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en); + +/* Set VLAN untagged action */ +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets); + +/* Set VLAN filter enable */ +void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter); + +/* Set VLAN Filter Action */ +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter); + +/* set ethertype filter enable */ +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter); + +/* set ethertype user-priority enable */ +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter); + +/* set ethertype rx queue enable */ +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void 
rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); + +/* rpo */ + +/* set ipv4 header checksum offload enable */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set rx descriptor vlan stripping */ +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor); + +/* set tcp/udp checksum offload enable */ +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* Set LRO Patch Optimization Enable. */ +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en); + +/* Set Large Receive Offload Enable */ +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en); + +/* Set LRO Q Sessions Limit */ +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim); + +/* Set LRO Total Descriptor Limit */ +void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim); + +/* Set LRO Min Payload of First Packet */ +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt); + +/* Set LRO Packet Limit */ +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim); + +/* Set LRO Max Number of Descriptors */ +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_desc_num, u32 lro); + +/* Set LRO Time Base Divider */ +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider); + +/*Set LRO Inactive Interval */ +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval); + +/*Set LRO Max Coalescing Interval */ +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval); + +/* rx */ + +/* set rx register reset disable */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis); + +/* tdm */ + +/* set cpu id */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set large send offload enable */ +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en); + +/* set tx descriptor enable */ +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor); + +/* set tx dca enable */ +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en); + +/* set tx dca mode */ +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode); + +/* set tx descriptor dca enable */ +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca); + +/* get tx descriptor head pointer */ +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set tx descriptor length */ +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor); + +/* set tx descriptor write-back interrupt enable */ +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en); + +/* set tx descriptor write-back threshold */ +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor); + +/* Set TDM Interrupt Moderation Enable */ +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en); +/* thm */ + +/* set lso tcp flag of first packet */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 
lso_tcp_flag_of_first_pkt); + +/* set lso tcp flag of last packet */ +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt); + +/* set lso tcp flag of middle packet */ +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt); + +/* tpb */ + +/* set tx buffer enable */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); + +/* set tx buffer high threshold (per tc) */ +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set tx buffer low threshold (per tc) */ +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set tx dma system loopback enable */ +void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en); + +/* set tx packet buffer size (per tc) */ +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer); + +/* set tx path pad insert enable */ +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en); + +/* tpo */ + +/* set ipv4 header checksum offload enable */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set tcp/udp checksum offload enable */ +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* set tx pkt system loopback enable */ +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en); + +/* tps */ + +/* set tx packet scheduler data arbitration mode */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode); + +/* set tx packet scheduler descriptor rate current time reset */ +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res); + +/* set tx packet scheduler descriptor rate limit */ +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim); + +/* set tx packet scheduler descriptor tc arbitration mode */ +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode); + +/* set tx packet scheduler descriptor tc max credit */ +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc); + +/* set tx packet scheduler descriptor tc weight */ +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc); + +/* set tx packet scheduler descriptor vm arbitration mode */ +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode); + +/* set tx packet scheduler tc data max credit */ +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc); + +/* set tx packet scheduler tc data weight */ +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc); + +/* tx */ + +/* set tx register reset disable */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis); + +/* msm */ + +/* get register access status */ +u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw); + +/* set register address for indirect address */ +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr); + +/* set register read strobe */ +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe); + +/* get register read data */ +u32 
msm_reg_rd_data_get(struct aq_hw_s *aq_hw); + +/* set register write data */ +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data); + +/* set register write strobe */ +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); + +/* pci */ + +/* set pci register reset disable */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); + +#endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h new file mode 100644 index 000000000000..5527fc0e5942 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -0,0 +1,2375 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh_internal.h: Preprocessor definitions + * for Atlantic registers. + */ + +#ifndef HW_ATL_LLH_INTERNAL_H +#define HW_ATL_LLH_INTERNAL_H + +/* global microprocessor semaphore definitions + * base address: 0x000003a0 + * parameter: semaphore {s} | stride size 0x4 | range [0, 15] + */ +#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4) +/* register address for bitfield rx dma good octet counter lsw [1f:0] */ +#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808 +/* register address for bitfield rx dma good packet counter lsw [1f:0] */ +#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800 +/* register address for bitfield tx dma good octet counter lsw [1f:0] */ +#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808 +/* register address for bitfield tx dma good packet counter lsw [1f:0] */ +#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800 + +/* register address for bitfield rx dma good octet counter msw [3f:20] */ +#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c +/* register address for bitfield rx dma good packet counter msw [3f:20] */ +#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804 +/* register address for bitfield tx dma good octet counter msw [3f:20] */ +#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c +/* register address for bitfield tx dma good packet counter msw [3f:20] */ +#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804 + +/* preprocessor definitions for msm rx errors counter register */ +#define mac_msm_rx_errs_cnt_adr 0x00000120u + +/* preprocessor definitions for msm rx unicast frames counter register */ +#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u + +/* preprocessor definitions for msm rx multicast frames counter register */ +#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u + +/* preprocessor definitions for msm rx broadcast frames counter register */ +#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u + +/* preprocessor definitions for msm rx broadcast octets counter register 1 */ +#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u + +/* preprocessor definitions for msm rx broadcast octets counter register 2 */ +#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u + +/* preprocessor definitions for msm rx unicast octets counter register 0 */ +#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u + +/* preprocessor definitions for rx dma statistics counter 7 */ +#define rx_dma_stat_counter7_adr 0x00006818u + +/* preprocessor definitions for msm 
tx unicast frames counter register */ +#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u + +/* preprocessor definitions for msm tx multicast frames counter register */ +#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u + +/* preprocessor definitions for global mif identification */ +#define glb_mif_id_adr 0x0000001cu + +/* register address for bitfield iamr_lsw[1f:0] */ +#define itr_iamrlsw_adr 0x00002090 +/* register address for bitfield rx dma drop packet counter [1f:0] */ +#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818 + +/* register address for bitfield imcr_lsw[1f:0] */ +#define itr_imcrlsw_adr 0x00002070 +/* register address for bitfield imsr_lsw[1f:0] */ +#define itr_imsrlsw_adr 0x00002060 +/* register address for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_adr 0x00002300 +/* bitmask for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_msk 0x20000000 +/* lower bit position of bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_shift 29 +/* register address for bitfield iscr_lsw[1f:0] */ +#define itr_iscrlsw_adr 0x00002050 +/* register address for bitfield isr_lsw[1f:0] */ +#define itr_isrlsw_adr 0x00002000 +/* register address for bitfield itr_reset */ +#define itr_res_adr 0x00002300 +/* bitmask for bitfield itr_reset */ +#define itr_res_msk 0x80000000 +/* lower bit position of bitfield itr_reset */ +#define itr_res_shift 31 +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_msk 0x000000ff +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_shift 0 +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 + +/* rx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_rdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 +/* bitmask for bitfield dca_en */ +#define rdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define rdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define rdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define rdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define rdm_dca_en_default 0x1 + +/* rx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_rdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_adr 0x00006180 +/* bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_default 0x0 + +/* rx desc{d}_data_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]". 
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_data_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_msk 0x0000001f +/* inverted bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_mskn 0xffffffe0 +/* lower bit position of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_shift 0 +/* width of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_width 5 +/* default value of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_default 0x0 + +/* rx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_default 0x0 + +/* rx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_en */ +#define rdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define rdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define rdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define rdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define rdm_descden_default 0x0 + +/* rx desc{d}_hdr_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_hdr_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_msk 0x00001f00 +/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_mskn 0xffffe0ff +/* lower bit position of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_shift 8 +/* width of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_width 5 +/* default value of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_default 0x0 + +/* rx desc{d}_hdr_split bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_split". 
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_hdr_split_i[0]" + */ + +/* register address for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_msk 0x10000000 +/* inverted bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_mskn 0xefffffff +/* lower bit position of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_shift 28 +/* width of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_width 1 +/* default value of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_default 0x0 + +/* rx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="rdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_width 13 + +/* rx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_default 0x0 + +/* rx desc{d}_reset bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_reset". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_q_pf_res_i[0]" + */ + +/* register address for bitfield desc{d}_reset */ +#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_msk 0x02000000 +/* inverted bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_mskn 0xfdffffff +/* lower bit position of bitfield desc{d}_reset */ +#define rdm_descdreset_shift 25 +/* width of bitfield desc{d}_reset */ +#define rdm_descdreset_width 1 +/* default value of bitfield desc{d}_reset */ +#define rdm_descdreset_default 0x0 + +/* rx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". 
+ * port="pif_rdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_adr 0x00005a30 +/* bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_msk 0x00000004 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_mskn 0xfffffffb +/* lower bit position of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_shift 2 +/* width of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_default 0x0 + +/* rx dca{d}_hdr_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_hdr_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_hdr_en_i[0]" + */ + +/* register address for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_msk 0x40000000 +/* inverted bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_mskn 0xbfffffff +/* lower bit position of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_shift 30 +/* width of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_width 1 +/* default value of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_default 0x0 + +/* rx dca{d}_pay_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_pay_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_pay_en_i[0]" + */ + +/* register address for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_msk 0x20000000 +/* inverted bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_mskn 0xdfffffff +/* lower bit position of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_shift 29 +/* width of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_width 1 +/* default value of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_default 0x0 + +/* RX rdm_int_rim_en Bitfield Definitions + * Preprocessor definitions for the bitfield "rdm_int_rim_en". 
+ * PORT="pif_rdm_int_rim_en_i" + */ + +/* Register address for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_adr 0x00005A30 +/* Bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_msk 0x00000008 +/* Inverted bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_mskn 0xFFFFFFF7 +/* Lower bit position of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_shift 3 +/* Width of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_width 1 +/* Default value of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_default 0x0 + +/* general interrupt mapping register definitions + * preprocessor definitions for general interrupt mapping register + * base address: 0x00002180 + * parameter: regidx {f} | stride size 0x4 | range [0, 3] + */ +#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4) + +/* general interrupt status register definitions + * preprocessor definitions for general interrupt status register + * address: 0x000021A0 + */ + +#define gen_intr_stat_adr 0x000021A4U + +/* interrupt global control register definitions + * preprocessor definitions for interrupt global control register + * address: 0x00002300 + */ +#define intr_glb_ctl_adr 0x00002300u + +/* interrupt throttle register definitions + * preprocessor definitions for interrupt throttle register + * base address: 0x00002800 + * parameter: throttle {t} | stride size 0x4 | range [0, 31] + */ +#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4) + +/* rx dma descriptor base address lsw definitions + * preprocessor definitions for rx dma descriptor base address lsw + * base address: 0x00005b00 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrlsw_adr(descriptor) \ +(0x00005b00u + (descriptor) * 0x20) + +/* rx dma descriptor base address msw definitions + * preprocessor definitions for rx dma descriptor base address msw + * base address: 0x00005b04 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrmsw_adr(descriptor) \ +(0x00005b04u + (descriptor) * 0x20) + +/* rx dma descriptor status register definitions + * preprocessor definitions for rx dma descriptor status register + * base address: 0x00005b14 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20) + +/* rx dma descriptor tail pointer register definitions + * preprocessor definitions for rx dma descriptor tail pointer register + * base address: 0x00005b10 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20) + +/* rx interrupt moderation control register definitions + * Preprocessor definitions for RX Interrupt Moderation Control Register + * Base Address: 0x00005A40 + * Parameter: RIM {R} | stride size 0x4 | range [0, 31] + */ +#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4) + +/* rx filter multicast filter mask register definitions + * preprocessor definitions for rx filter multicast filter mask register + * address: 0x00005270 + */ +#define rx_flr_mcst_flr_msk_adr 0x00005270u + +/* rx filter multicast filter register definitions + * preprocessor definitions for rx filter multicast filter register + * base address: 0x00005250 + * parameter: filter {f} | stride size 0x4 | range [0, 7] + */ +#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4) + +/* RX Filter RSS Control Register 1 Definitions + * Preprocessor 
definitions for RX Filter RSS Control Register 1 + * Address: 0x000054C0 + */ +#define rx_flr_rss_control1_adr 0x000054C0u + +/* RX Filter Control Register 2 Definitions + * Preprocessor definitions for RX Filter Control Register 2 + * Address: 0x00005104 + */ +#define rx_flr_control2_adr 0x00005104u + +/* tx tx dma debug control [1f:0] bitfield definitions + * preprocessor definitions for the bitfield "tx dma debug control [1f:0]". + * port="pif_tdm_debug_cntl_i[31:0]" + */ + +/* register address for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_adr 0x00008920 +/* bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_msk 0xffffffff +/* inverted bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_mskn 0x00000000 +/* lower bit position of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_shift 0 +/* width of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_width 32 +/* default value of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_default 0x0 + +/* tx dma descriptor base address lsw definitions + * preprocessor definitions for tx dma descriptor base address lsw + * base address: 0x00007c00 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_base_addrlsw_adr(descriptor) \ + (0x00007c00u + (descriptor) * 0x40) + +/* tx dma descriptor tail pointer register definitions + * preprocessor definitions for tx dma descriptor tail pointer register + * base address: 0x00007c10 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40) + +/* rx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". + * port="pif_rpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_adr 0x00005000 +/* bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_default 0x0 + +/* rx rx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "rx_tc_mode". + * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i" + */ + +/* register address for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_adr 0x00005700 +/* bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_msk 0x00000100 +/* inverted bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff +/* lower bit position of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_shift 8 +/* width of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_width 1 +/* default value of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_default 0x0 + +/* rx rx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "rx_buf_en". 
+ * port="pif_rpb_rx_buf_en_i" + */ + +/* register address for bitfield rx_buf_en */ +#define rpb_rx_buf_en_adr 0x00005700 +/* bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield rx_buf_en */ +#define rpb_rx_buf_en_shift 0 +/* width of bitfield rx_buf_en */ +#define rpb_rx_buf_en_width 1 +/* default value of bitfield rx_buf_en */ +#define rpb_rx_buf_en_default 0x0 + +/* rx rx{b}_hi_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_hi_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_msk 0x3fff0000 +/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_mskn 0xc000ffff +/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_shift 16 +/* width of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_width 14 +/* default value of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_default 0x0 + +/* rx rx{b}_lo_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_lo_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_msk 0x00003fff +/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_mskn 0xffffc000 +/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_shift 0 +/* width of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_width 14 +/* default value of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_default 0x0 + +/* rx rx_fc_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "rx_fc_mode[1:0]". + * port="pif_rpb_rx_fc_mode_i[1:0]" + */ + +/* register address for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_adr 0x00005700 +/* bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_msk 0x00000030 +/* inverted bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_mskn 0xffffffcf +/* lower bit position of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_shift 4 +/* width of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_width 2 +/* default value of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_default 0x0 + +/* rx rx{b}_buf_size[8:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_buf_size_i[8:0]" + */ + +/* register address for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_msk 0x000001ff +/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_mskn 0xfffffe00 +/* lower bit position of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_shift 0 +/* width of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_width 9 +/* default value of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_default 0x0 + +/* rx rx{b}_xoff_en bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_xoff_en". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx_xoff_en_i[0]" + */ + +/* register address for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_msk 0x80000000 +/* inverted bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_mskn 0x7fffffff +/* lower bit position of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_shift 31 +/* width of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_width 1 +/* default value of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_default 0x0 + +/* rx l2_bc_thresh[f:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]". + * port="pif_rpf_l2_bc_thresh_i[15:0]" + */ + +/* register address for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_adr 0x00005100 +/* bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_msk 0xffff0000 +/* inverted bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_mskn 0x0000ffff +/* lower bit position of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_shift 16 +/* width of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_width 16 +/* default value of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_default 0x0 + +/* rx l2_bc_en bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_en". + * port="pif_rpf_l2_bc_en_i" + */ + +/* register address for bitfield l2_bc_en */ +#define rpfl2bc_en_adr 0x00005100 +/* bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_msk 0x00000001 +/* inverted bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_mskn 0xfffffffe +/* lower bit position of bitfield l2_bc_en */ +#define rpfl2bc_en_shift 0 +/* width of bitfield l2_bc_en */ +#define rpfl2bc_en_width 1 +/* default value of bitfield l2_bc_en */ +#define rpfl2bc_en_default 0x0 + +/* rx l2_bc_act[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_act[2:0]". + * port="pif_rpf_l2_bc_act_i[2:0]" + */ + +/* register address for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_adr 0x00005100 +/* bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_msk 0x00007000 +/* inverted bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_mskn 0xffff8fff +/* lower bit position of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_shift 12 +/* width of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_width 3 +/* default value of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_default 0x0 + +/* rx l2_mc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_mc_en{f}". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 7] + * port="pif_rpf_l2_mc_en_i[0]" + */ + +/* register address for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4) +/* bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_shift 31 +/* width of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_width 1 +/* default value of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_default 0x0 + +/* rx l2_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "l2_promis_mode". + * port="pif_rpf_l2_promis_mode_i" + */ + +/* register address for bitfield l2_promis_mode */ +#define rpfl2promis_mode_adr 0x00005100 +/* bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_msk 0x00000008 +/* inverted bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_mskn 0xfffffff7 +/* lower bit position of bitfield l2_promis_mode */ +#define rpfl2promis_mode_shift 3 +/* width of bitfield l2_promis_mode */ +#define rpfl2promis_mode_width 1 +/* default value of bitfield l2_promis_mode */ +#define rpfl2promis_mode_default 0x0 + +/* rx l2_uc_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_act0_i[2:0]" + */ + +/* register address for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_msk 0x00070000 +/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_mskn 0xfff8ffff +/* lower bit position of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_shift 16 +/* width of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_width 3 +/* default value of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_default 0x0 + +/* rx l2_uc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_en{f}". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_en_i[0]" + */ + +/* register address for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_shift 31 +/* width of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_width 1 +/* default value of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_default 0x0 + +/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */ +#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8) +/* register address for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_msk 0x0000ffff +/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_shift 0 + +/* rx l2_mc_accept_all bitfield definitions + * Preprocessor definitions for the bitfield "l2_mc_accept_all". 
+ * PORT="pif_rpf_l2_mc_all_accept_i" + */ + +/* Register address for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_adr 0x00005270 +/* Bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_msk 0x00004000 +/* Inverted bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_mskn 0xFFFFBFFF +/* Lower bit position of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_shift 14 +/* Width of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_width 1 +/* Default value of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_default 0x0 + +/* width of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_width 3 +/* default value of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_default 0x0 + +/* rx rss_key_addr[4:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_addr[4:0]". + * port="pif_rpf_rss_key_addr_i[4:0]" + */ + +/* register address for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_adr 0x000054d0 +/* bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_msk 0x0000001f +/* inverted bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_mskn 0xffffffe0 +/* lower bit position of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_shift 0 +/* width of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_width 5 +/* default value of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_default 0x0 + +/* rx rss_key_wr_data[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]". + * port="pif_rpf_rss_key_wr_data_i[31:0]" + */ + +/* register address for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_adr 0x000054d4 +/* bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_mskn 0x00000000 +/* lower bit position of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_shift 0 +/* width of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_width 32 +/* default value of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_default 0x0 + +/* rx rss_key_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_en_i". + * port="pif_rpf_rss_key_wr_en_i" + */ + +/* register address for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_adr 0x000054d0 +/* bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_msk 0x00000020 +/* inverted bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_mskn 0xffffffdf +/* lower bit position of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_shift 5 +/* width of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_width 1 +/* default value of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_default 0x0 + +/* rx rss_redir_addr[3:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_addr[3:0]". 
+ * port="pif_rpf_rss_redir_addr_i[3:0]" + */ + +/* register address for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_adr 0x000054e0 +/* bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_msk 0x0000000f +/* inverted bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_mskn 0xfffffff0 +/* lower bit position of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_shift 0 +/* width of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_width 4 +/* default value of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_default 0x0 + +/* rx rss_redir_wr_data[f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]". + * port="pif_rpf_rss_redir_wr_data_i[15:0]" + */ + +/* register address for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_adr 0x000054e4 +/* bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_msk 0x0000ffff +/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_mskn 0xffff0000 +/* lower bit position of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_shift 0 +/* width of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_width 16 +/* default value of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_default 0x0 + +/* rx rss_redir_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_en_i". + * port="pif_rpf_rss_redir_wr_en_i" + */ + +/* register address for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_adr 0x000054e0 +/* bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_msk 0x00000010 +/* inverted bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_mskn 0xffffffef +/* lower bit position of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_shift 4 +/* width of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_width 1 +/* default value of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_default 0x0 + +/* rx tpo_rpf_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback". + * port="pif_rpf_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_adr 0x00005000 +/* bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_msk 0x00000100 +/* inverted bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff +/* lower bit position of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_shift 8 +/* width of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_width 1 +/* default value of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_default 0x0 + +/* rx vl_inner_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". 
+ * port="pif_rpf_vl_inner_tpid_i[15:0]" + */ + +/* register address for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_adr 0x00005284 +/* bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_msk 0x0000ffff +/* inverted bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_mskn 0xffff0000 +/* lower bit position of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_shift 0 +/* width of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_width 16 +/* default value of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_default 0x8100 + +/* rx vl_outer_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". + * port="pif_rpf_vl_outer_tpid_i[15:0]" + */ + +/* register address for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_adr 0x00005284 +/* bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_msk 0xffff0000 +/* inverted bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_mskn 0x0000ffff +/* lower bit position of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_shift 16 +/* width of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_width 16 +/* default value of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_default 0x88a8 + +/* rx vl_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "vl_promis_mode". + * port="pif_rpf_vl_promis_mode_i" + */ + +/* register address for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_adr 0x00005280 +/* bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_msk 0x00000002 +/* inverted bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_mskn 0xfffffffd +/* lower bit position of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_shift 1 +/* width of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_width 1 +/* default value of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_default 0x0 + +/* RX vl_accept_untagged_mode Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". + * PORT="pif_rpf_vl_accept_untagged_i" + */ + +/* Register address for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_adr 0x00005280 +/* Bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_msk 0x00000004 +/* Inverted bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB +/* Lower bit position of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_shift 2 +/* Width of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_width 1 +/* Default value of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_default 0x0 + +/* rX vl_untagged_act[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". 
+ * PORT="pif_rpf_vl_untagged_act_i[2:0]" + */ + +/* Register address for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_adr 0x00005280 +/* Bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_msk 0x00000038 +/* Inverted bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_mskn 0xFFFFFFC7 +/* Lower bit position of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_shift 3 +/* Width of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_width 3 +/* Default value of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_default 0x0 + +/* RX vl_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_en_i[0]" + */ + +/* Register address for bitfield vl_en{F} */ +#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield vl_en{F} */ +#define rpf_vl_en_f_shift 31 +/* Width of bitfield vl_en{F} */ +#define rpf_vl_en_f_width 1 +/* Default value of bitfield vl_en{F} */ +#define rpf_vl_en_f_default 0x0 + +/* RX vl_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_act0_i[2:0]" + */ + +/* Register address for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_msk 0x00070000 +/* Inverted bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_mskn 0xFFF8FFFF +/* Lower bit position of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_shift 16 +/* Width of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_width 3 +/* Default value of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_default 0x0 + +/* RX vl_id{F}[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_id0_i[11:0]" + */ + +/* Register address for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_msk 0x00000FFF +/* Inverted bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_mskn 0xFFFFF000 +/* Lower bit position of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_shift 0 +/* Width of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_width 12 +/* Default value of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_default 0x0 + +/* RX et_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "et_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_et_en_i[0]" + */ + +/* Register address for bitfield et_en{F} */ +#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4) +/* Bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield et_en{F} */ +#define rpf_et_en_f_shift 31 +/* Width of bitfield et_en{F} */ +#define rpf_et_en_f_width 1 +/* Default value of bitfield et_en{F} */ +#define rpf_et_en_f_default 0x0 + +/* rx et_en{f} bitfield definitions + * preprocessor definitions for the bitfield "et_en{f}". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_en_i[0]" + */ + +/* register address for bitfield et_en{f} */ +#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_en{f} */ +#define rpf_et_enf_msk 0x80000000 +/* inverted bitmask for bitfield et_en{f} */ +#define rpf_et_enf_mskn 0x7fffffff +/* lower bit position of bitfield et_en{f} */ +#define rpf_et_enf_shift 31 +/* width of bitfield et_en{f} */ +#define rpf_et_enf_width 1 +/* default value of bitfield et_en{f} */ +#define rpf_et_enf_default 0x0 + +/* rx et_up{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up_en_i[0]" + */ + +/* register address for bitfield et_up{f}_en */ +#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_msk 0x40000000 +/* inverted bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_mskn 0xbfffffff +/* lower bit position of bitfield et_up{f}_en */ +#define rpf_et_upfen_shift 30 +/* width of bitfield et_up{f}_en */ +#define rpf_et_upfen_width 1 +/* default value of bitfield et_up{f}_en */ +#define rpf_et_upfen_default 0x0 + +/* rx et_rxq{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq_en_i[0]" + */ + +/* register address for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_msk 0x20000000 +/* inverted bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_mskn 0xdfffffff +/* lower bit position of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_shift 29 +/* width of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_width 1 +/* default value of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_default 0x0 + +/* rx et_up{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up0_i[2:0]" + */ + +/* register address for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_msk 0x1c000000 +/* inverted bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_mskn 0xe3ffffff +/* lower bit position of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_shift 26 +/* width of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_width 3 +/* default value of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_default 0x0 + +/* rx et_rxq{f}[4:0] bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq0_i[4:0]" + */ + +/* register address for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_msk 0x01f00000 +/* inverted bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_mskn 0xfe0fffff +/* lower bit position of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_shift 20 +/* width of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_width 5 +/* default value of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_default 0x0 + +/* rx et_mng_rxq{f} bitfield definitions + * preprocessor definitions for the bitfield "et_mng_rxq{f}". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_mng_rxq_i[0]" + */ + +/* register address for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_msk 0x00080000 +/* inverted bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_mskn 0xfff7ffff +/* lower bit position of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_shift 19 +/* width of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_width 1 +/* default value of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_default 0x0 + +/* rx et_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_act{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_act0_i[2:0]" + */ + +/* register address for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_msk 0x00070000 +/* inverted bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_mskn 0xfff8ffff +/* lower bit position of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_shift 16 +/* width of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_width 3 +/* default value of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_default 0x0 + +/* rx et_val{f}[f:0] bitfield definitions + * preprocessor definitions for the bitfield "et_val{f}[f:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_val0_i[15:0]" + */ + +/* register address for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_msk 0x0000ffff +/* inverted bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_mskn 0xffff0000 +/* lower bit position of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_shift 0 +/* width of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_width 16 +/* default value of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_default 0x0 + +/* rx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_rpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_adr 0x00005580 +/* bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_default 0x0 + +/* rx desc{d}_vl_strip bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_vl_strip". 
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rpo_desc_vl_strip_i[0]" + */ + +/* register address for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_msk 0x20000000 +/* inverted bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_mskn 0xdfffffff +/* lower bit position of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_shift 29 +/* width of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_width 1 +/* default value of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_default 0x0 + +/* rx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_rpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define rpol4chk_en_adr 0x00005580 +/* bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define rpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define rpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define rpol4chk_en_default 0x0 + +/* rx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_rx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_adr 0x00005000 +/* bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_default 0x1 + +/* tx dca{d}_cpuid[7:0] bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_msk 0x000000ff +/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_mskn 0xffffff00 +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_shift 0 +/* width of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_width 8 +/* default value of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_default 0x0 + +/* tx lso_en[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_en[1f:0]". + * port="pif_tdm_lso_en_i[31:0]" + */ + +/* register address for bitfield lso_en[1f:0] */ +#define tdm_lso_en_adr 0x00007810 +/* bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_msk 0xffffffff +/* inverted bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_mskn 0x00000000 +/* lower bit position of bitfield lso_en[1f:0] */ +#define tdm_lso_en_shift 0 +/* width of bitfield lso_en[1f:0] */ +#define tdm_lso_en_width 32 +/* default value of bitfield lso_en[1f:0] */ +#define tdm_lso_en_default 0x0 + +/* tx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". 
+ * port="pif_tdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define tdm_dca_en_adr 0x00008480 +/* bitmask for bitfield dca_en */ +#define tdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define tdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define tdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define tdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define tdm_dca_en_default 0x1 + +/* tx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_tdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_adr 0x00008480 +/* bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_default 0x0 + +/* tx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_default 0x0 + +/* tx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_en */ +#define tdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define tdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define tdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define tdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define tdm_descden_default 0x0 + +/* tx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="tdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_width 13 + +/* tx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". 
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_default 0x0 + +/* tx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". + * port="pif_tdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_adr 0x00007b40 +/* bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_msk 0x00000002 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_mskn 0xfffffffd +/* lower bit position of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_shift 1 +/* width of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_default 0x0 + +/* tx desc{d}_wrb_thresh[6:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* register address for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_msk 0x00007f00 +/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_mskn 0xffff80ff +/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_shift 8 +/* width of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_width 7 +/* default value of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_default 0x0 + +/* tx lso_tcp_flag_first[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]". + * port="pif_thm_lso_tcp_flag_first_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_shift 0 +/* width of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_width 12 +/* default value of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_default 0x0 + +/* tx lso_tcp_flag_last[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]". 
+ * port="pif_thm_lso_tcp_flag_last_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_adr 0x00007824 +/* bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_shift 0 +/* width of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_width 12 +/* default value of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_default 0x0 + +/* tx lso_tcp_flag_mid[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]". + * port="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ + +/* Register address for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_adr 0x00005598 +/* Bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_mskn 0x00000000 +/* Lower bit position of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_shift 0 +/* Width of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_width 32 +/* Default value of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_default 0x0 + +/* RX lro_en[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_en[1F:0]". + * PORT="pif_rpo_lro_en_i[31:0]" + */ + +/* Register address for bitfield lro_en[1F:0] */ +#define rpo_lro_en_adr 0x00005590 +/* Bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_mskn 0x00000000 +/* Lower bit position of bitfield lro_en[1F:0] */ +#define rpo_lro_en_shift 0 +/* Width of bitfield lro_en[1F:0] */ +#define rpo_lro_en_width 32 +/* Default value of bitfield lro_en[1F:0] */ +#define rpo_lro_en_default 0x0 + +/* RX lro_ptopt_en Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ptopt_en". + * PORT="pif_rpo_lro_ptopt_en_i" + */ + +/* Register address for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_adr 0x00005594 +/* Bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_msk 0x00008000 +/* Inverted bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF +/* Lower bit position of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_shift 15 +/* Width of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_width 1 +/* Default value of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_defalt 0x1 + +/* RX lro_q_ses_lmt Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_q_ses_lmt". + * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_msk 0x00003000 +/* Inverted bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF +/* Lower bit position of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_shift 12 +/* Width of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_width 2 +/* Default value of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_default 0x1 + +/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]". 
+ * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_msk 0x00000060 +/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F +/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_shift 5 +/* Width of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_width 2 +/* Default value of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_defalt 0x1 + +/* RX lro_pkt_min[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]". + * PORT="pif_rpo_lro_pkt_min_i[4:0]" + */ + +/* Register address for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_adr 0x00005594 +/* Bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_msk 0x0000001F +/* Inverted bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_mskn 0xFFFFFFE0 +/* Lower bit position of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_shift 0 +/* Width of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_width 5 +/* Default value of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_default 0x8 + +/* Width of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_width 2 +/* Default value of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_default 0x0 + +/* RX lro_tb_div[11:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tb_div[11:0]". + * PORT="pif_rpo_lro_tb_div_i[11:0]" + */ + +/* Register address for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_adr 0x00005620 +/* Bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_msk 0xFFF00000 +/* Inverted bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_mskn 0x000FFFFF +/* Lower bit position of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_shift 20 +/* Width of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_width 12 +/* Default value of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_default 0xC35 + +/* RX lro_ina_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]". + * PORT="pif_rpo_lro_ina_ival_i[9:0]" + */ + +/* Register address for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_adr 0x00005620 +/* Bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_msk 0x000FFC00 +/* Inverted bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_mskn 0xFFF003FF +/* Lower bit position of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_shift 10 +/* Width of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_width 10 +/* Default value of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_default 0xA + +/* RX lro_max_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_max_ival[9:0]". 
+ * PORT="pif_rpo_lro_max_ival_i[9:0]" + */ + +/* Register address for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_adr 0x00005620 +/* Bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_msk 0x000003FF +/* Inverted bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_mskn 0xFFFFFC00 +/* Lower bit position of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_shift 0 +/* Width of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_width 10 +/* Default value of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_default 0x19 + +/* TX dca{D}_cpuid[7:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]". + * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* Register address for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_msk 0x000000FF +/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_mskn 0xFFFFFF00 +/* Lower bit position of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_shift 0 +/* Width of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_width 8 +/* Default value of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_default 0x0 + +/* TX dca{D}_desc_en Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_desc_en". + * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca_desc_en_i[0]" + */ + +/* Register address for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_msk 0x80000000 +/* Inverted bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF +/* Lower bit position of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_shift 31 +/* Width of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_width 1 +/* Default value of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_default 0x0 + +/* TX desc{D}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_en". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc_en_i[0]" + */ + +/* Register address for bitfield desc{D}_en */ +#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_msk 0x80000000 +/* Inverted bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_mskn 0x7FFFFFFF +/* Lower bit position of bitfield desc{D}_en */ +#define tdm_desc_den_shift 31 +/* Width of bitfield desc{D}_en */ +#define tdm_desc_den_width 1 +/* Default value of bitfield desc{D}_en */ +#define tdm_desc_den_default 0x0 + +/* TX desc{D}_hd[C:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]". 
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="tdm_pif_desc0_hd_o[12:0]" + */ + +/* Register address for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_msk 0x00001FFF +/* Inverted bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_mskn 0xFFFFE000 +/* Lower bit position of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_shift 0 +/* Width of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_width 13 + +/* TX desc{D}_len[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_len[9:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_len_i[9:0]" + */ + +/* Register address for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_msk 0x00001FF8 +/* Inverted bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_mskn 0xFFFFE007 +/* Lower bit position of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_shift 3 +/* Width of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_width 10 +/* Default value of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_default 0x0 + +/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* Register address for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_adr(descriptor) \ + (0x00007C18 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_msk 0x00007F00 +/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF +/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_shift 8 +/* Width of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_width 7 +/* Default value of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_default 0x0 + +/* TX tdm_int_mod_en Bitfield Definitions + * Preprocessor definitions for the bitfield "tdm_int_mod_en". + * PORT="pif_tdm_int_mod_en_i" + */ + +/* Register address for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_adr 0x00007B40 +/* Bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_msk 0x00000010 +/* Inverted bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_mskn 0xFFFFFFEF +/* Lower bit position of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_shift 4 +/* Width of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_width 1 +/* Default value of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_default 0x0 + +/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]". 
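+ * Note: together with lso_tcp_flag_first and lso_tcp_flag_last above, this
+ * presumably selects which TCP header flag bits the LSO engine preserves
+ * on the middle segments of a split frame.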
+ * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ +/* register address for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_msk 0x0fff0000 +/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_mskn 0xf000ffff +/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_shift 16 +/* width of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_width 12 +/* default value of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_default 0x0 + +/* tx tx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "tx_buf_en". + * port="pif_tpb_tx_buf_en_i" + */ + +/* register address for bitfield tx_buf_en */ +#define tpb_tx_buf_en_adr 0x00007900 +/* bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield tx_buf_en */ +#define tpb_tx_buf_en_shift 0 +/* width of bitfield tx_buf_en */ +#define tpb_tx_buf_en_width 1 +/* default value of bitfield tx_buf_en */ +#define tpb_tx_buf_en_default 0x0 + +/* tx tx{b}_hi_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_hi_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_msk 0x1fff0000 +/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_mskn 0xe000ffff +/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_shift 16 +/* width of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_width 13 +/* default value of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_default 0x0 + +/* tx tx{b}_lo_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_lo_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_msk 0x00001fff +/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_mskn 0xffffe000 +/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_shift 0 +/* width of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_width 13 +/* default value of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_default 0x0 + +/* tx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". 
+ * port="pif_tpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_adr 0x00007000 +/* bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_default 0x0 + +/* tx tx{b}_buf_size[7:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_buf_size_i[7:0]" + */ + +/* register address for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_msk 0x000000ff +/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_mskn 0xffffff00 +/* lower bit position of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_shift 0 +/* width of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_width 8 +/* default value of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_default 0x0 + +/* tx tx_scp_ins_en bitfield definitions + * preprocessor definitions for the bitfield "tx_scp_ins_en". + * port="pif_tpb_scp_ins_en_i" + */ + +/* register address for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_adr 0x00007900 +/* bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_msk 0x00000004 +/* inverted bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_mskn 0xfffffffb +/* lower bit position of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_shift 2 +/* width of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_width 1 +/* default value of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_default 0x0 + +/* tx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_tpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_adr 0x00007800 +/* bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_default 0x0 + +/* tx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_tpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define tpol4chk_en_adr 0x00007800 +/* bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define tpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define tpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define tpol4chk_en_default 0x0 + +/* tx pkt_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "pkt_sys_loopback". 
+ * port="pif_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_adr 0x00007000 +/* bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_msk 0x00000080 +/* inverted bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_mskn 0xffffff7f +/* lower bit position of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_shift 7 +/* width of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_width 1 +/* default value of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_default 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". + * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_adr 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_shift 0 +/* width of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_width 1 +/* default value of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_default 0x0 + +/* tx desc_rate_ta_rst bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_ta_rst". + * port="pif_tps_desc_rate_ta_rst_i" + */ + +/* register address for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_adr 0x00007310 +/* bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_msk 0x80000000 +/* inverted bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_mskn 0x7fffffff +/* lower bit position of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_shift 31 +/* width of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_width 1 +/* default value of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_default 0x0 + +/* tx desc_rate_limit[a:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_limit[a:0]". + * port="pif_tps_desc_rate_lim_i[10:0]" + */ + +/* register address for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_adr 0x00007310 +/* bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_msk 0x000007ff +/* inverted bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_mskn 0xfffff800 +/* lower bit position of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_shift 0 +/* width of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_width 11 +/* default value of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_default 0x0 + +/* tx desc_tc_arb_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]". 
+ * port="pif_tps_desc_tc_arb_mode_i[1:0]" + */ + +/* register address for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_adr 0x00007200 +/* bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_msk 0x00000003 +/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_mskn 0xfffffffc +/* lower bit position of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_shift 0 +/* width of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_width 2 +/* default value of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_default 0x0 + +/* tx desc_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_shift 16 +/* width of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_width 12 +/* default value of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_default 0x0 + +/* tx desc_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_weight_i[8:0]" + */ + +/* register address for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_shift 0 +/* width of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_width 9 +/* default value of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_default 0x0 + +/* tx desc_vm_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "desc_vm_arb_mode". + * port="pif_tps_desc_vm_arb_mode_i" + */ + +/* register address for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_adr 0x00007300 +/* bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_shift 0 +/* width of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_width 1 +/* default value of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_default 0x0 + +/* tx data_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_shift 16 +/* width of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_width 12 +/* default value of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_default 0x0 + +/* tx data_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_weight_i[8:0]" + */ + +/* register address for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_shift 0 +/* width of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_width 9 +/* default value of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_default 0x0 + +/* tx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_tx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_adr 0x00007000 +/* bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_default 0x1 + +/* mac_phy register access busy bitfield definitions + * preprocessor definitions for the bitfield "register access busy". + * port="msm_pif_reg_busy_o" + */ + +/* register address for bitfield register access busy */ +#define msm_reg_access_busy_adr 0x00004400 +/* bitmask for bitfield register access busy */ +#define msm_reg_access_busy_msk 0x00001000 +/* inverted bitmask for bitfield register access busy */ +#define msm_reg_access_busy_mskn 0xffffefff +/* lower bit position of bitfield register access busy */ +#define msm_reg_access_busy_shift 12 +/* width of bitfield register access busy */ +#define msm_reg_access_busy_width 1 + +/* mac_phy msm register address[7:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register address[7:0]". 
+ * port="pif_msm_reg_addr_i[7:0]" + */ + +/* register address for bitfield msm register address[7:0] */ +#define msm_reg_addr_adr 0x00004400 +/* bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_msk 0x000000ff +/* inverted bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_mskn 0xffffff00 +/* lower bit position of bitfield msm register address[7:0] */ +#define msm_reg_addr_shift 0 +/* width of bitfield msm register address[7:0] */ +#define msm_reg_addr_width 8 +/* default value of bitfield msm register address[7:0] */ +#define msm_reg_addr_default 0x0 + +/* mac_phy register read strobe bitfield definitions + * preprocessor definitions for the bitfield "register read strobe". + * port="pif_msm_reg_rden_i" + */ + +/* register address for bitfield register read strobe */ +#define msm_reg_rd_strobe_adr 0x00004400 +/* bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_msk 0x00000200 +/* inverted bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_mskn 0xfffffdff +/* lower bit position of bitfield register read strobe */ +#define msm_reg_rd_strobe_shift 9 +/* width of bitfield register read strobe */ +#define msm_reg_rd_strobe_width 1 +/* default value of bitfield register read strobe */ +#define msm_reg_rd_strobe_default 0x0 + +/* mac_phy msm register read data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register read data[31:0]". + * port="msm_pif_reg_rd_data_o[31:0]" + */ + +/* register address for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_adr 0x00004408 +/* bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_mskn 0x00000000 +/* lower bit position of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_shift 0 +/* width of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_width 32 + +/* mac_phy msm register write data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register write data[31:0]". + * port="pif_msm_reg_wr_data_i[31:0]" + */ + +/* register address for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_adr 0x00004404 +/* bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_mskn 0x00000000 +/* lower bit position of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_shift 0 +/* width of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_width 32 +/* default value of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_default 0x0 + +/* mac_phy register write strobe bitfield definitions + * preprocessor definitions for the bitfield "register write strobe". 
+ * port="pif_msm_reg_wren_i" + */ + +/* register address for bitfield register write strobe */ +#define msm_reg_wr_strobe_adr 0x00004400 +/* bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_msk 0x00000100 +/* inverted bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_mskn 0xfffffeff +/* lower bit position of bitfield register write strobe */ +#define msm_reg_wr_strobe_shift 8 +/* width of bitfield register write strobe */ +#define msm_reg_wr_strobe_width 1 +/* default value of bitfield register write strobe */ +#define msm_reg_wr_strobe_default 0x0 + +/* mif soft reset bitfield definitions + * preprocessor definitions for the bitfield "soft reset". + * port="pif_glb_res_i" + */ + +/* register address for bitfield soft reset */ +#define glb_soft_res_adr 0x00000000 +/* bitmask for bitfield soft reset */ +#define glb_soft_res_msk 0x00008000 +/* inverted bitmask for bitfield soft reset */ +#define glb_soft_res_mskn 0xffff7fff +/* lower bit position of bitfield soft reset */ +#define glb_soft_res_shift 15 +/* width of bitfield soft reset */ +#define glb_soft_res_width 1 +/* default value of bitfield soft reset */ +#define glb_soft_res_default 0x0 + +/* mif register reset disable bitfield definitions + * preprocessor definitions for the bitfield "register reset disable". + * port="pif_glb_reg_res_dsbl_i" + */ + +/* register address for bitfield register reset disable */ +#define glb_reg_res_dis_adr 0x00000000 +/* bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_msk 0x00004000 +/* inverted bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_mskn 0xffffbfff +/* lower bit position of bitfield register reset disable */ +#define glb_reg_res_dis_shift 14 +/* width of bitfield register reset disable */ +#define glb_reg_res_dis_width 1 +/* default value of bitfield register reset disable */ +#define glb_reg_res_dis_default 0x1 + +/* tx dma debug control definitions */ +#define tx_dma_debug_ctl_adr 0x00008920u + +/* tx dma descriptor base address msw definitions */ +#define tx_dma_desc_base_addrmsw_adr(descriptor) \ + (0x00007c04u + (descriptor) * 0x40) + +/* tx interrupt moderation control register definitions + * Preprocessor definitions for TX Interrupt Moderation Control Register + * Base Address: 0x00008980 + * Parameter: queue {Q} | stride size 0x4 | range [0, 31] + */ + +#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4) + +/* pcie reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". 
+ * port="pif_pci_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_adr 0x00001000 +/* bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_default 0x1 + +/* global microprocessor scratch pad definitions */ +#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) + +#endif /* HW_ATL_LLH_INTERNAL_H */ -- cgit v1.2.3 From 97bde5c4f909a55ab4c36cf0ac9094f6c9e4cdf6 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:12 -0800 Subject: net: ethernet: aquantia: Support for NIC-specific code Add support for code specific to the Atlantic NIC. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 273 ++++++ drivers/net/ethernet/aquantia/atlantic/aq_main.h | 17 + drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 937 +++++++++++++++++++++ drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 108 +++ .../ethernet/aquantia/atlantic/aq_nic_internal.h | 46 + 5 files changed, 1381 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_main.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_main.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_nic.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_nic.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c new file mode 100644 index 000000000000..c17c70adef0d --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -0,0 +1,273 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.c: Main file for aQuantia Linux driver. 
*/ + +#include "aq_main.h" +#include "aq_nic.h" +#include "aq_pci_func.h" +#include "aq_ethtool.h" +#include "hw_atl/hw_atl_a0.h" +#include "hw_atl/hw_atl_b0.h" + +#include +#include + +static const struct pci_device_id aq_pci_tbl[] = { + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), }, + {} +}; + +MODULE_DEVICE_TABLE(pci, aq_pci_tbl); + +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(AQ_CFG_DRV_VERSION); +MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); +MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); + +static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev) +{ + struct aq_hw_ops *ops = NULL; + + ops = hw_atl_a0_get_ops_by_id(pdev); + if (!ops) + ops = hw_atl_b0_get_ops_by_id(pdev); + + return ops; +} + +static int aq_ndev_open(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = NULL; + int err = 0; + + aq_nic = aq_nic_alloc_hot(ndev); + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + err = aq_nic_init(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) + aq_nic_deinit(aq_nic); + return err; +} + +static int aq_ndev_close(struct net_device *ndev) +{ + int err = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + err = aq_nic_stop(aq_nic); + if (err < 0) + goto err_exit; + aq_nic_deinit(aq_nic); + aq_nic_free_hot_resources(aq_nic); + +err_exit: + return err; +} + +static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = aq_nic_xmit(aq_nic, skb); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + if (new_mtu == ndev->mtu) { + err = 0; + goto err_exit; + } + if (new_mtu < 68) { + err = -EINVAL; + goto err_exit; + } + err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); + if (err < 0) + goto err_exit; + ndev->mtu = new_mtu; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + +err_exit: + return err; +} + +static int aq_ndev_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); + bool is_lro = false; + + if (aq_cfg->hw_features & NETIF_F_LRO) { + is_lro = features & NETIF_F_LRO; + + if (aq_cfg->is_lro != is_lro) { + aq_cfg->is_lro = is_lro; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + } + } + + return 0; +} + +static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = eth_mac_addr(ndev, addr); + if (err < 0) + goto err_exit; + err = aq_nic_set_mac(aq_nic, ndev); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static void aq_ndev_set_multicast_settings(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = aq_nic_set_packet_filter(aq_nic, ndev->flags); + if (err < 0) + goto err_exit; + + if (netdev_mc_count(ndev)) { + err = aq_nic_set_multicast_list(aq_nic, ndev); + if (err < 0) + goto err_exit; + } + +err_exit:; +} + +static const struct net_device_ops aq_ndev_ops = { + .ndo_open = aq_ndev_open, + .ndo_stop = aq_ndev_close, + 
.ndo_start_xmit = aq_ndev_start_xmit, + .ndo_set_rx_mode = aq_ndev_set_multicast_settings, + .ndo_change_mtu = aq_ndev_change_mtu, + .ndo_set_mac_address = aq_ndev_set_mac_address, + .ndo_set_features = aq_ndev_set_features +}; + +static int aq_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct aq_hw_ops *aq_hw_ops = NULL; + struct aq_pci_func_s *aq_pci_func = NULL; + int err = 0; + + err = pci_enable_device(pdev); + if (err < 0) + goto err_exit; + aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev); + aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev, + &aq_ndev_ops, &aq_ethtool_ops); + if (!aq_pci_func) { + err = -ENOMEM; + goto err_exit; + } + err = aq_pci_func_init(aq_pci_func); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) { + if (aq_pci_func) + aq_pci_func_free(aq_pci_func); + } + return err; +} + +static void aq_pci_remove(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + aq_pci_func_deinit(aq_pci_func); + aq_pci_func_free(aq_pci_func); +} + +static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static int aq_pci_resume(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + pm_message_t pm_msg = PMSG_RESTORE; + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static struct pci_driver aq_pci_ops = { + .name = AQ_CFG_DRV_NAME, + .id_table = aq_pci_tbl, + .probe = aq_pci_probe, + .remove = aq_pci_remove, + .suspend = aq_pci_suspend, + .resume = aq_pci_resume, +}; + +static int __init aq_module_init(void) +{ + int err = 0; + + err = pci_register_driver(&aq_pci_ops); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static void __exit aq_module_exit(void) +{ + pci_unregister_driver(&aq_pci_ops); +} + +module_init(aq_module_init); +module_exit(aq_module_exit); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h new file mode 100644 index 000000000000..9748e7e575e0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -0,0 +1,17 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.h: Main file for aQuantia Linux driver. */ + +#ifndef AQ_MAIN_H +#define AQ_MAIN_H + +#include "aq_common.h" + +#endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c new file mode 100644 index 000000000000..84bb44186750 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -0,0 +1,937 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.c: Definition of common code for NIC. 
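+ * It holds the per-NIC configuration and defaults, the service and
+ * polling timers, vector/ring bring-up, and the skb mapping and transmit
+ * path shared by the hardware-specific backends.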
*/ + +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include "aq_pci_func.h" +#include "aq_nic_internal.h" + +#include +#include +#include +#include +#include +#include +#include + +static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + struct aq_rss_parameters *rss_params = &cfg->aq_rss; + int i = 0; + + static u8 rss_key[40] = { + 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, + 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, + 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, + 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70, + 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c + }; + + rss_params->hash_secret_key_size = sizeof(rss_key); + memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key)); + rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX; + + for (i = rss_params->indirection_table_size; i--;) + rss_params->indirection_table[i] = i & (num_rss_queues - 1); +} + +/* Fills aq_nic_cfg with valid defaults */ +static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + cfg->aq_hw_caps = &self->aq_hw_caps; + + cfg->vecs = AQ_CFG_VECS_DEF; + cfg->tcs = AQ_CFG_TCS_DEF; + + cfg->rxds = AQ_CFG_RXDS_DEF; + cfg->txds = AQ_CFG_TXDS_DEF; + + cfg->is_polling = AQ_CFG_IS_POLLING_DEF; + + cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; + cfg->itr = cfg->is_interrupt_moderation ? + AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; + + cfg->is_rss = AQ_CFG_IS_RSS_DEF; + cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; + cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; + cfg->flow_control = AQ_CFG_FC_MODE; + + cfg->mtu = AQ_CFG_MTU_DEF; + cfg->link_speed_msk = AQ_CFG_SPEED_MSK; + cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF; + + cfg->is_lro = AQ_CFG_IS_LRO_DEF; + + cfg->vlan_id = 0U; + + aq_nic_rss_init(self, cfg->num_rss_queues); +} + +/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ +int aq_nic_cfg_start(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + /*descriptors */ + cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds); + cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds); + + /*rss rings */ + cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs); + cfg->vecs = min(cfg->vecs, num_online_cpus()); + /* cfg->vecs should be power of 2 for RSS */ + if (cfg->vecs >= 8U) + cfg->vecs = 8U; + else if (cfg->vecs >= 4U) + cfg->vecs = 4U; + else if (cfg->vecs >= 2U) + cfg->vecs = 2U; + else + cfg->vecs = 1U; + + cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); + + if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || + (self->aq_hw_caps.vecs == 1U) || + (cfg->vecs == 1U)) { + cfg->is_rss = 0U; + cfg->vecs = 1U; + } + + cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk; + cfg->hw_features = self->aq_hw_caps.hw_features; + return 0; +} + +static void aq_nic_service_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct net_device *ndev = aq_nic_get_ndev(self); + int err = 0; + bool is_busy = false; + unsigned int i = 0U; + struct aq_hw_link_status_s link_status; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + atomic_inc(&self->header.busy_count); + is_busy = true; + if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) + goto err_exit; + + err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); + if (err < 0) + goto err_exit; + + 
self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + + if (memcmp(&link_status, &self->link_status, sizeof(link_status))) { + if (link_status.mbps) { + aq_utils_obj_set(&self->header.flags, + AQ_NIC_FLAG_STARTED); + aq_utils_obj_clear(&self->header.flags, + AQ_NIC_LINK_DOWN); + netif_carrier_on(self->ndev); + } else { + netif_carrier_off(self->ndev); + aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); + } + + self->link_status = link_status; + } + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); + } + + ndev->stats.rx_packets = stats_rx.packets; + ndev->stats.rx_bytes = stats_rx.bytes; + ndev->stats.rx_errors = stats_rx.errors; + ndev->stats.tx_packets = stats_tx.packets; + ndev->stats.tx_bytes = stats_tx.bytes; + ndev->stats.tx_errors = stats_tx.errors; + +err_exit: + if (is_busy) + atomic_dec(&self->header.busy_count); + mod_timer(&self->service_timer, + jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); +} + +static void aq_nic_polling_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_isr(i, (void *)aq_vec); + + mod_timer(&self->polling_timer, jiffies + + AQ_CFG_POLLING_TIMER_INTERVAL); +} + +static struct net_device *aq_nic_ndev_alloc(void) +{ + return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); +} + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops) +{ + struct net_device *ndev = NULL; + struct aq_nic_s *self = NULL; + int err = 0; + + ndev = aq_nic_ndev_alloc(); + self = netdev_priv(ndev); + if (!self) { + err = -EINVAL; + goto err_exit; + } + + ndev->netdev_ops = ndev_ops; + ndev->ethtool_ops = et_ops; + + SET_NETDEV_DEV(ndev, dev); + + ndev->if_port = port; + self->ndev = ndev; + + self->aq_pci_func = aq_pci_func; + + self->aq_hw_ops = *aq_hw_ops; + self->port = (u8)port; + + self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, + &self->aq_hw_ops); + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + aq_nic_cfg_init_defaults(self); + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +int aq_nic_ndev_register(struct aq_nic_s *self) +{ + int err = 0; + unsigned int i = 0U; + + if (!self->ndev) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw, + self->aq_nic_cfg.aq_hw_caps, + self->ndev->dev_addr); + if (err < 0) + goto err_exit; + +#if defined(AQ_CFG_MAC_ADDR_PERMANENT) + { + static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT; + + ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); + } +#endif + err = register_netdev(self->ndev); + if (err < 0) + goto err_exit; + + self->is_ndev_registered = true; + netif_carrier_off(self->ndev); + + for (i = AQ_CFG_VECS_MAX; i--;) + aq_nic_ndev_queue_stop(self, i); + +err_exit: + return err; +} + +int aq_nic_ndev_init(struct aq_nic_s *self) +{ + struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps; + struct aq_nic_cfg_s *aq_nic_cfg = 
&self->aq_nic_cfg; + + self->ndev->hw_features |= aq_hw_caps->hw_features; + self->ndev->features = aq_hw_caps->hw_features; + self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; + self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; + + return 0; +} + +void aq_nic_ndev_free(struct aq_nic_s *self) +{ + if (!self->ndev) + goto err_exit; + + if (self->is_ndev_registered) + unregister_netdev(self->ndev); + + if (self->aq_hw) + self->aq_hw_ops.destroy(self->aq_hw); + + free_netdev(self->ndev); + +err_exit:; +} + +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) +{ + struct aq_nic_s *self = NULL; + int err = 0; + + if (!ndev) { + err = -EINVAL; + goto err_exit; + } + self = netdev_priv(ndev); + + if (!self) { + err = -EINVAL; + goto err_exit; + } + if (netif_running(ndev)) { + unsigned int i; + + for (i = AQ_CFG_VECS_MAX; i--;) + netif_stop_subqueue(ndev, i); + } + + for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; + self->aq_vecs++) { + self->aq_vec[self->aq_vecs] = + aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg); + if (!self->aq_vec[self->aq_vecs]) { + err = -ENOMEM; + goto err_exit; + } + } + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring) +{ + self->aq_ring_tx[idx] = ring; +} + +struct device *aq_nic_get_dev(struct aq_nic_s *self) +{ + return self->ndev->dev.parent; +} + +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self) +{ + return self->ndev; +} + +int aq_nic_init(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + self->power_state = AQ_HW_POWER_STATE_D0; + err = self->aq_hw_ops.hw_reset(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg, + aq_nic_get_ndev(self)->dev_addr); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw); + +err_exit: + return err; +} + +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) +{ + netif_start_subqueue(self->ndev, idx); +} + +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) +{ + netif_stop_subqueue(self->ndev, idx); +} + +int aq_nic_start(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, + self->packet_filter); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + err = aq_vec_start(aq_vec); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_start(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + if (err < 0) + goto err_exit; + setup_timer(&self->service_timer, &aq_nic_service_timer_cb, + (unsigned long)self); + mod_timer(&self->service_timer, jiffies + + AQ_CFG_SERVICE_TIMER_INTERVAL); + + if (self->aq_nic_cfg.is_polling) { + setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb, + (unsigned long)self); + mod_timer(&self->polling_timer, jiffies + + AQ_CFG_POLLING_TIMER_INTERVAL); + } else { + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = 
self->aq_vec[i]) { + err = aq_pci_func_alloc_irq(self->aq_pci_func, i, + self->ndev->name, aq_vec, + aq_vec_get_affinity_mask(aq_vec)); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_irq_enable(self->aq_hw, + AQ_CFG_IRQ_MASK); + if (err < 0) + goto err_exit; + } + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_start(self, i); + + err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + + err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + unsigned int ret = 0U; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int frag_count = 0U; + + dx->flags = 0U; + dx->len = skb_headlen(skb); + dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len, + DMA_TO_DEVICE); + dx->len_pkt = skb->len; + dx->is_sop = 1U; + dx->is_mapped = 1U; + + ++ret; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U; + dx->is_tcp_cso = + (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; + dx->is_udp_cso = + (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; + } + + for (; nr_frags--; ++frag_count) { + unsigned int frag_len; + dma_addr_t frag_pa; + skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; + + frag_len = skb_frag_size(frag); + + frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, + frag_len, DMA_TO_DEVICE); + + while (frag_len > AQ_CFG_TX_FRAME_MAX) { + ++dx; + ++ret; + dx->flags = 0U; + dx->len = AQ_CFG_TX_FRAME_MAX; + dx->pa = frag_pa; + dx->is_mapped = 1U; + + frag_len -= AQ_CFG_TX_FRAME_MAX; + frag_pa += AQ_CFG_TX_FRAME_MAX; + } + + ++dx; + ++ret; + + dx->flags = 0U; + dx->len = frag_len; + dx->pa = frag_pa; + dx->is_mapped = 1U; + } + + dx->is_eop = 1U; + dx->skb = skb; + + return ret; +} + +static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + dx->flags = 0U; + dx->len_pkt = skb->len; + dx->len_l2 = ETH_HLEN; + dx->len_l3 = ip_hdrlen(skb); + dx->len_l4 = tcp_hdrlen(skb); + dx->mss = skb_shinfo(skb)->gso_size; + dx->is_txc = 1U; + return 1U; +} + +static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + unsigned int ret = 0U; + + if (unlikely(skb_is_gso(skb))) { + ret = aq_nic_map_skb_lso(self, skb, dx); + ++dx; + } + + ret += aq_nic_map_skb_frag(self, skb, dx); + + return ret; +} + +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) +__releases(&ring->lock) +__acquires(&ring->lock) +{ + struct aq_ring_s *ring = NULL; + unsigned int frags = 0U; + unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; + unsigned int tc = 0U; + unsigned int trys = AQ_CFG_LOCK_TRYS; + int err = 0; + bool is_nic_in_bad_state; + bool is_busy = false; + struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX]; + + frags = skb_shinfo(skb)->nr_frags + 1; + + ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; + + atomic_inc(&self->header.busy_count); + is_busy = true; + + if (frags > AQ_CFG_SKB_FRAGS_MAX) { + dev_kfree_skb_any(skb); + goto err_exit; + } + + is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, + AQ_NIC_FLAGS_IS_NOT_TX_READY) || + (aq_ring_avail_dx(ring) < + AQ_CFG_SKB_FRAGS_MAX); + + if (is_nic_in_bad_state) { + aq_nic_ndev_queue_stop(self, ring->idx); + err = 
NETDEV_TX_BUSY; + goto err_exit; + } + + do { + if (spin_trylock(&ring->header.lock)) { + frags = aq_nic_map_skb(self, skb, &buffers[0]); + + aq_ring_tx_append_buffs(ring, &buffers[0], frags); + + err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, + ring, frags); + if (err >= 0) { + if (aq_ring_avail_dx(ring) < + AQ_CFG_SKB_FRAGS_MAX + 1) + aq_nic_ndev_queue_stop(self, ring->idx); + } + spin_unlock(&ring->header.lock); + + if (err >= 0) { + ++ring->stats.tx.packets; + ring->stats.tx.bytes += skb->len; + } + break; + } + } while (--trys); + + if (!trys) { + err = NETDEV_TX_BUSY; + goto err_exit; + } + +err_exit: + if (is_busy) + atomic_dec(&self->header.busy_count); + return err; +} + +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) +{ + int err = 0; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags); + if (err < 0) + goto err_exit; + + self->packet_filter = flags; + +err_exit: + return err; +} + +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) +{ + struct netdev_hw_addr *ha = NULL; + unsigned int i = 0U; + + self->mc_list.count = 0U; + + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(self->mc_list.ar[i++], ha->addr); + ++self->mc_list.count; + } + + return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); +} + +int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) +{ + int err = 0; + + if (new_mtu > self->aq_hw_caps.mtu) { + err = -EINVAL; + goto err_exit; + } + self->aq_nic_cfg.mtu = new_mtu; + +err_exit: + return err; +} + +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) +{ + return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr); +} + +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self) +{ + return self->link_status.mbps; +} + +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) +{ + u32 *regs_buff = p; + int err = 0; + + regs->version = 1; + + err = self->aq_hw_ops.hw_get_regs(self->aq_hw, + &self->aq_hw_caps, regs_buff); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +int aq_nic_get_regs_count(struct aq_nic_s *self) +{ + return self->aq_hw_caps.mac_regs_count; +} + +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + unsigned int count = 0U; + int err = 0; + + err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); + if (err < 0) + goto err_exit; + + data += count; + count = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + data += count; + aq_vec_get_sw_stats(aq_vec, data, &count); + } + +err_exit:; + (void)err; +} + +void aq_nic_get_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd) +{ + cmd->port = PORT_TP; + cmd->transceiver = XCVR_EXTERNAL; + /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = self->aq_nic_cfg.is_autoneg; + + cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ? + ADVERTISED_10000baseT_Full : 0U; + cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ? + ADVERTISED_1000baseT_Full : 0U; + cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ? + ADVERTISED_100baseT_Full : 0U; + cmd->supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0; + cmd->supported |= SUPPORTED_Autoneg; + + cmd->advertising = (self->aq_nic_cfg.is_autoneg) ? 
+ ADVERTISED_Autoneg : 0U; + cmd->advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ? + ADVERTISED_10000baseT_Full : 0U; + cmd->advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ? + ADVERTISED_1000baseT_Full : 0U; + + cmd->advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ? + ADVERTISED_100baseT_Full : 0U; + cmd->advertising |= (self->aq_nic_cfg.flow_control) ? + ADVERTISED_Pause : 0U; +} + +int aq_nic_set_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd) +{ + u32 speed = 0U; + u32 rate = 0U; + int err = 0; + + if (cmd->autoneg == AUTONEG_ENABLE) { + rate = self->aq_hw_caps.link_speed_msk; + self->aq_nic_cfg.is_autoneg = true; + } else { + speed = ethtool_cmd_speed(cmd); + + switch (speed) { + case SPEED_100: + rate = AQ_NIC_RATE_100M; + break; + + case SPEED_1000: + rate = AQ_NIC_RATE_1G; + break; + + case SPEED_2500: + rate = AQ_NIC_RATE_2GS; + break; + + case SPEED_5000: + rate = AQ_NIC_RATE_5G; + break; + + case SPEED_10000: + rate = AQ_NIC_RATE_10G; + break; + + default: + err = -1; + goto err_exit; + break; + } + if (!(self->aq_hw_caps.link_speed_msk & rate)) { + err = -1; + goto err_exit; + } + + self->aq_nic_cfg.is_autoneg = false; + } + + err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate); + if (err < 0) + goto err_exit; + + self->aq_nic_cfg.link_speed_msk = rate; + +err_exit: + return err; +} + +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self) +{ + return &self->aq_nic_cfg; +} + +u32 aq_nic_get_fw_version(struct aq_nic_s *self) +{ + u32 fw_version = 0U; + + self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version); + + return fw_version; +} + +int aq_nic_stop(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_stop(self, i); + + del_timer_sync(&self->service_timer); + + self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); + + if (self->aq_nic_cfg.is_polling) + del_timer_sync(&self->polling_timer); + else + aq_pci_func_free_irqs(self->aq_pci_func); + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_stop(aq_vec); + + return self->aq_hw_ops.hw_stop(self->aq_hw); +} + +void aq_nic_deinit(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_deinit(aq_vec); + + if (self->power_state == AQ_HW_POWER_STATE_D0) { + (void)self->aq_hw_ops.hw_deinit(self->aq_hw); + } else { + (void)self->aq_hw_ops.hw_set_power(self->aq_hw, + self->power_state); + } + +err_exit:; +} + +void aq_nic_free_hot_resources(struct aq_nic_s *self) +{ + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_free(self->aq_vec[i]); + } + +err_exit:; +} + +int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) +{ + int err = 0; + + if (!netif_running(self->ndev)) { + err = 0; + goto err_exit; + } + rtnl_lock(); + if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { + self->power_state = AQ_HW_POWER_STATE_D3; + netif_device_detach(self->ndev); + netif_tx_stop_all_queues(self->ndev); + + err = aq_nic_stop(self); + if (err < 0) + goto err_exit; + + aq_nic_deinit(self); + } else { + err = aq_nic_init(self); + if (err < 0) + goto err_exit; + + err = 
aq_nic_start(self); + if (err < 0) + goto err_exit; + + netif_device_attach(self->ndev); + netif_tx_start_all_queues(self->ndev); + } + rtnl_unlock(); + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h new file mode 100644 index 000000000000..055e2cdb0f6f --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -0,0 +1,108 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.h: Declaration of common code for NIC. */ + +#ifndef AQ_NIC_H +#define AQ_NIC_H + +#include "aq_common.h" +#include "aq_rss.h" + +struct aq_ring_s; +struct aq_pci_func_s; +struct aq_hw_ops; + +#define AQ_NIC_FC_OFF 0U +#define AQ_NIC_FC_TX 1U +#define AQ_NIC_FC_RX 2U +#define AQ_NIC_FC_FULL 3U +#define AQ_NIC_FC_AUTO 4U + +#define AQ_NIC_RATE_10G BIT(0) +#define AQ_NIC_RATE_5G BIT(1) +#define AQ_NIC_RATE_5GSR BIT(2) +#define AQ_NIC_RATE_2GS BIT(3) +#define AQ_NIC_RATE_1G BIT(4) +#define AQ_NIC_RATE_100M BIT(5) + +struct aq_nic_cfg_s { + struct aq_hw_caps_s *aq_hw_caps; + u64 hw_features; + u32 rxds; /* rx ring size, descriptors # */ + u32 txds; /* tx ring size, descriptors # */ + u32 vecs; /* vecs==allocated irqs */ + u32 irq_type; + u32 itr; + u32 num_rss_queues; + u32 mtu; + u32 ucp_0x364; + u32 flow_control; + u32 link_speed_msk; + u32 vlan_id; + u16 is_mc_list_enabled; + u16 mc_list_count; + bool is_autoneg; + bool is_interrupt_moderation; + bool is_polling; + bool is_rss; + bool is_lro; + u8 tcs; + struct aq_rss_parameters aq_rss; +}; + +#define AQ_NIC_FLAG_STARTED 0x00000004U +#define AQ_NIC_FLAG_STOPPING 0x00000008U +#define AQ_NIC_FLAG_RESETTING 0x00000010U +#define AQ_NIC_FLAG_CLOSING 0x00000020U +#define AQ_NIC_LINK_DOWN 0x04000000U +#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U +#define AQ_NIC_FLAG_ERR_HW 0x80000000U + +#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ + ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops); +int aq_nic_ndev_init(struct aq_nic_s *self); +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev); +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring); +struct device *aq_nic_get_dev(struct aq_nic_s *self); +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); +int aq_nic_init(struct aq_nic_s *self); +int aq_nic_cfg_start(struct aq_nic_s *self); +int aq_nic_ndev_register(struct aq_nic_s *self); +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx); +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx); +void aq_nic_ndev_free(struct aq_nic_s *self); +int aq_nic_start(struct aq_nic_s *self); +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p); +int aq_nic_get_regs_count(struct aq_nic_s *self); +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data); +int aq_nic_stop(struct aq_nic_s *self); +void aq_nic_deinit(struct aq_nic_s *self); +void aq_nic_free_hot_resources(struct aq_nic_s *self); +int aq_nic_set_mtu(struct 
aq_nic_s *self, int new_mtu); +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev); +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags); +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev); +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self); +void aq_nic_get_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd); +int aq_nic_set_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd); +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); +u32 aq_nic_get_fw_version(struct aq_nic_s *self); +int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); + +#endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h new file mode 100644 index 000000000000..f81738a71c42 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h @@ -0,0 +1,46 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic_internal.h: Definition of private object structure. */ + +#ifndef AQ_NIC_INTERNAL_H +#define AQ_NIC_INTERNAL_H + +struct aq_nic_s { + struct aq_obj_s header; + struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; + struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; + struct aq_hw_s *aq_hw; + struct net_device *ndev; + struct aq_pci_func_s *aq_pci_func; + unsigned int aq_vecs; + unsigned int packet_filter; + unsigned int power_state; + bool is_ndev_registered; + u8 port; + struct aq_hw_ops aq_hw_ops; + struct aq_hw_caps_s aq_hw_caps; + struct aq_nic_cfg_s aq_nic_cfg; + struct timer_list service_timer; + struct timer_list polling_timer; + struct aq_hw_link_status_s link_status; + struct { + u32 count; + u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; + } mc_list; +}; + +#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \ + AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \ + AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW) + +#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \ + AQ_NIC_LINK_DOWN) + +#endif /* AQ_NIC_INTERNAL_H */ -- cgit v1.2.3 From bab6de8fd180bc284e3c191e59ceccf9a5ed14be Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:13 -0800 Subject: net: ethernet: aquantia: Atlantic A0 and B0 specific functions. Add Atlantic A0 and B0 specific functions. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 905 +++++++++++++++++++ .../ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h | 34 + .../aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 155 ++++ .../ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 958 +++++++++++++++++++++ .../ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 34 + .../aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 207 +++++ 6 files changed, 2293 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c new file mode 100644 index 000000000000..1f388054a6c7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -0,0 +1,905 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_a0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_a0_internal.h" + +static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_a0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_a0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + 
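/* [Editorial illustration -- not part of the submitted patch]
 * The TPB/RPB watermark writes a few lines below convert the packet-buffer
 * size from kilobytes into 32-byte units and then take 66% / 50% of that as
 * the high / low thresholds. A standalone sketch of the arithmetic, using
 * the hypothetical helper name atl_buf_threshold(), would be:
 *
 *	static u32 atl_buf_threshold(u32 buff_size_kb, u32 percent)
 *	{
 *		// buff_size_kb KiB expressed in 32-byte units, scaled to percent
 *		return (buff_size_kb * (1024U / 32U) * percent) / 100U;
 *	}
 *
 * so atl_buf_threshold(HW_ATL_A0_TXBUF_MAX, 66U) reproduces the value passed
 * to tpb_tx_buff_hi_threshold_per_tc_set() just below.
 */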
tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_A0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_A0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX * + HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 
0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? + 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_A0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + rpf_vlan_prom_mode_en_set(self, 1); + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + hw_atl_a0_hw_init_tx_path(self); + hw_atl_a0_hw_init_rx_path(self); + + hw_atl_a0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + reg_tx_dma_debug_ctl_set(self, 0x800000b8U); + reg_tx_dma_debug_ctl_set(self, 0x000000b8U); + + hw_atl_a0_hw_qos_set(self); + hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) | + ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) | + ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U); + + hw_atl_a0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_A0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_A0_TXD_CTL_CMD_TCP | + HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_a0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + 
aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_A0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */ + if ((1U << 4) & + reg_rx_dma_desc_status_get(self, ring->idx)) { + rdm_rx_desc_en_set(self, false, ring->idx); + rdm_rx_desc_res_set(self, true, ring->idx); + rdm_rx_desc_res_set(self, false, ring->idx); + rdm_rx_desc_en_set(self, true, ring->idx); + } + + if (ring->hw_head || + (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) { + break; + } else if (!(rxd_wb->status & 0x1U)) { + struct hw_atl_rxd_wb_s *rxd_wb1 = + (struct hw_atl_rxd_wb_s *) + (&ring->dx_ring[(1U) * + HW_ATL_A0_RXD_SIZE]); + + if 
((rxd_wb1->status & 0x1U)) { + rxd_wb->pkt_len = 1514U; + rxd_wb->status = 3U; + } else { + break; + } + } + } + + buff = &ring->buff_ring[ring->hw_head]; + + if (0x3U != (rxd_wb->status & 0x3U)) + rxd_wb->status |= 4; + + is_err = (0x0000001CU & rxd_wb->status); + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; + } + + is_err &= ~0x18U; + is_err &= ~0x04U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = (rxd_wb->pkt_len & + (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + /* jumbo */ + buff->next = aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask) | + (1U << HW_ATL_A0_ERR_INT)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + if ((1U << 16) & reg_gen_irq_status_get(self)) + + atomic_inc(&PHAL_ATLANTIC_A0->dpc); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0); + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = + IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { + err = EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_A0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_A0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + if (self->aq_nic_cfg->itr != 0xFFFFU) { + u32 itr_ = (self->aq_nic_cfg->itr >> 1); + + itr_ = min(AQ_CFG_IRQ_MASK, itr_); + + PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | + (itr_ << 0x10); + } else { + u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); + + if (n < self->aq_link_status.mbps) { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } else { + static unsigned int hw_timers_tbl_[] = { + 0x01CU, /* 10Gbit */ + 0x039U, /* 5Gbit */ + 0x039U, /* 5Gbit 5GS */ + 0x073U, /* 2.5Gbit */ + 0x120U, /* 1Gbit */ + 0x1FFU, /* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_A0->itr_rx = + 0x80000000U | + (hw_timers_tbl_[speed_index] << 0x10U); + } + + aq_hw_write_reg(self, 0x00002A00U, 0x40000000U); + aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); + } + } else { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } + + for (i = HW_ATL_A0_RINGS_MAX; i--;) + reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_a0_create, + .destroy = hw_atl_a0_destroy, + .get_hw_caps = hw_atl_a0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_a0_hw_set_speed, + .hw_init = hw_atl_a0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_a0_hw_reset, + .hw_start = hw_atl_a0_hw_start, + .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop, + 
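/* [Editorial illustration -- not part of the submitted patch]
 * This ops table is only handed out by hw_atl_a0_get_ops_by_id() (defined
 * just below), which matches the aQuantia vendor ID, one of the known device
 * IDs and PCI revision 1; the B0 table later in this patch accepts the same
 * IDs at revision 2. The probe site is not part of this hunk, but a caller
 * choosing between the two could look roughly like this sketch (the helper
 * name atl_pick_hw_ops() is hypothetical):
 *
 *	static struct aq_hw_ops *atl_pick_hw_ops(struct pci_dev *pdev)
 *	{
 *		struct aq_hw_ops *ops = hw_atl_a0_get_ops_by_id(pdev);
 *
 *		if (!ops)	// not an A0 (revision 1) part
 *			ops = hw_atl_b0_get_ops_by_id(pdev);
 *		return ops;	// NULL if the device is not recognised
 *	}
 */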
.hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop, + .hw_stop = hw_atl_a0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_a0_hw_irq_enable, + .hw_irq_disable = hw_atl_a0_hw_irq_disable, + .hw_irq_read = hw_atl_a0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_a0_hw_rss_set, + .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 1U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h new file mode 100644 index 000000000000..6e1d527954c9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. + */ + +#ifndef HW_ATL_A0_H +#define HW_ATL_A0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_A0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h new file mode 100644 index 000000000000..1093ea18823a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -0,0 +1,155 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip specific + * constants. 
+ */ + +#ifndef HW_ATL_A0_INTERNAL_H +#define HW_ATL_A0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_A0_MTU_JUMBO 9014U + +#define HW_ATL_A0_TX_RINGS 4U +#define HW_ATL_A0_RX_RINGS 4U + +#define HW_ATL_A0_RINGS_MAX 32U +#define HW_ATL_A0_TXD_SIZE 16U +#define HW_ATL_A0_RXD_SIZE 16U + +#define HW_ATL_A0_MAC 0U +#define HW_ATL_A0_MAC_MIN 1U +#define HW_ATL_A0_MAC_MAX 33U + +/* interrupts */ +#define HW_ATL_A0_ERR_INT 8U +#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU + +#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U +#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U +#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U + +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U +#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U +#define HW_ATL_A0_TXD_CTL_DD 0x00100000U +#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U + +#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U + +#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_A0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_A0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_A0_RATE_10G BIT(0) +#define HW_ATL_A0_RATE_5G BIT(1) +#define HW_ATL_A0_RATE_2G5 BIT(3) +#define HW_ATL_A0_RATE_1G BIT(4) +#define HW_ATL_A0_RATE_100M BIT(5) + +#define HW_ATL_A0_TXBUF_MAX 160U +#define HW_ATL_A0_RXBUF_MAX 320U + +#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U + +#define HW_ATL_A0_TC_MAX 1U +#define HW_ATL_A0_RSS_MAX 8U + +#define HW_ATL_A0_FW_SEMA_RAM 0x2U + +#define HW_ATL_A0_RXD_DD 0x1U +#define HW_ATL_A0_RXD_NCEA0 0x1U + +#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U + +#define HW_ATL_A0_UCP_0X370_REG 0x370U + +#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_A0_RSS_MAX, + .tcs = HW_ATL_A0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_A0_RXD_SIZE, + .rxds = 248U, + .txd_alignment = 1U, + .txd_size = HW_ATL_A0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_A0_TX_RINGS, + .rx_rings = HW_ATL_A0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_A0_RATE_10G | + HW_ATL_A0_RATE_5G | + HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_A0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_A0_INTERNAL_H */ diff 
--git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c new file mode 100644 index 000000000000..e7e694f693bd --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -0,0 +1,958 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_b0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_b0_internal.h" + +static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_b0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_b0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_B0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_B0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + 
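/* [Editorial illustration -- not part of the submitted patch]
 * In hw_atl_b0_hw_reset() above (and in the RSS helpers further below),
 * AQ_HW_WAIT_FOR() is immediately followed by "if (err < 0)" although err is
 * never assigned in the visible code, so the macro evidently polls its
 * condition and writes err in the enclosing scope on timeout. Its definition
 * is not part of this hunk; one plausible shape, assuming a udelay()-based
 * busy-wait that reports -ETIME and requires a local "int err", is:
 *
 *	#define AQ_HW_WAIT_FOR(_B_, _US_, _N_)		\
 *	do {						\
 *		unsigned int i_;			\
 *		for (i_ = _N_; !(_B_) && i_; --i_)	\
 *			udelay(_US_);			\
 *		if (!i_)				\
 *			err = -ETIME;			\
 *	} while (0)
 *
 * With those arguments, "AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U,
 * 10U)" polls the soft-reset bit up to ten times at 1 ms intervals, matching
 * the "check 10 times by 1ms" comment above it.
 */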
rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX * + HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + unsigned int i; + + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + +/* LRO offloads */ + { + unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U : + ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U : + ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0)); + + for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) + rpo_lro_max_num_of_descriptors_set(self, val, i); + + rpo_lro_time_base_divider_set(self, 0x61AU); + rpo_lro_inactive_interval_set(self, 0); + rpo_lro_max_coalescing_interval_set(self, 2); + + rpo_lro_qsessions_lim_set(self, 1U); + + rpo_lro_total_desc_lim_set(self, 2U); + + rpo_lro_patch_optimization_en_set(self, 0U); + + rpo_lro_min_pay_of_first_pkt_set(self, 10U); + + rpo_lro_pkt_lim_set(self, 1U); + + rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); + } + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? 
+ 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_B0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + + if (cfg->vlan_id) { + rpf_vlan_flr_act_set(self, 1U, 0U); + rpf_vlan_id_flr_set(self, 0U, 0U); + rpf_vlan_flr_en_set(self, 0U, 0U); + + rpf_vlan_accept_untagged_packets_set(self, 1U); + rpf_vlan_untagged_act_set(self, 1U); + + rpf_vlan_flr_act_set(self, 1U, 1U); + rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U); + rpf_vlan_flr_en_set(self, 1U, 1U); + } else { + rpf_vlan_prom_mode_en_set(self, 1); + } + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00005040U, + IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U); + + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_B0->chip_features); + + hw_atl_b0_hw_init_tx_path(self); + hw_atl_b0_hw_init_rx_path(self); + + hw_atl_b0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + hw_atl_b0_hw_qos_set(self); + hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U); + + hw_atl_b0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_B0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_B0_TXD_CTL_CMD_TCP | + HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_b0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + 
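/* [Editorial illustration -- not part of the submitted patch]
 * A few lines above, the 64-bit descriptor-ring DMA address is programmed as
 * two 32-bit writes (low word, then high word) and the ring length register
 * takes the descriptor count divided by 8. A minimal standalone sketch of
 * the address split, using the hypothetical helper name atl_split_dma_addr(),
 * is:
 *
 *	static void atl_split_dma_addr(dma_addr_t pa, u32 *lsw, u32 *msw)
 *	{
 *		*lsw = (u32)pa;			// bits 31:0
 *		*msw = (u32)((u64)pa >> 32);	// bits 63:32, zero for 32-bit DMA
 *	}
 *
 * which mirrors the dma_desc_addr_lsw / dma_desc_addr_msw values handed to
 * reg_rx_dma_desc_base_addresslswset() and reg_rx_dma_desc_base_addressmswset()
 * earlier in this function.
 */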
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_B0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ + break; + } + + buff = &ring->buff_ring[ring->hw_head]; + + is_err = (0x0000003CU & rxd_wb->status); + + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + is_err &= ~0x20U; /* exclude validity bit */ + + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = buff->is_cso_err ? 
0U : 1U; + } + + is_err &= ~0x18U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = (rxd_wb->pkt_len & + (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status) { + /* LRO */ + buff->next = rxd_wb->next_desc_ptr; + ++ring->stats.rx.lro_packets; + } else { + /* jumbo */ + buff->next = + aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + atomic_inc(&PHAL_ATLANTIC_B0->dpc); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, + IS_FILTER_ENABLED(IFF_MULTICAST), 0); + + rpfl2_accept_all_mc_packets_set(self, + IS_FILTER_ENABLED(IFF_ALLMULTI)); + + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) { + err = -EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_B0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_B0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + tdm_tx_desc_wr_wb_irq_en_set(self, 0U); + tdm_tdm_intr_moder_en_set(self, 1U); + rdm_rx_desc_wr_wb_irq_en_set(self, 0U); + rdm_rdm_intr_moder_en_set(self, 1U); + + PHAL_ATLANTIC_B0->itr_tx = 2U; + PHAL_ATLANTIC_B0->itr_rx = 2U; + + if (self->aq_nic_cfg->itr != 0xFFFFU) { + unsigned int max_timer = self->aq_nic_cfg->itr / 2U; + unsigned int min_timer = self->aq_nic_cfg->itr / 32U; + + max_timer = min(0x1FFU, max_timer); + min_timer = min(0xFFU, min_timer); + + PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; + PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; + } else { + static unsigned int hw_atl_b0_timers_table_tx_[][2] = { + {0xffU, 0xffU}, /* 10Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit 5GS */ + {0xffU, 0x1ffU}, /* 2.5Gbit */ + {0xffU, 0x1ffU}, /* 1Gbit */ + {0xffU, 0x1ffU}, /* 100Mbit */ + }; + + static unsigned int hw_atl_b0_timers_table_rx_[][2] = { + {0x6U, 0x38U},/* 10Gbit */ + {0xCU, 0x70U},/* 5Gbit */ + {0xCU, 0x70U},/* 5Gbit 5GS */ + {0x18U, 0xE0U},/* 2.5Gbit */ + {0x30U, 0x80U},/* 1Gbit */ + {0x4U, 0x50U},/* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [1] << 0x10U; /* set max timer value */ + + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [1] << 0x10U; /* set max timer value */ + } + } else { + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + tdm_tdm_intr_moder_en_set(self, 0U); + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + rdm_rdm_intr_moder_en_set(self, 0U); + PHAL_ATLANTIC_B0->itr_tx = 0U; + PHAL_ATLANTIC_B0->itr_rx = 0U; + } + + for (i = HW_ATL_B0_RINGS_MAX; i--;) { + reg_tx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_tx, i); + reg_rx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_rx, i); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int 
hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_b0_create, + .destroy = hw_atl_b0_destroy, + .get_hw_caps = hw_atl_b0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_b0_hw_set_speed, + .hw_init = hw_atl_b0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_b0_hw_reset, + .hw_start = hw_atl_b0_hw_start, + .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop, + .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop, + .hw_stop = hw_atl_b0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_b0_hw_irq_enable, + .hw_irq_disable = hw_atl_b0_hw_irq_disable, + .hw_irq_read = hw_atl_b0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_b0_hw_rss_set, + .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 2U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h new file mode 100644 index 000000000000..a1e1bce6c1f3 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. 
+ */ + +#ifndef HW_ATL_B0_H +#define HW_ATL_B0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_B0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h new file mode 100644 index 000000000000..8bdee3ddd5a0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -0,0 +1,207 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific + * constants. + */ + +#ifndef HW_ATL_B0_INTERNAL_H +#define HW_ATL_B0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_B0_MTU_JUMBO (16000U) +#define HW_ATL_B0_MTU 1514U + +#define HW_ATL_B0_TX_RINGS 4U +#define HW_ATL_B0_RX_RINGS 4U + +#define HW_ATL_B0_RINGS_MAX 32U +#define HW_ATL_B0_TXD_SIZE (16U) +#define HW_ATL_B0_RXD_SIZE (16U) + +#define HW_ATL_B0_MAC 0U +#define HW_ATL_B0_MAC_MIN 1U +#define HW_ATL_B0_MAC_MAX 33U + +/* UCAST/MCAST filters */ +#define HW_ATL_B0_UCAST_FILTERS_MAX 38 +#define HW_ATL_B0_MCAST_FILTERS_MAX 8 + +/* interrupts */ +#define HW_ATL_B0_ERR_INT 8U +#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU) + +#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000) +#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000) +#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000) + +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001) +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002) +#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0) +#define HW_ATL_B0_TXD_CTL_DD (0x00100000) +#define HW_ATL_B0_TXD_CTL_EOP (0x00200000) + +#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000) + +#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_B0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_B0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_B0_RATE_10G BIT(0) +#define HW_ATL_B0_RATE_5G BIT(1) +#define HW_ATL_B0_RATE_2G5 BIT(3) +#define HW_ATL_B0_RATE_1G BIT(4) +#define HW_ATL_B0_RATE_100M BIT(5) + +#define HW_ATL_B0_TXBUF_MAX 160U +#define HW_ATL_B0_RXBUF_MAX 320U + +#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U +#define HW_ATL_B0_RSS_HASHKEY_BITS 320U + +#define HW_ATL_B0_TCRSS_4_8 1 +#define HW_ATL_B0_TC_MAX 1U +#define HW_ATL_B0_RSS_MAX 8U + +#define HW_ATL_B0_LRO_RXD_MAX 2U +#define HW_ATL_B0_RS_SLIP_ENABLED 0U + +/* (256k -1(max pay_len) - 54(header)) */ +#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U + +/* (256k -1(max pay_len) - 74(header)) */ +#define 
HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U + +#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U +#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU + +#define HW_ATL_B0_FW_SEMA_RAM 0x2U + +#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000) + +#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007) +#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008) +#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0) +#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000) +#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000) + +#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */ +#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LE upper bits */ +#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000) + +#define HW_ATL_B0_RXD_DD (0x1) +#define HW_ATL_B0_RXD_NCEA0 (0x1) + +#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F) +#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0) +#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000) +#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000) +#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000) + +#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001) +#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002) +#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C) +#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004) +#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008) +#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010) +#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0) +#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000) + +#define L2_FILTER_ACTION_DISCARD (0x0) +#define L2_FILTER_ACTION_HOST (0x1) + +#define HW_ATL_B0_UCP_0X370_REG (0x370) + +#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10) + +#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_B0_RSS_MAX, + .tcs = HW_ATL_B0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_B0_RXD_SIZE, + .rxds = 8U * 1024U, + .txd_alignment = 1U, + .txd_size = HW_ATL_B0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_B0_TX_RINGS, + .rx_rings = HW_ATL_B0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_LRO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_B0_RATE_10G | + HW_ATL_B0_RATE_5G | + HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_B0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_B0_INTERNAL_H */ -- cgit v1.2.3 From 970a2e9864b0ba45f639aebf464a5edcaabb5be0 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:14 -0800 Subject: net: ethernet: aquantia: Vector operations Add functions to manululate the vector of receive and transmit rings. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel.Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. 
VomLehn Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 392 ++++++++++++++++++++++++ drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 42 +++ 2 files changed, 434 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_vec.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_vec.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c new file mode 100644 index 000000000000..140962f23e61 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -0,0 +1,392 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings. + * Definition of functions for Rx and Tx rings. Friendly module for aq_nic. + */ + +#include "aq_vec.h" +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_hw.h" + +#include + +struct aq_vec_s { + struct aq_obj_s header; + struct aq_hw_ops *aq_hw_ops; + struct aq_hw_s *aq_hw; + struct aq_nic_s *aq_nic; + unsigned int tx_rings; + unsigned int rx_rings; + struct aq_ring_param_s aq_ring_param; + struct napi_struct napi; + struct aq_ring_s ring[AQ_CFG_TCS_MAX][2]; +}; + +#define AQ_VEC_TX_ID 0 +#define AQ_VEC_RX_ID 1 + +static int aq_vec_poll(struct napi_struct *napi, int budget) +__releases(&self->lock) +__acquires(&self->lock) +{ + struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); + struct aq_ring_s *ring = NULL; + int work_done = 0; + int err = 0; + unsigned int i = 0U; + unsigned int sw_tail_old = 0U; + bool was_tx_cleaned = false; + + if (!self) { + err = -EINVAL; + } else if (spin_trylock(&self->header.lock)) { + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + if (self->aq_hw_ops->hw_ring_tx_head_update) { + err = self->aq_hw_ops->hw_ring_tx_head_update( + self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + } + + if (ring[AQ_VEC_TX_ID].sw_head != + ring[AQ_VEC_TX_ID].hw_head) { + err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + was_tx_cleaned = true; + } + + err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + if (ring[AQ_VEC_RX_ID].sw_head != + ring[AQ_VEC_RX_ID].hw_head) { + err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], + &work_done, + budget - work_done); + if (err < 0) + goto err_exit; + + sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill( + self->aq_hw, + &ring[AQ_VEC_RX_ID], sw_tail_old); + if (err < 0) + goto err_exit; + } + } + + if (was_tx_cleaned) + work_done = budget; + + if (work_done < budget) { + napi_complete(napi); + self->aq_hw_ops->hw_irq_enable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + } + +err_exit: + spin_unlock(&self->header.lock); + } + + return work_done; +} + +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + struct aq_vec_s *self = NULL; + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + 
self->aq_nic = aq_nic; + self->aq_ring_param.vec_idx = idx; + self->aq_ring_param.cpu = + idx + aq_nic_cfg->aq_rss.base_cpu_number; + + cpumask_set_cpu(self->aq_ring_param.cpu, + &self->aq_ring_param.affinity_mask); + + self->tx_rings = 0; + self->rx_rings = 0; + + netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, + aq_vec_poll, AQ_CFG_NAPI_WEIGHT); + + for (i = 0; i < aq_nic_cfg->tcs; ++i) { + unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic, + self->tx_rings, + self->aq_ring_param.vec_idx); + + ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->tx_rings; + + aq_nic_set_tx_ring(aq_nic, idx_ring, ring); + + ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->rx_rings; + } + +err_exit: + if (err < 0) { + aq_vec_free(self); + self = NULL; + } + return self; +} + +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct aq_hw_s *aq_hw) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self->aq_hw_ops = aq_hw_ops; + self->aq_hw = aq_hw; + + spin_lock_init(&self->header.lock); + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = aq_ring_init(&ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw, + &ring[AQ_VEC_TX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_init(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw, + &ring[AQ_VEC_RX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw, + &ring[AQ_VEC_RX_ID], 0U); + if (err < 0) + goto err_exit; + } + +err_exit: + return err; +} + +int aq_vec_start(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + } + + napi_enable(&self->napi); + +err_exit: + return err; +} + +void aq_vec_stop(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + + self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + } + + napi_disable(&self->napi); +} + +void aq_vec_deinit(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_tx_deinit(&ring[AQ_VEC_TX_ID]); + aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); + } +err_exit:; +} + +void aq_vec_free(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_free(&ring[AQ_VEC_TX_ID]); + aq_ring_free(&ring[AQ_VEC_RX_ID]); + } + + netif_napi_del(&self->napi); + + kfree(self); + +err_exit:; +} 
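
The functions above make up the whole per-vector lifecycle: allocate, init against the hardware ops, start, stop, deinit, free. The sequencing itself belongs to the NIC layer (aq_nic.c, which is not part of this excerpt), so what follows is only a minimal usage sketch of how a caller is expected to pair these calls; example_open_vectors(), example_close_vectors() and the vec[]/count parameters are hypothetical stand-ins, not driver code.

#include "aq_vec.h"

/* Minimal usage sketch (assumed caller, not the real aq_nic code):
 * pairs the aq_vec lifecycle calls in their expected order.
 * Unwinding of already-started vectors on error is omitted for brevity.
 */
static int example_open_vectors(struct aq_nic_s *nic,
				struct aq_hw_ops *ops, struct aq_hw_s *hw,
				struct aq_nic_cfg_s *cfg,
				struct aq_vec_s *vec[], unsigned int count)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; ++i) {
		/* kzalloc + netif_napi_add + tx/rx ring allocation */
		vec[i] = aq_vec_alloc(nic, i, cfg);
		if (!vec[i])
			return -ENOMEM;

		/* ring and hardware init, plus the initial rx fill */
		err = aq_vec_init(vec[i], ops, hw);
		if (err < 0)
			return err;

		/* enable the rings and NAPI */
		err = aq_vec_start(vec[i]);
		if (err < 0)
			return err;
	}

	return 0;
}

static void example_close_vectors(struct aq_vec_s *vec[], unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; ++i) {
		aq_vec_stop(vec[i]);	/* stop rings, napi_disable */
		aq_vec_deinit(vec[i]);	/* release ring buffers */
		aq_vec_free(vec[i]);	/* netif_napi_del + kfree */
	}
}

Interrupts hook into this lifecycle through aq_vec_isr()/aq_vec_isr_legacy() below: each handler simply schedules the vector's NAPI context, and aq_vec_poll() then drains tx completions and rx descriptors within the NAPI budget.
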
+ +irqreturn_t aq_vec_isr(int irq, void *private) +{ + struct aq_vec_s *self = private; + int err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + napi_schedule(&self->napi); + +err_exit: + return err >= 0 ? IRQ_HANDLED : IRQ_NONE; +} + +irqreturn_t aq_vec_isr_legacy(int irq, void *private) +{ + struct aq_vec_s *self = private; + u64 irq_mask = 0U; + irqreturn_t err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask); + if (err < 0) + goto err_exit; + + if (irq_mask) { + self->aq_hw_ops->hw_irq_disable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + napi_schedule(&self->napi); + } else { + self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U); + err = IRQ_NONE; + } + +err_exit: + return err >= 0 ? IRQ_HANDLED : IRQ_NONE; +} + +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self) +{ + return &self->aq_ring_param.affinity_mask; +} + +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx) +{ + struct aq_ring_s *ring = NULL; + unsigned int r = 0U; + + for (r = 0U, ring = self->ring[0]; + self->tx_rings > r; ++r, ring = self->ring[r]) { + struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; + struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx; + + stats_rx->packets += rx->packets; + stats_rx->bytes += rx->bytes; + stats_rx->errors += rx->errors; + stats_rx->jumbo_packets += rx->jumbo_packets; + stats_rx->lro_packets += rx->lro_packets; + + stats_tx->packets += tx->packets; + stats_tx->bytes += tx->bytes; + stats_tx->errors += tx->errors; + } +} + +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) +{ + unsigned int count = 0U; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + aq_vec_add_stats(self, &stats_rx, &stats_tx); + + data[count] += stats_rx.packets; + data[++count] += stats_tx.packets; + data[++count] += stats_rx.jumbo_packets; + data[++count] += stats_rx.lro_packets; + data[++count] += stats_rx.errors; + + if (p_count) + *p_count = ++count; + + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h new file mode 100644 index 000000000000..6c68b184236c --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -0,0 +1,42 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings. + * Declaration of functions for Rx and Tx rings. 
+ */ + +#ifndef AQ_VEC_H +#define AQ_VEC_H + +#include "aq_common.h" +#include + +struct aq_hw_s; +struct aq_hw_ops; +struct aq_ring_stats_rx_s; +struct aq_ring_stats_tx_s; + +irqreturn_t aq_vec_isr(int irq, void *private); +irqreturn_t aq_vec_isr_legacy(int irq, void *private); +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct aq_hw_s *aq_hw); +void aq_vec_deinit(struct aq_vec_s *self); +void aq_vec_free(struct aq_vec_s *self); +int aq_vec_start(struct aq_vec_s *self); +void aq_vec_stop(struct aq_vec_s *self); +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self); +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, + unsigned int *p_count); +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx); + +#endif /* AQ_VEC_H */ -- cgit v1.2.3 From a4d36e20d035140bbc26d04595cdd4ea2913cfcc Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:15 -0800 Subject: net: ethernet: aquantia: PCI operations Add functions that handle the PCI bus interface. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_pci_func.c | 345 +++++++++++++++++++++ .../net/ethernet/aquantia/atlantic/aq_pci_func.h | 34 ++ 2 files changed, 379 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c new file mode 100644 index 000000000000..afcecdbf124c --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -0,0 +1,345 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_pci_func.c: Definition of PCI functions. 
*/ + +#include "aq_pci_func.h" +#include "aq_nic.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include + +struct aq_pci_func_s { + struct pci_dev *pdev; + struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS]; + void __iomem *mmio; + void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS]; + resource_size_t mmio_pa; + unsigned int msix_entry_mask; + unsigned int irq_type; + unsigned int ports; + bool is_pci_enabled; + bool is_regions; + bool is_pci_using_dac; + struct aq_hw_caps_s aq_hw_caps; + struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS]; +}; + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops) +{ + struct aq_pci_func_s *self = NULL; + int err = 0; + unsigned int port = 0U; + + if (!aq_hw_ops) { + err = -EFAULT; + goto err_exit; + } + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + pci_set_drvdata(pdev, self); + self->pdev = pdev; + + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + self->ports = self->aq_hw_caps.ports; + + for (port = 0; port < self->ports; ++port) { + struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, + &pdev->dev, self, + port, aq_hw_ops); + + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + self->port[port] = aq_nic; + } + +err_exit: + if (err < 0) { + if (self) + aq_pci_func_free(self); + self = NULL; + } + + (void)err; + return self; +} + +int aq_pci_func_init(struct aq_pci_func_s *self) +{ + int err = 0; + unsigned int bar = 0U; + unsigned int port = 0U; + unsigned int i = 0U; + + err = pci_enable_device(self->pdev); + if (err < 0) + goto err_exit; + + self->is_pci_enabled = true; + + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64)); + self->is_pci_using_dac = 1; + } + if (err) { + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(self->pdev, + DMA_BIT_MASK(32)); + self->is_pci_using_dac = 0; + } + if (err != 0) { + err = -ENOSR; + goto err_exit; + } + + err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio"); + if (err < 0) + goto err_exit; + + self->is_regions = true; + + pci_set_master(self->pdev); + + for (bar = 0; bar < 4; ++bar) { + if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) { + resource_size_t reg_sz; + + self->mmio_pa = pci_resource_start(self->pdev, bar); + if (self->mmio_pa == 0U) { + err = -EIO; + goto err_exit; + } + + reg_sz = pci_resource_len(self->pdev, bar); + if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { + err = -EIO; + goto err_exit; + } + + self->mmio = ioremap_nocache(self->mmio_pa, reg_sz); + if (!self->mmio) { + err = -EIO; + goto err_exit; + } + break; + } + } + + if (err < 0) + goto err_exit; + for (i = 0; i < self->aq_hw_caps.msix_irqs; i++) + self->msix_entry[i].entry = i; + + /*enable interrupts */ +#if AQ_CFG_FORCE_LEGACY_INT + self->irq_type = AQ_HW_IRQ_LEGACY; +#else + err = pci_enable_msix(self->pdev, self->msix_entry, + self->aq_hw_caps.msix_irqs); + + if (err >= 0) { + self->irq_type = AQ_HW_IRQ_MSIX; + } else { + err = pci_enable_msi(self->pdev); + + if (err >= 0) { + self->irq_type = AQ_HW_IRQ_MSI; + } else { + self->irq_type = AQ_HW_IRQ_LEGACY; + err = 0; + } + } +#endif + + /* net device init */ + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + err = aq_nic_cfg_start(self->port[port]); + if (err < 0) + goto err_exit; + + err = 
aq_nic_ndev_init(self->port[port]); + if (err < 0) + goto err_exit; + + err = aq_nic_ndev_register(self->port[port]); + if (err < 0) + goto err_exit; + } + +err_exit: + if (err < 0) + aq_pci_func_deinit(self); + return err; +} + +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + char *name, void *aq_vec, cpumask_t *affinity_mask) +{ + int err = 0; + + switch (self->irq_type) { + case AQ_HW_IRQ_MSIX: + err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0, + name, aq_vec); + break; + + case AQ_HW_IRQ_MSI: + err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec); + break; + + case AQ_HW_IRQ_LEGACY: + err = request_irq(self->pdev->irq, aq_vec_isr_legacy, + IRQF_SHARED, name, aq_vec); + break; + + default: + err = -EFAULT; + break; + } + + if (err >= 0) { + self->msix_entry_mask |= (1 << i); + self->aq_vec[i] = aq_vec; + + if (self->irq_type == AQ_HW_IRQ_MSIX) + irq_set_affinity_hint(self->msix_entry[i].vector, + affinity_mask); + } + + return err; +} + +void aq_pci_func_free_irqs(struct aq_pci_func_s *self) +{ + unsigned int i = 0U; + + for (i = 32U; i--;) { + if (!((1U << i) & self->msix_entry_mask)) + continue; + + switch (self->irq_type) { + case AQ_HW_IRQ_MSIX: + irq_set_affinity_hint(self->msix_entry[i].vector, NULL); + free_irq(self->msix_entry[i].vector, self->aq_vec[i]); + break; + + case AQ_HW_IRQ_MSI: + free_irq(self->pdev->irq, self->aq_vec[i]); + break; + + case AQ_HW_IRQ_LEGACY: + free_irq(self->pdev->irq, self->aq_vec[i]); + break; + + default: + break; + } + + self->msix_entry_mask &= ~(1U << i); + } +} + +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self) +{ + return self->mmio; +} + +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self) +{ + return self->irq_type; +} + +void aq_pci_func_deinit(struct aq_pci_func_s *self) +{ + if (!self) + goto err_exit; + + aq_pci_func_free_irqs(self); + + switch (self->irq_type) { + case AQ_HW_IRQ_MSI: + pci_disable_msi(self->pdev); + break; + + case AQ_HW_IRQ_MSIX: + pci_disable_msix(self->pdev); + break; + + case AQ_HW_IRQ_LEGACY: + break; + + default: + break; + } + + if (self->is_regions) + pci_release_regions(self->pdev); + + if (self->is_pci_enabled) + pci_disable_device(self->pdev); + +err_exit:; +} + +void aq_pci_func_free(struct aq_pci_func_s *self) +{ + unsigned int port = 0U; + + if (!self) + goto err_exit; + + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + aq_nic_ndev_free(self->port[port]); + } + + kfree(self); + +err_exit:; +} + +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg) +{ + int err = 0; + unsigned int port = 0U; + + if (!self) { + err = -EFAULT; + goto err_exit; + } + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + (void)aq_nic_change_pm_state(self->port[port], pm_msg); + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h new file mode 100644 index 000000000000..ecb033791203 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_pci_func.h: Declaration of PCI functions. 
*/ + +#ifndef AQ_PCI_FUNC_H +#define AQ_PCI_FUNC_H + +#include "aq_common.h" + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops); +int aq_pci_func_init(struct aq_pci_func_s *self); +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + char *name, void *aq_vec, + cpumask_t *affinity_mask); +void aq_pci_func_free_irqs(struct aq_pci_func_s *self); +int aq_pci_func_start(struct aq_pci_func_s *self); +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self); +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self); +void aq_pci_func_deinit(struct aq_pci_func_s *self); +void aq_pci_func_free(struct aq_pci_func_s *self); +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg); + +#endif /* AQ_PCI_FUNC_H */ -- cgit v1.2.3 From 98c4c20142e985faeb693d8546e53d2ff7eebe26 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:16 -0800 Subject: net: ethernet: aquantia: Atlantic hardware abstraction layer Add common functions for Atlantic hardware abstraction layer. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- .../aquantia/atlantic/hw_atl/hw_atl_utils.c | 570 +++++++++++++++++++++ .../aquantia/atlantic/hw_atl/hw_atl_utils.h | 210 ++++++++ 2 files changed, 780 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c new file mode 100644 index 000000000000..8d6d8f5804da --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -0,0 +1,570 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware + * abstraction layer. 
+ */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_pci_func.h" +#include "../aq_ring.h" +#include "../aq_vec.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" + +#include + +#define HW_ATL_UCP_0X370_REG 0x0370U + +#define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_MPI_STATE_ADR 0x036CU + +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_MPI_SPEED_SHIFT 16U + +static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt) +{ + int err = 0; + + AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self, + HW_ATL_FW_SM_RAM) == 1U, + 1U, 10000U); + + if (err < 0) { + bool is_locked; + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x00000200U, 0x00008000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + + *(p++) = aq_hw_read_reg(self, 0x0000020CU); + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, + u32 cnt) +{ + int err = 0; + bool is_locked; + + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x0000020CU, *(p++)); + aq_hw_write_reg(self, 0x00000200U, 0xC000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) +{ + int err = 0; + const u32 dw_major_mask = 0xff000000U; + const u32 dw_minor_mask = 0x00ffffffU; + + err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; + if (err < 0) + goto err_exit; + err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? 
+ -EOPNOTSUPP : 0; +err_exit: + return err; +} + +static int hw_atl_utils_init_ucp(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + if (!aq_hw_read_reg(self, 0x370U)) { + unsigned int rnd = 0U; + unsigned int ucp_0x370 = 0U; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr = + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + + err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, + aq_hw_read_reg(self, 0x18U)); + return err; +} + +#define HW_ATL_RPC_CONTROL_ADR 0x0338U +#define HW_ATL_RPC_STATE_ADR 0x033CU + +struct aq_hw_atl_utils_fw_rpc_tid_s { + union { + u32 val; + struct { + u16 tid; + u16 len; + }; + }; +}; + +#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) + +static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + + if (!IS_CHIP_FEATURE(MIPS)) { + err = -1; + goto err_exit; + } + err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *)&PHAL_ATLANTIC->rpc, + (rpc_size + sizeof(u32) - + sizeof(u8)) / sizeof(u32)); + if (err < 0) + goto err_exit; + + sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid); + sw.len = (u16)rpc_size; + aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + struct aq_hw_atl_utils_fw_rpc_tid_s fw; + + do { + sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); + + PHAL_ATLANTIC->rpc_tid = sw.tid; + + AQ_HW_WAIT_FOR(sw.tid == + (fw.val = + aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), + fw.tid), 1000U, 100U); + if (err < 0) + goto err_exit; + + if (fw.len == 0xFFFFU) { + err = hw_atl_utils_fw_rpc_call(self, sw.len); + if (err < 0) + goto err_exit; + } + } while (sw.tid != fw.tid || 0xFFFFU == fw.len); + if (err < 0) + goto err_exit; + + if (rpc) { + if (fw.len) { + err = + hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *) + &PHAL_ATLANTIC->rpc, + (fw.len + sizeof(u32) - + sizeof(u8)) / + sizeof(u32)); + if (err < 0) + goto err_exit; + } + + *rpc = &PHAL_ATLANTIC->rpc; + } + +err_exit: + return err; +} + +static int hw_atl_utils_mpi_create(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + err = hw_atl_utils_init_ucp(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + err = hw_atl_utils_fw_rpc_init(self); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox) +{ + int err = 0; + + err = hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); + if (err < 0) + goto err_exit; + + if (pmbox != &PHAL_ATLANTIC->mbox) + memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); + + if (IS_CHIP_FEATURE(REVISION_A0)) { + unsigned int mtu = self->aq_nic_cfg ? 
+ self->aq_nic_cfg->mtu : 1514U; + pmbox->stats.ubrc = pmbox->stats.uprc * mtu; + pmbox->stats.ubtc = pmbox->stats.uptc * mtu; + pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc); + } else { + pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self); + } + +err_exit:; +} + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state) +{ + u32 ucp_0x368 = 0; + + ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state; + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368); + + return 0; +} + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, u32 speed) +{ + int err = 0; + u32 transaction_id = 0; + + if (state == MPI_RESET) { + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + transaction_id = PHAL_ATLANTIC->mbox.transaction_id; + + AQ_HW_WAIT_FOR(transaction_id != + (hw_atl_utils_mpi_read_stats + (self, &PHAL_ATLANTIC->mbox), + PHAL_ATLANTIC->mbox.transaction_id), + 1000U, 100U); + if (err < 0) + goto err_exit; + } + + err = hw_atl_utils_mpi_set_speed(self, speed, state); + +err_exit:; +} + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status) +{ + u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); + u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; + + if (!link_speed_mask) { + link_status->mbps = 0U; + } else { + switch (link_speed_mask) { + case HAL_ATLANTIC_RATE_10G: + link_status->mbps = 10000U; + break; + + case HAL_ATLANTIC_RATE_5G: + case HAL_ATLANTIC_RATE_5GSR: + link_status->mbps = 5000U; + break; + + case HAL_ATLANTIC_RATE_2GS: + link_status->mbps = 2500U; + break; + + case HAL_ATLANTIC_RATE_1G: + link_status->mbps = 1000U; + break; + + case HAL_ATLANTIC_RATE_100M: + link_status->mbps = 100U; + break; + + default: + link_status->mbps = 0U; + break; + } + } + + return 0; +} + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac) +{ + int err = 0; + u32 h = 0U; + u32 l = 0U; + u32 mac_addr[2]; + + self->mmio = aq_pci_func_get_mmio(self->aq_pci_func); + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + err = hw_atl_utils_mpi_create(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { + unsigned int rnd = 0; + unsigned int ucp_0x370 = 0; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + err = hw_atl_utils_fw_downld_dwords(self, + aq_hw_read_reg(self, 0x00000374U) + + (40U * 4U), + mac_addr, + AQ_DIMOF(mac_addr)); + if (err < 0) { + mac_addr[0] = 0U; + mac_addr[1] = 0U; + err = 0; + } else { + mac_addr[0] = __swab32(mac_addr[0]); + mac_addr[1] = __swab32(mac_addr[1]); + } + + ether_addr_copy(mac, (u8 *)mac_addr); + + if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { + /* chip revision */ + l = 0xE3000000U + | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + } + +err_exit: + return err; +} + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) +{ + unsigned int ret = 0U; + + switch (mbps) { + case 100U: + ret = 5U; + break; + + case 1000U: + ret = 4U; + break; + + case 2500U: + ret = 3U; + break; + + 
case 5000U: + ret = 1U; + break; + + case 10000U: + ret = 0U; + break; + + default: + break; + } + return ret; +} + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) +{ + u32 chip_features = 0U; + u32 val = reg_glb_mif_id_get(self); + u32 mif_rev = val & 0xFFU; + + if ((3U & mif_rev) == 1U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS; + } else if ((3U & mif_rev) == 2U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS | + HAL_ATLANTIC_UTILS_CHIP_TPO2 | + HAL_ATLANTIC_UTILS_CHIP_RPF2; + } + + *p = chip_features; +} + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self) +{ + hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); + return 0; +} + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state) +{ + hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); + return 0; +} + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, unsigned int *p_count) +{ + struct hw_atl_stats_s *stats = NULL; + int i = 0; + + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + stats = &PHAL_ATLANTIC->mbox.stats; + + data[i] = stats->uprc + stats->mprc + stats->bprc; + data[++i] = stats->uprc; + data[++i] = stats->mprc; + data[++i] = stats->bprc; + data[++i] = stats->erpt; + data[++i] = stats->uptc + stats->mptc + stats->bptc; + data[++i] = stats->uptc; + data[++i] = stats->mptc; + data[++i] = stats->bptc; + data[++i] = stats->ubrc; + data[++i] = stats->ubtc; + data[++i] = stats->mbrc; + data[++i] = stats->mbtc; + data[++i] = stats->bbrc; + data[++i] = stats->bbtc; + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; + data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); + data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); + data[++i] = stats->dpc; + + if (p_count) + *p_count = ++i; + + return 0; +} + +static const u32 hw_atl_utils_hw_mac_regs[] = { + 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U, + 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U, + 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U, + 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U, + 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U, + 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U, + 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U, + 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U, + 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U, + 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U, + 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U, + 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U, + 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U, + 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U, + 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U, + 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U, + 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU, + 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU, + 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U, + 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U, + 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U, + 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U, +}; + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff) +{ + unsigned int i = 0U; + + for (i = 0; i < aq_hw_caps->mac_regs_count; i++) + regs_buff[i] = 
aq_hw_read_reg(self, + hw_atl_utils_hw_mac_regs[i]); + return 0; +} + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) +{ + *fw_version = aq_hw_read_reg(self, 0x18U); + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h new file mode 100644 index 000000000000..b8e3d88f0879 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -0,0 +1,210 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware + * abstraction layer. + */ + +#ifndef HW_ATL_UTILS_H +#define HW_ATL_UTILS_H + +#include "../aq_common.h" + +#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); } + +struct __packed hw_atl_stats_s { + u32 uprc; + u32 mprc; + u32 bprc; + u32 erpt; + u32 uptc; + u32 mptc; + u32 bptc; + u32 erpr; + u32 mbtc; + u32 bbtc; + u32 mbrc; + u32 bbrc; + u32 ubrc; + u32 ubtc; + u32 dpc; +}; + +union __packed ip_addr { + struct { + u8 addr[16]; + } v6; + struct { + u8 padding[12]; + u8 addr[4]; + } v4; +}; + +struct __packed hw_aq_atl_utils_fw_rpc { + u32 msg_id; + + union { + struct { + u32 pong; + } msg_ping; + + struct { + u8 mac_addr[6]; + u32 ip_addr_cnt; + + struct { + union ip_addr addr; + union ip_addr mask; + } ip[1]; + } msg_arp; + + struct { + u32 len; + u8 packet[1514U]; + } msg_inject; + + struct { + u32 priority; + u32 wol_packet_type; + u16 friendly_name_len; + u16 friendly_name[65]; + u32 pattern_id; + u32 next_wol_pattern_offset; + + union { + struct { + u32 flags; + u8 ipv4_source_address[4]; + u8 ipv4_dest_address[4]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv4_tcp_syn_parameters; + + struct { + u32 flags; + u8 ipv6_source_address[16]; + u8 ipv6_dest_address[16]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv6_tcp_syn_parameters; + + struct { + u32 flags; + } eapol_request_id_message_parameters; + + struct { + u32 flags; + u32 mask_offset; + u32 mask_size; + u32 pattern_offset; + u32 pattern_size; + } wol_bit_map_pattern; + } wol_pattern; + } msg_wol; + + struct { + u32 is_wake_on_link_down; + u32 is_wake_on_link_up; + } msg_wolink; + }; +}; + +struct __packed hw_aq_atl_utils_mbox { + u32 version; + u32 transaction_id; + int error; + struct hw_atl_stats_s stats; +}; + +struct __packed hw_atl_s { + struct aq_hw_s base; + struct hw_aq_atl_utils_mbox mbox; + u64 speed; + u32 itr_tx; + u32 itr_rx; + unsigned int chip_features; + u32 fw_ver_actual; + atomic_t dpc; + u32 mbox_addr; + u32 rpc_addr; + u32 rpc_tid; + struct hw_aq_atl_utils_fw_rpc rpc; +}; + +#define SELF ((struct hw_atl_s *)self) + +#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self))) + +#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U +#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U +#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U +#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U + +#define IS_CHIP_FEATURE(_F_) 
(HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ + PHAL_ATLANTIC->chip_features) + +enum hal_atl_utils_fw_state_e { + MPI_DEINIT = 0, + MPI_RESET = 1, + MPI_INIT = 2, + MPI_POWER = 4, +}; + +#define HAL_ATLANTIC_RATE_10G BIT(0) +#define HAL_ATLANTIC_RATE_5G BIT(1) +#define HAL_ATLANTIC_RATE_5GSR BIT(2) +#define HAL_ATLANTIC_RATE_2GS BIT(3) +#define HAL_ATLANTIC_RATE_1G BIT(4) +#define HAL_ATLANTIC_RATE_100M BIT(5) +#define HAL_ATLANTIC_RATE_INVALID BIT(6) + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox); + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, + u32 speed); + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state); + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status); + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac); + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps); + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff); + +int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, + struct ethtool_cmd *cmd); + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state); + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self); + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, + unsigned int *p_count); + +#endif /* HW_ATL_UTILS_H */ -- cgit v1.2.3 From 753f4783be7b60b2c936c6985ddf42f8ce268ad9 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:17 -0800 Subject: net: ethernet: aquantia: Hardware interface and utility functions Add functions to interface with the hardware and some utility functions. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 177 +++++++++++++++++++++ .../net/ethernet/aquantia/atlantic/aq_hw_utils.c | 68 ++++++++ .../net/ethernet/aquantia/atlantic/aq_hw_utils.h | 47 ++++++ 3 files changed, 292 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_hw.h create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h new file mode 100644 index 000000000000..fce0fd3f23ff --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -0,0 +1,177 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific + * functions. 
+ */ + +#ifndef AQ_HW_H +#define AQ_HW_H + +#include "aq_common.h" + +/* NIC H/W capabilities */ +struct aq_hw_caps_s { + u64 hw_features; + u64 link_speed_msk; + unsigned int hw_priv_flags; + u32 rxds; + u32 txds; + u32 txhwb_alignment; + u32 irq_mask; + u32 vecs; + u32 mtu; + u32 mac_regs_count; + u8 ports; + u8 msix_irqs; + u8 tcs; + u8 rxd_alignment; + u8 rxd_size; + u8 txd_alignment; + u8 txd_size; + u8 tx_rings; + u8 rx_rings; + bool flow_control; + bool is_64_dma; + u32 fw_ver_expected; +}; + +struct aq_hw_link_status_s { + unsigned int mbps; +}; + +#define AQ_HW_IRQ_INVALID 0U +#define AQ_HW_IRQ_LEGACY 1U +#define AQ_HW_IRQ_MSI 2U +#define AQ_HW_IRQ_MSIX 3U + +#define AQ_HW_POWER_STATE_D0 0U +#define AQ_HW_POWER_STATE_D3 3U + +#define AQ_HW_FLAG_STARTED 0x00000004U +#define AQ_HW_FLAG_STOPPING 0x00000008U +#define AQ_HW_FLAG_RESETTING 0x00000010U +#define AQ_HW_FLAG_CLOSING 0x00000020U +#define AQ_HW_LINK_DOWN 0x04000000U +#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U +#define AQ_HW_FLAG_ERR_HW 0x80000000U + +#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG) + +struct aq_hw_s { + struct aq_obj_s header; + struct aq_nic_cfg_s *aq_nic_cfg; + struct aq_pci_func_s *aq_pci_func; + void __iomem *mmio; + unsigned int not_ff_addr; + struct aq_hw_link_status_s aq_link_status; +}; + +struct aq_ring_s; +struct aq_ring_param_s; +struct aq_nic_cfg_s; +struct sk_buff; + +struct aq_hw_ops { + struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func, + unsigned int port, struct aq_hw_ops *ops); + + void (*destroy)(struct aq_hw_s *self); + + int (*get_hw_caps)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps); + + int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + unsigned int frags); + + int (*hw_ring_rx_receive)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + unsigned int sw_tail_old); + + int (*hw_ring_tx_head_update)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_get_mac_permanent)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac); + + int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); + + int (*hw_get_link_status)(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status); + + int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed); + + int (*hw_reset)(struct aq_hw_s *self); + + int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr); + + int (*hw_start)(struct aq_hw_s *self); + + int (*hw_stop)(struct aq_hw_s *self); + + int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_tx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_tx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_init)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_rx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask); + + int (*hw_packet_filter_set)(struct aq_hw_s *self, + unsigned int packet_filter); + + int (*hw_multicast_list_set)(struct aq_hw_s *self, + u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count); + + int 
(*hw_interrupt_moderation_set)(struct aq_hw_s *self, + bool itr_enabled); + + int (*hw_rss_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_rss_hash_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_get_regs)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); + + int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, + unsigned int *p_count); + + int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); + + int (*hw_deinit)(struct aq_hw_s *self); + + int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); +}; + +#endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c new file mode 100644 index 000000000000..5f13465995f6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -0,0 +1,68 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.c: Definitions of helper functions used across + * hardware layer. + */ + +#include "aq_hw_utils.h" +#include "aq_hw.h" + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val) +{ + if (msk ^ ~0) { + u32 reg_old, reg_new; + + reg_old = aq_hw_read_reg(aq_hw, addr); + reg_new = (reg_old & (~msk)) | (val << shift); + + if (reg_old != reg_new) + aq_hw_write_reg(aq_hw, addr, reg_new); + } else { + aq_hw_write_reg(aq_hw, addr, val); + } +} + +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift) +{ + return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift); +} + +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg) +{ + u32 value = readl(hw->mmio + reg); + + if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr)) + aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG); + + return value; +} + +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value) +{ + writel(value, hw->mmio + reg); +} + +int aq_hw_err_from_flags(struct aq_hw_s *hw) +{ + int err = 0; + + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) { + err = -EIO; + goto err_exit; + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h new file mode 100644 index 000000000000..78fcc0c3ab4d --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -0,0 +1,47 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.h: Declaration of helper functions used across hardware + * layer. 
+ */ + +#ifndef AQ_HW_UTILS_H +#define AQ_HW_UTILS_H + +#include "aq_common.h" + +#ifndef HIDWORD +#define LODWORD(_qw) ((u32)(_qw)) +#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff)) +#endif + +#define AQ_HW_SLEEP(_US_) mdelay(_US_) + +#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \ +do { \ + unsigned int AQ_HW_WAIT_FOR_i; \ + for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\ + --AQ_HW_WAIT_FOR_i) {\ + udelay(_US_); \ + } \ + if (!AQ_HW_WAIT_FOR_i) {\ + err = ETIME; \ + } \ +} while (0) + +struct aq_hw_s; + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val); +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift); +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); +int aq_hw_err_from_flags(struct aq_hw_s *hw); + +#endif /* AQ_HW_UTILS_H */ -- cgit v1.2.3 From c5760d03d4ebd12f1b2d6723aec4b45327a1d2eb Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:18 -0800 Subject: net: ethernet: aquantia: Ethtool support Add the driver interfaces required for support by the ethtool utility. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_ethtool.c | 261 +++++++++++++++++++++ .../net/ethernet/aquantia/atlantic/aq_ethtool.h | 19 ++ 2 files changed, 280 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c new file mode 100644 index 000000000000..c5b025e0dc10 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -0,0 +1,261 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ethtool.c: Definition of ethertool related functions. */ + +#include "aq_ethtool.h" +#include "aq_nic.h" + +static void aq_ethtool_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *p) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + memset(p, 0, regs_count * sizeof(u32)); + aq_nic_get_regs(aq_nic, regs, p); +} + +static int aq_ethtool_get_regs_len(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + return regs_count * sizeof(u32); +} + +static u32 aq_ethtool_get_link(struct net_device *ndev) +{ + return ethtool_op_get_link(ndev); +} + +static int aq_ethtool_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + aq_nic_get_link_settings(aq_nic, cmd); + ethtool_cmd_speed_set(cmd, netif_carrier_ok(ndev) ? 
+ aq_nic_get_link_speed(aq_nic) : 0U); + + return 0; +} + +static int aq_ethtool_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + return aq_nic_set_link_settings(aq_nic, cmd); +} + +/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */ +static const unsigned int aq_ethtool_stat_queue_lines = 5U; +static const unsigned int aq_ethtool_stat_queue_chars = + 5U * ETH_GSTRING_LEN; +static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { + "InPackets", + "InUCast", + "InMCast", + "InBCast", + "InErrors", + "OutPackets", + "OutUCast", + "OutMCast", + "OutBCast", + "InUCastOctects", + "OutUCastOctects", + "InMCastOctects", + "OutMCastOctects", + "InBCastOctects", + "OutBCastOctects", + "InOctects", + "OutOctects", + "InPacketsDma", + "OutPacketsDma", + "InOctetsDma", + "OutOctetsDma", + "InDroppedDma", + "Queue[0] InPackets", + "Queue[0] OutPackets", + "Queue[0] InJumboPackets", + "Queue[0] InLroPackets", + "Queue[0] InErrors", + "Queue[1] InPackets", + "Queue[1] OutPackets", + "Queue[1] InJumboPackets", + "Queue[1] InLroPackets", + "Queue[1] InErrors", + "Queue[2] InPackets", + "Queue[2] OutPackets", + "Queue[2] InJumboPackets", + "Queue[2] InLroPackets", + "Queue[2] InErrors", + "Queue[3] InPackets", + "Queue[3] OutPackets", + "Queue[3] InJumboPackets", + "Queue[3] InLroPackets", + "Queue[3] InErrors", + "Queue[4] InPackets", + "Queue[4] OutPackets", + "Queue[4] InJumboPackets", + "Queue[4] InLroPackets", + "Queue[4] InErrors", + "Queue[5] InPackets", + "Queue[5] OutPackets", + "Queue[5] InJumboPackets", + "Queue[5] InLroPackets", + "Queue[5] InErrors", + "Queue[6] InPackets", + "Queue[6] OutPackets", + "Queue[6] InJumboPackets", + "Queue[6] InLroPackets", + "Queue[6] InErrors", + "Queue[7] InPackets", + "Queue[7] OutPackets", + "Queue[7] InJumboPackets", + "Queue[7] InLroPackets", + "Queue[7] InErrors", +}; + +static void aq_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + +/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ + BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); + memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); + aq_nic_get_stats(aq_nic, data); +} + +static void aq_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); + u32 firmware_version = aq_nic_get_fw_version(aq_nic); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver)); + strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u", firmware_version >> 24, + (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU); + + strlcpy(drvinfo->bus_info, pdev ? 
pci_name(pdev) : "", + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = regs_count; + drvinfo->eedump_len = 0; +} + +static void aq_ethtool_get_strings(struct net_device *ndev, + u32 stringset, u8 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + if (stringset == ETH_SS_STATS) + memcpy(data, *aq_ethtool_stat_names, + sizeof(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_chars); +} + +static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) +{ + int ret = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + switch (stringset) { + case ETH_SS_STATS: + ret = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_lines; + break; + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev) +{ + return AQ_CFG_RSS_INDIRECTION_TABLE_MAX; +} + +static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + return sizeof(cfg->aq_rss.hash_secret_key); +} + +static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + unsigned int i = 0U; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + if (indir) { + for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++) + indir[i] = cfg->aq_rss.indirection_table[i]; + } + if (key) + memcpy(key, cfg->aq_rss.hash_secret_key, + sizeof(cfg->aq_rss.hash_secret_key)); + return 0; +} + +static int aq_ethtool_get_rxnfc(struct net_device *ndev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = cfg->vecs; + break; + + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +const struct ethtool_ops aq_ethtool_ops = { + .get_link = aq_ethtool_get_link, + .get_regs_len = aq_ethtool_get_regs_len, + .get_regs = aq_ethtool_get_regs, + .get_settings = aq_ethtool_get_settings, + .set_settings = aq_ethtool_set_settings, + .get_drvinfo = aq_ethtool_get_drvinfo, + .get_strings = aq_ethtool_get_strings, + .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_rxfh_key_size = aq_ethtool_get_rss_key_size, + .get_rxfh = aq_ethtool_get_rss, + .get_rxnfc = aq_ethtool_get_rxnfc, + .get_sset_count = aq_ethtool_get_sset_count, + .get_ethtool_stats = aq_ethtool_stats +}; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h new file mode 100644 index 000000000000..21c126eeb5eb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h @@ -0,0 +1,19 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ethtool.h: Declaration of ethertool related functions. 
*/ + +#ifndef AQ_ETHTOOL_H +#define AQ_ETHTOOL_H + +#include "aq_common.h" + +extern const struct ethtool_ops aq_ethtool_ops; + +#endif /* AQ_ETHTOOL_H */ -- cgit v1.2.3 From ee6d6d0055449c6d9d5fcce6e72614d6eaa18c01 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:19 -0800 Subject: net: ethernet: aquantia: Receive side scaling Add definitions that support receive side scaling. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_rss.h | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 drivers/net/ethernet/aquantia/atlantic/aq_rss.h diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h new file mode 100644 index 000000000000..1db6eb20a8f2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h @@ -0,0 +1,26 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_rss.h: Receive Side Scaling definitions. */ + +#ifndef AQ_RSS_H +#define AQ_RSS_H + +#include "aq_common.h" +#include "aq_cfg.h" + +struct aq_rss_parameters { + u16 base_cpu_number; + u16 indirection_table_size; + u16 hash_secret_key_size; + u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)]; + u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX]; +}; + +#endif /* AQ_RSS_H */ -- cgit v1.2.3 From aa13f7cedd9f2813acb83db98bbd3041dfa1e8fa Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Mon, 23 Jan 2017 22:09:20 -0800 Subject: net: ethernet: aquantia: Integrate AQtion 2.5/5 GB NIC driver Modify the drivers/net/ethernet/{Makefile,Kconfig} file to make them a part of the network drivers build. Signed-off-by: Alexander Loktionov Signed-off-by: Dmitrii Tarakanov Signed-off-by: Pavel Belous Signed-off-by: Dmitry Bezrukov Signed-off-by: David M. VomLehn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index afc07d4434bb..8c08f9deef92 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" +source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/aurora/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index e7861a8aa817..26dce5bf2c18 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ +obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ -- cgit v1.2.3 From 146408693945a68f227175c1cea3772fc0f98f20 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 24 Jan 2017 09:25:54 +0000 Subject: net: sctp: fix array overrun read on sctp_timer_tbl Table sctp_timer_tbl is missing a TIMEOUT_RECONF string so add this in. Also compare timeout with the size of the array sctp_timer_tbl rather than SCTP_EVENT_TIMEOUT_MAX. Also add a build time check that SCTP_EVENT_TIMEOUT_MAX is correct so we don't ever get this kind of mismatch between the table and SCTP_EVENT_TIMEOUT_MAX in the future. Kudos to Marcelo Ricardo Leitner for spotting the missing string and suggesting the build time sanity check. Fixes CoverityScan CID#1397639 ("Out-of-bounds read") Fixes: 7b9438de0cd4 ("sctp: add stream reconf timer") Signed-off-by: Colin Ian King Acked-by: Neil Horman Reviewed-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/debug.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 95d7b15dad21..2e47eb2f05cb 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -159,6 +159,7 @@ static const char *const sctp_timer_tbl[] = { "TIMEOUT_T4_RTO", "TIMEOUT_T5_SHUTDOWN_GUARD", "TIMEOUT_HEARTBEAT", + "TIMEOUT_RECONF", "TIMEOUT_SACK", "TIMEOUT_AUTOCLOSE", }; @@ -166,7 +167,9 @@ static const char *const sctp_timer_tbl[] = { /* Lookup timer debug name. */ const char *sctp_tname(const sctp_subtype_t id) { - if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX) + BUILD_BUG_ON(SCTP_EVENT_TIMEOUT_MAX + 1 != ARRAY_SIZE(sctp_timer_tbl)); + + if (id.timeout < ARRAY_SIZE(sctp_timer_tbl)) return sctp_timer_tbl[id.timeout]; return "unknown_timer"; } -- cgit v1.2.3 From efb3e74da2e1818565a9ffc5ec55d9e7bda17e8e Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 24 Jan 2017 14:53:47 +0100 Subject: net: dsa: mv88e6xxx: Abstract mv88e6165 PHY operations The mv88e6165 family has the internal PHYs mapped directly onto the SMI register space as the switch. So the registers can be read directly. Put a wrapper around this, in preparation for changing the signature in order to support the external MDIO bus of the 6390. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. 
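A stand-alone sketch of why the otherwise trivial wrapper is worth adding (illustrative names only; struct chip, generic_read() and family6165_phy_read() are not the driver's symbols): the ops table points at the wrapper, so its prototype can later diverge from the generic SMI accessor without touching that accessor or its other users.

/* Illustrative model, not mv88e6xxx code. */
#include <stdio.h>

struct chip;	/* stand-in for struct mv88e6xxx_chip */

static int generic_read(struct chip *chip, int addr, int reg,
			unsigned short *val)
{
	*val = 0x1234;	/* pretend SMI register read */
	return 0;
}

/* Family-specific wrapper: today it only forwards, but its signature is now
 * free to change (e.g. gain a bus argument) in a follow-up patch. */
static int family6165_phy_read(struct chip *chip, int addr, int reg,
			       unsigned short *val)
{
	return generic_read(chip, addr, reg, val);
}

struct phy_ops {
	int (*phy_read)(struct chip *chip, int addr, int reg,
			unsigned short *val);
};

static const struct phy_ops ops_6165 = {
	.phy_read = family6165_phy_read,
};

int main(void)
{
	unsigned short v;

	ops_6165.phy_read(NULL, 0, 2, &v);
	printf("reg = 0x%04x\n", v);
	return 0;
}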
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index c7e08e13bb54..374d887e8256 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -222,6 +222,18 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) return 0; } +static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 *val) +{ + return mv88e6xxx_read(chip, addr, reg, val); +} + +static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val) +{ + return mv88e6xxx_write(chip, addr, reg, val); +} + static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val) { @@ -3085,8 +3097,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { static const struct mv88e6xxx_ops mv88e6123_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3132,8 +3144,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { static const struct mv88e6xxx_ops mv88e6161_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3157,8 +3169,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { static const struct mv88e6xxx_ops mv88e6165_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, -- cgit v1.2.3 From ee26a2284b3d1ab0fb6c183d1cca7a85a05c82e0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 24 Jan 2017 14:53:48 +0100 Subject: net: dsa: mv88e6xxx: Pass mii_bus to all PHY operations In preparation for supporting multiple MDIO busses, pass the mii_bus structure to all PHY operations. It will in future then be clear on which MDIO bus the operation should be performed. For reads/write from phylib, the mii_bus is readily available. However some internal code also access the PHY, e.g. for EEE and SERDES. Make this code use the one and only currently available MDIO bus. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 42 +++++++++++++++++++++++++---------- drivers/net/dsa/mv88e6xxx/global2.c | 10 +++++---- drivers/net/dsa/mv88e6xxx/global2.h | 12 ++++++---- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 10 +++++---- 4 files changed, 50 insertions(+), 24 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 374d887e8256..dedccf201452 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -222,14 +222,16 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) return 0; } -static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 *val) +static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { return mv88e6xxx_read(chip, addr, reg, val); } -static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 val) +static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) { return mv88e6xxx_write(chip, addr, reg, val); } @@ -238,22 +240,30 @@ static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val) { int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus = chip->mdio_bus; if (!chip->info->ops->phy_read) return -EOPNOTSUPP; - return chip->info->ops->phy_read(chip, addr, reg, val); + if (!bus) + return -EOPNOTSUPP; + + return chip->info->ops->phy_read(chip, bus, addr, reg, val); } static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val) { int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus = chip->mdio_bus; if (!chip->info->ops->phy_write) return -EOPNOTSUPP; - return chip->info->ops->phy_write(chip, addr, reg, val); + if (!bus) + return -EOPNOTSUPP; + + return chip->info->ops->phy_write(chip, bus, addr, reg, val); } static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page) @@ -623,8 +633,9 @@ static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip) del_timer_sync(&chip->ppu_timer); } -static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 *val) +static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { int err; @@ -637,8 +648,9 @@ static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, return err; } -static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 val) +static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) { int err; @@ -2897,8 +2909,11 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) if (phy >= mv88e6xxx_num_ports(chip)) return 0xffff; + if (!chip->info->ops->phy_read) + return -EOPNOTSUPP; + mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_read(chip, phy, reg, &val); + err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); mutex_unlock(&chip->reg_lock); return err ? 
err : val; @@ -2912,8 +2927,11 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) if (phy >= mv88e6xxx_num_ports(chip)) return 0xffff; + if (!chip->info->ops->phy_write) + return -EOPNOTSUPP; + mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_write(chip, phy, reg, val); + err = chip->info->ops->phy_write(chip, bus, phy, reg, val); mutex_unlock(&chip->reg_lock); return err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index ead2e265c9ef..1f9a12a1fad9 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -501,8 +501,9 @@ static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd) return mv88e6xxx_g2_smi_phy_wait(chip); } -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val) +int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; int err; @@ -518,8 +519,9 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val); } -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val) +int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; int err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 2918f22388f7..00e635279ba1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -23,10 +23,12 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) return 0; } -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val); -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val); +int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val); +int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val); int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, @@ -57,12 +59,14 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) } static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int reg, u16 *val) { return -EOPNOTSUPP; } static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int reg, u16 val) { return -EOPNOTSUPP; diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index ce8b43b14e96..7d75dd546bf7 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -758,10 +758,12 @@ struct mv88e6xxx_ops { int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr); - int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val); - int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val); + int (*phy_read)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val); + int (*phy_write)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val); /* PHY Polling Unit (PPU) operations */ int (*ppu_enable)(struct mv88e6xxx_chip *chip); -- cgit v1.2.3 From 
0dd12d54c4252e5a51f3b9f7b622b3b7a5b95293 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 24 Jan 2017 14:53:49 +0100 Subject: net: dsa: mv88e6xxx: Add mdio private structure Have the MDIO bus driver code allocate a private structure and make the chip a member of it. This will allow us to add further members in the future. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 13 +++++++++---- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 4 ++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index dedccf201452..b0fd1432f4f3 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2902,7 +2902,8 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { - struct mv88e6xxx_chip *chip = bus->priv; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; u16 val; int err; @@ -2921,7 +2922,8 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { - struct mv88e6xxx_chip *chip = bus->priv; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; int err; if (phy >= mv88e6xxx_num_ports(chip)) @@ -2941,17 +2943,20 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, struct device_node *np) { static int index; + struct mv88e6xxx_mdio_bus *mdio_bus; struct mii_bus *bus; int err; if (np) chip->mdio_np = of_get_child_by_name(np, "mdio"); - bus = devm_mdiobus_alloc(chip->dev); + bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); if (!bus) return -ENOMEM; - bus->priv = (void *)chip; + mdio_bus = bus->priv; + mdio_bus->chip = chip; + if (np) { bus->name = np->full_name; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 7d75dd546bf7..6f7ddb594809 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -750,6 +750,10 @@ struct mv88e6xxx_bus_ops { int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); }; +struct mv88e6xxx_mdio_bus { + struct mv88e6xxx_chip *chip; +}; + struct mv88e6xxx_ops { int (*get_eeprom)(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); -- cgit v1.2.3 From a3c53be55c955b7150cda17874c3fcb4eeb97a89 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 24 Jan 2017 14:53:50 +0100 Subject: net: dsa: mv88e6xxx: Support multiple MDIO busses The mv88e6390 has multiple MDIO busses. Generalize the parsing of the device tree to support multiple mdio nodes. The external mdio bus has a compatible strings to indicate it is external. Keep a linked list of busses, placing the external mdio bus at the tail of the list. When within the driver an mdio bus is needed, e.g. for EEE or SERDES, use the head of the list which should be the internal bus. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
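The head-versus-tail bookkeeping described above can be modelled in a few lines of plain C (names invented for the sketch; the driver itself uses struct list_head with list_add()/list_add_tail()): external buses are appended, the internal bus is prepended, so the first entry is always the internal one.

/* Stand-alone model, not the driver code. */
#include <stdbool.h>
#include <stdio.h>

struct mdio_bus {
	const char *name;
	bool external;
	struct mdio_bus *next;
};

static struct mdio_bus *buses;

static void bus_register(struct mdio_bus *bus)
{
	if (bus->external) {
		/* external bus: append at the tail */
		struct mdio_bus **p = &buses;

		while (*p)
			p = &(*p)->next;
		bus->next = NULL;
		*p = bus;
	} else {
		/* internal/default bus: put at the head */
		bus->next = buses;
		buses = bus;
	}
}

static struct mdio_bus *default_bus(void)
{
	return buses;	/* head of the list, i.e. the internal bus */
}

int main(void)
{
	struct mdio_bus internal = { .name = "internal0" };
	struct mdio_bus external = { .name = "external0", .external = true };

	bus_register(&external);
	bus_register(&internal);

	printf("default bus: %s\n", default_bus()->name);	/* internal0 */
	return 0;
}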
Miller --- .../devicetree/bindings/net/dsa/marvell.txt | 41 +++++++- drivers/net/dsa/mv88e6xxx/chip.c | 110 +++++++++++++++------ drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 10 +- 3 files changed, 126 insertions(+), 35 deletions(-) diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt index b3dd6b40e0de..64dbdd825981 100644 --- a/Documentation/devicetree/bindings/net/dsa/marvell.txt +++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt @@ -26,8 +26,12 @@ Optional properties: - interrupt-controller : Indicates the switch is itself an interrupt controller. This is used for the PHY interrupts. #interrupt-cells = <2> : Controller uses two cells, number and flag -- mdio : container of PHY and devices on the switches MDIO - bus +- mdio : Container of PHY and devices on the switches MDIO + bus. +- mdio? : Container of PHYs and devices on the external MDIO + bus. The node must contains a compatible string of + "marvell,mv88e6xxx-mdio-external" + Example: mdio { @@ -53,3 +57,36 @@ Example: }; }; }; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&gpio0>; + interrupts = <27 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #interrupt-cells = <2>; + + switch0: switch@0 { + compatible = "marvell,mv88e6390"; + reg = <0>; + reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; + }; + mdio { + #address-cells = <1>; + #size-cells = <0>; + switch1phy0: switch1phy0@0 { + reg = <0>; + interrupt-parent = <&switch0>; + interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + mdio1 { + compatible = "marvell,mv88e6xxx-mdio-external"; + #address-cells = <1>; + #size-cells = <0>; + switch1phy9: switch1phy0@9 { + reg = <9>; + }; + }; + }; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index b0fd1432f4f3..5668e778ed1d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -236,16 +236,29 @@ static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, return mv88e6xxx_write(chip, addr, reg, val); } +static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip) +{ + struct mv88e6xxx_mdio_bus *mdio_bus; + + mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus, + list); + if (!mdio_bus) + return NULL; + + return mdio_bus->bus; +} + static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val) { int addr = phy; /* PHY devices addresses start at 0x0 */ - struct mii_bus *bus = chip->mdio_bus; + struct mii_bus *bus; - if (!chip->info->ops->phy_read) + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) return -EOPNOTSUPP; - if (!bus) + if (!chip->info->ops->phy_read) return -EOPNOTSUPP; return chip->info->ops->phy_read(chip, bus, addr, reg, val); @@ -255,12 +268,13 @@ static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val) { int addr = phy; /* PHY devices addresses start at 0x0 */ - struct mii_bus *bus = chip->mdio_bus; + struct mii_bus *bus; - if (!chip->info->ops->phy_write) + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) return -EOPNOTSUPP; - if (!bus) + if (!chip->info->ops->phy_write) return -EOPNOTSUPP; return chip->info->ops->phy_write(chip, bus, addr, reg, val); @@ -2845,7 +2859,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) int i; chip->ds = ds; - ds->slave_mii_bus = chip->mdio_bus; + ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); mutex_lock(&chip->reg_lock); @@ -2940,22 +2954,23 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) } 
static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, - struct device_node *np) + struct device_node *np, + bool external) { static int index; struct mv88e6xxx_mdio_bus *mdio_bus; struct mii_bus *bus; int err; - if (np) - chip->mdio_np = of_get_child_by_name(np, "mdio"); - bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); if (!bus) return -ENOMEM; mdio_bus = bus->priv; + mdio_bus->bus = bus; mdio_bus->chip = chip; + INIT_LIST_HEAD(&mdio_bus->list); + mdio_bus->external = external; if (np) { bus->name = np->full_name; @@ -2969,34 +2984,72 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, bus->write = mv88e6xxx_mdio_write; bus->parent = chip->dev; - if (chip->mdio_np) - err = of_mdiobus_register(bus, chip->mdio_np); + if (np) + err = of_mdiobus_register(bus, np); else err = mdiobus_register(bus); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); - goto out; + return err; } - chip->mdio_bus = bus; + + if (external) + list_add_tail(&mdio_bus->list, &chip->mdios); + else + list_add(&mdio_bus->list, &chip->mdios); return 0; +} -out: - if (chip->mdio_np) - of_node_put(chip->mdio_np); +static const struct of_device_id mv88e6xxx_mdio_external_match[] = { + { .compatible = "marvell,mv88e6xxx-mdio-external", + .data = (void *)true }, + { }, +}; - return err; +static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, + struct device_node *np) +{ + const struct of_device_id *match; + struct device_node *child; + int err; + + /* Always register one mdio bus for the internal/default mdio + * bus. This maybe represented in the device tree, but is + * optional. + */ + child = of_get_child_by_name(np, "mdio"); + err = mv88e6xxx_mdio_register(chip, child, false); + if (err) + return err; + + /* Walk the device tree, and see if there are any other nodes + * which say they are compatible with the external mdio + * bus. 
+ */ + for_each_available_child_of_node(np, child) { + match = of_match_node(mv88e6xxx_mdio_external_match, child); + if (match) { + err = mv88e6xxx_mdio_register(chip, child, true); + if (err) + return err; + } + } + + return 0; } -static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) +static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) { - struct mii_bus *bus = chip->mdio_bus; + struct mv88e6xxx_mdio_bus *mdio_bus; + struct mii_bus *bus; - mdiobus_unregister(bus); + list_for_each_entry(mdio_bus, &chip->mdios, list) { + bus = mdio_bus->bus; - if (chip->mdio_np) - of_node_put(chip->mdio_np); + mdiobus_unregister(bus); + } } static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) @@ -4123,6 +4176,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) chip->dev = dev; mutex_init(&chip->reg_lock); + INIT_LIST_HEAD(&chip->mdios); return chip; } @@ -4197,7 +4251,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, mv88e6xxx_phy_init(chip); - err = mv88e6xxx_mdio_register(chip, NULL); + err = mv88e6xxx_mdios_register(chip, NULL); if (err) goto free; @@ -4398,7 +4452,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) } } - err = mv88e6xxx_mdio_register(chip, np); + err = mv88e6xxx_mdios_register(chip, np); if (err) goto out_g2_irq; @@ -4409,7 +4463,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) return 0; out_mdio: - mv88e6xxx_mdio_unregister(chip); + mv88e6xxx_mdios_unregister(chip); out_g2_irq: if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0) mv88e6xxx_g2_irq_free(chip); @@ -4430,7 +4484,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); - mv88e6xxx_mdio_unregister(chip); + mv88e6xxx_mdios_unregister(chip); if (chip->irq > 0) { if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 6f7ddb594809..7d24add45e74 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -730,11 +730,8 @@ struct mv88e6xxx_chip { /* set to size of eeprom if supported by the switch */ int eeprom_len; - /* Device node for the MDIO bus */ - struct device_node *mdio_np; - - /* And the MDIO bus itself */ - struct mii_bus *mdio_bus; + /* List of mdio busses */ + struct list_head mdios; /* There can be two interrupt controllers, which are chained * off a GPIO as interrupt source @@ -751,7 +748,10 @@ struct mv88e6xxx_bus_ops { }; struct mv88e6xxx_mdio_bus { + struct mii_bus *bus; struct mv88e6xxx_chip *chip; + struct list_head list; + bool external; }; struct mv88e6xxx_ops { -- cgit v1.2.3 From c61a6a71e7a48ff3a8714a93b9d4f6caa67990b1 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 24 Jan 2017 14:53:51 +0100 Subject: net: dsa: mv88e6xxx: Implement the 6390 external MDIO bus With all the infrastructure in place, implement access to the external MDIO bus on the 6390 family. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
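A toy model of how the external flag folds into the clause-22 SMI command word; the BUSY, EXTERNAL and MODE_22 bit positions match the defines in the patch below, while the read opcode value is an assumption made only for this sketch.

/* Illustrative only, not the driver's register layout beyond the bits shown
 * in the patch that follows. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMI_PHY_CMD_BUSY	(1u << 15)
#define SMI_PHY_CMD_EXTERNAL	(1u << 13)
#define SMI_PHY_CMD_MODE_22	(1u << 12)
#define SMI_PHY_CMD_OP_READ	((0x2u << 10) | SMI_PHY_CMD_MODE_22 | \
				 SMI_PHY_CMD_BUSY)

static uint16_t smi_read_cmd(int addr, int reg, bool external)
{
	uint16_t cmd = SMI_PHY_CMD_OP_READ | (addr << 5) | reg;

	if (external)
		cmd |= SMI_PHY_CMD_EXTERNAL;	/* target the external bus */
	return cmd;
}

int main(void)
{
	printf("internal: 0x%04x\n", smi_read_cmd(1, 2, false));
	printf("external: 0x%04x\n", smi_read_cmd(1, 2, true));
	return 0;
}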
Miller --- drivers/net/dsa/mv88e6xxx/global2.c | 8 ++++++++ drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 1 + 2 files changed, 9 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 1f9a12a1fad9..353e26bea3c3 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -506,8 +506,12 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; int err; + if (mdio_bus->external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + err = mv88e6xxx_g2_smi_phy_wait(chip); if (err) return err; @@ -524,8 +528,12 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; int err; + if (mdio_bus->external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + err = mv88e6xxx_g2_smi_phy_wait(chip); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 7d24add45e74..572d585dc1e2 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -387,6 +387,7 @@ #define GLOBAL2_PTP_AVB_DATA 0x17 #define GLOBAL2_SMI_PHY_CMD 0x18 #define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15) +#define GLOBAL2_SMI_PHY_CMD_EXTERNAL BIT(13) #define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12) #define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \ GLOBAL2_SMI_PHY_CMD_MODE_22 | \ -- cgit v1.2.3 From 0b307ebd6834012c483b8ee9801164123a54f79a Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:05:58 -0800 Subject: netvsc: remove no longer needed receive staging buffers The ring buffer mapping now handles the wraparound case inside get_next_pkt_raw. Therefore it is not necessary to have an additional special receive staging buffer. See commit 1562edaed8c164ca5199 ("Drivers: hv: ring_buffer: count on wrap around mappings") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 5 --- drivers/net/hyperv/netvsc.c | 83 ++++++--------------------------------- drivers/net/hyperv/rndis_filter.c | 11 ------ 3 files changed, 11 insertions(+), 88 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 3958adade7eb..cce70ceba6d5 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -748,11 +748,6 @@ struct netvsc_device { int ring_size; - /* The primary channel callback buffer */ - unsigned char *cb_buffer; - /* The sub channel callback buffer */ - unsigned char *sub_cb_buf; - struct multi_send_data msd[VRSS_CHANNEL_MAX]; u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 
8 */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5a1cc089acb7..5e90c7fb1bd2 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -67,12 +67,6 @@ static struct netvsc_device *alloc_net_device(void) if (!net_device) return NULL; - net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL); - if (!net_device->cb_buffer) { - kfree(net_device); - return NULL; - } - net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); @@ -93,7 +87,6 @@ static void free_netvsc_device(struct netvsc_device *nvdev) for (i = 0; i < VRSS_CHANNEL_MAX; i++) vfree(nvdev->mrc[i].buf); - kfree(nvdev->cb_buffer); kfree(nvdev); } @@ -584,7 +577,6 @@ void netvsc_device_remove(struct hv_device *device) vmbus_close(device->channel); /* Release all resources */ - vfree(net_device->sub_cb_buf); free_netvsc_device(net_device); } @@ -1271,16 +1263,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device, void netvsc_channel_cb(void *context) { - int ret; - struct vmbus_channel *channel = (struct vmbus_channel *)context; + struct vmbus_channel *channel = context; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct hv_device *device; struct netvsc_device *net_device; - u32 bytes_recvd; - u64 request_id; struct vmpacket_descriptor *desc; - unsigned char *buffer; - int bufferlen = NETVSC_PACKET_SIZE; struct net_device *ndev; bool need_to_commit = false; @@ -1292,65 +1279,19 @@ void netvsc_channel_cb(void *context) net_device = get_inbound_net_device(device); if (!net_device) return; + ndev = hv_get_drvdata(device); - buffer = get_per_channel_state(channel); - - do { - desc = get_next_pkt_raw(channel); - if (desc != NULL) { - netvsc_process_raw_pkt(device, - channel, - net_device, - ndev, - desc->trans_id, - desc); - - put_pkt_raw(channel, desc); - need_to_commit = true; - continue; - } - if (need_to_commit) { - need_to_commit = false; - commit_rd_index(channel); - } - ret = vmbus_recvpacket_raw(channel, buffer, bufferlen, - &bytes_recvd, &request_id); - if (ret == 0) { - if (bytes_recvd > 0) { - desc = (struct vmpacket_descriptor *)buffer; - netvsc_process_raw_pkt(device, - channel, - net_device, - ndev, - request_id, - desc); - } else { - /* - * We are done for this pass. 
- */ - break; - } - - } else if (ret == -ENOBUFS) { - if (bufferlen > NETVSC_PACKET_SIZE) - kfree(buffer); - /* Handle large packet */ - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (buffer == NULL) { - /* Try again next time around */ - netdev_err(ndev, - "unable to allocate buffer of size " - "(%d)!!\n", bytes_recvd); - break; - } - - bufferlen = bytes_recvd; - } - } while (1); + while ((desc = get_next_pkt_raw(channel)) != NULL) { + netvsc_process_raw_pkt(device, channel, net_device, + ndev, desc->trans_id, desc); - if (bufferlen > NETVSC_PACKET_SIZE) - kfree(buffer); + put_pkt_raw(channel, desc); + need_to_commit = true; + } + + if (need_to_commit) + commit_rd_index(channel); netvsc_chk_recv_comp(net_device, channel, q_idx); } @@ -1374,8 +1315,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) net_device->ring_size = ring_size; - set_per_channel_state(device->channel, net_device->cb_buffer); - /* Open the channel */ ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, ring_size * PAGE_SIZE, NULL, 0, diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8d90904e0e49..113c7f4d1590 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -948,9 +948,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) if (chn_index >= nvscdev->num_chn) return; - set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) * - NETVSC_PACKET_SIZE); - nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); @@ -1099,14 +1096,6 @@ int rndis_filter_device_add(struct hv_device *dev, if (net_device->num_chn == 1) goto out; - net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) * - NETVSC_PACKET_SIZE); - if (!net_device->sub_cb_buf) { - net_device->num_chn = 1; - dev_info(&dev->device, "No memory for subchannels.\n"); - goto out; - } - vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); init_packet = &net_device->channel_init_pkt; -- cgit v1.2.3 From 23312a3be999c22ec70c0fdf9f01cdee05fac986 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:05:59 -0800 Subject: netvsc: negotiate checksum and segmentation parameters Redo how Hyper-V network driver negotiates offload features. Query the host to determine offload settings, and use the result. Also: * disable IPv4 header checksum offload (not used by Linux) * enable TSO only if host supports * enable UDP checksum offload if supported * don't advertise support for checksumming of non-IP protocols * adjust GSO maximum segment size * enable HIGHDMA Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
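The "record what the host offered, then test every packet against it" idea reduces to a bitmask check at transmit time; a minimal stand-alone model follows (the XSUM_* values mirror the new TRANSPORT_INFO_* flags in the patch below, the remaining names are invented for the sketch).

/* Stand-alone model, not the netvsc code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XSUM_IPV4_TCP	0x01
#define XSUM_IPV4_UDP	0x02
#define XSUM_IPV6_TCP	0x10
#define XSUM_IPV6_UDP	0x20

/* Filled in once at device-add time from the host capability query. */
static uint32_t tx_checksum_mask;

static bool can_offload(uint32_t pkt_type)
{
	return pkt_type & tx_checksum_mask;
}

int main(void)
{
	/* pretend the host advertised TCP checksum offload only */
	tx_checksum_mask = XSUM_IPV4_TCP | XSUM_IPV6_TCP;

	printf("tcp4: %s\n", can_offload(XSUM_IPV4_TCP) ? "hw" : "sw fallback");
	printf("udp4: %s\n", can_offload(XSUM_IPV4_UDP) ? "hw" : "sw fallback");
	return 0;
}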
Miller --- drivers/net/hyperv/hyperv_net.h | 149 +++++++++++++++++++++++++++++++++++--- drivers/net/hyperv/netvsc_drv.c | 92 ++++++++++------------- drivers/net/hyperv/rndis_filter.c | 140 +++++++++++++++++++++++++++++++---- 3 files changed, 305 insertions(+), 76 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index cce70ceba6d5..ccf94c16084c 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -34,6 +34,7 @@ #define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88 #define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89 +#define NDIS_OBJECT_TYPE_OFFLOAD 0xa7 #define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2 #define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2 @@ -685,6 +686,7 @@ struct net_device_context { struct work_struct work; u32 msg_enable; /* debug level */ + u32 tx_checksum_mask; struct netvsc_stats __percpu *tx_stats; struct netvsc_stats __percpu *rx_stats; @@ -934,7 +936,7 @@ struct ndis_pkt_8021q_info { }; }; -struct ndis_oject_header { +struct ndis_object_header { u8 type; u8 revision; u16 size; @@ -942,6 +944,9 @@ struct ndis_oject_header { #define NDIS_OBJECT_TYPE_DEFAULT 0x80 #define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3 +#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2 +#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1 + #define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2 @@ -968,8 +973,135 @@ struct ndis_oject_header { #define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */ #define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */ +/* + * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES + * ndis_type: NDIS_OBJTYPE_OFFLOAD + */ + +#define NDIS_OFFLOAD_ENCAP_NONE 0x0000 +#define NDIS_OFFLOAD_ENCAP_NULL 0x0001 +#define NDIS_OFFLOAD_ENCAP_8023 0x0002 +#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004 +#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008 +#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010 + +struct ndis_csum_offload { + u32 ip4_txenc; + u32 ip4_txcsum; +#define NDIS_TXCSUM_CAP_IP4OPT 0x001 +#define NDIS_TXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP4 0x010 +#define NDIS_TXCSUM_CAP_UDP4 0x040 +#define NDIS_TXCSUM_CAP_IP4 0x100 + +#define NDIS_TXCSUM_ALL_TCP4 (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT) + + u32 ip4_rxenc; + u32 ip4_rxcsum; +#define NDIS_RXCSUM_CAP_IP4OPT 0x001 +#define NDIS_RXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP4 0x010 +#define NDIS_RXCSUM_CAP_UDP4 0x040 +#define NDIS_RXCSUM_CAP_IP4 0x100 + u32 ip6_txenc; + u32 ip6_txcsum; +#define NDIS_TXCSUM_CAP_IP6EXT 0x001 +#define NDIS_TXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP6 0x010 +#define NDIS_TXCSUM_CAP_UDP6 0x040 + u32 ip6_rxenc; + u32 ip6_rxcsum; +#define NDIS_RXCSUM_CAP_IP6EXT 0x001 +#define NDIS_RXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP6 0x010 +#define NDIS_RXCSUM_CAP_UDP6 0x040 + +#define NDIS_TXCSUM_ALL_TCP6 (NDIS_TXCSUM_CAP_TCP6 | \ + NDIS_TXCSUM_CAP_TCP6OPT | \ + NDIS_TXCSUM_CAP_IP6EXT) +}; + +struct ndis_lsov1_offload { + u32 encap; + u32 maxsize; + u32 minsegs; + u32 opts; +}; + +struct ndis_ipsecv1_offload { + u32 encap; + u32 ah_esp; + u32 xport_tun; + u32 ip4_opts; + u32 flags; + u32 ip4_ah; + u32 ip4_esp; +}; + +struct ndis_lsov2_offload { + u32 ip4_encap; + u32 ip4_maxsz; + u32 ip4_minsg; + u32 ip6_encap; + u32 ip6_maxsz; + u32 ip6_minsg; + u32 ip6_opts; +#define NDIS_LSOV2_CAP_IP6EXT 0x001 +#define NDIS_LSOV2_CAP_TCP6OPT 0x004 + +#define NDIS_LSOV2_CAP_IP6 (NDIS_LSOV2_CAP_IP6EXT | \ + NDIS_LSOV2_CAP_TCP6OPT) +}; + 
+struct ndis_ipsecv2_offload { + u32 encap; + u16 ip6; + u16 ip4opt; + u16 ip6ext; + u16 ah; + u16 esp; + u16 ah_esp; + u16 xport; + u16 tun; + u16 xport_tun; + u16 lso; + u16 extseq; + u32 udp_esp; + u32 auth; + u32 crypto; + u32 sa_caps; +}; + +struct ndis_rsc_offload { + u16 ip4; + u16 ip6; +}; + +struct ndis_encap_offload { + u32 flags; + u32 maxhdr; +}; + +struct ndis_offload { + struct ndis_object_header header; + struct ndis_csum_offload csum; + struct ndis_lsov1_offload lsov1; + struct ndis_ipsecv1_offload ipsecv1; + struct ndis_lsov2_offload lsov2; + u32 flags; + /* NDIS >= 6.1 */ + struct ndis_ipsecv2_offload ipsecv2; + /* NDIS >= 6.30 */ + struct ndis_rsc_offload rsc; + struct ndis_encap_offload encap_gre; +}; + +#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload) +#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ipsecv2) +#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, rsc) + struct ndis_offload_params { - struct ndis_oject_header header; + struct ndis_object_header header; u8 ip_v4_csum; u8 tcp_ip_v4_csum; u8 udp_ip_v4_csum; @@ -1296,15 +1428,10 @@ struct rndis_message { #define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400 #define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800 -#define INFO_IPV4 2 -#define INFO_IPV6 4 -#define INFO_TCP 2 -#define INFO_UDP 4 - #define TRANSPORT_INFO_NOT_IP 0 -#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP) -#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP) -#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP) -#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP) +#define TRANSPORT_INFO_IPV4_TCP 0x01 +#define TRANSPORT_INFO_IPV4_UDP 0x02 +#define TRANSPORT_INFO_IPV6_TCP 0x10 +#define TRANSPORT_INFO_IPV6_UDP 0x20 #endif /* _HYPERV_NET_H */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index eebeb9378bac..f26d9d2d475f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -42,14 +42,6 @@ #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) -#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO | \ - NETIF_F_TSO6 | \ - NETIF_F_HW_CSUM) - -/* Restrict GSO size to account for NVGRE */ -#define NETVSC_GSO_MAX_SIZE 62768 static int ring_size = 128; module_param(ring_size, int, S_IRUGO); @@ -323,33 +315,25 @@ static int netvsc_get_slots(struct sk_buff *skb) return slots + frag_slots; } -static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off) +static u32 net_checksum_info(struct sk_buff *skb) { - u32 ret_val = TRANSPORT_INFO_NOT_IP; - - if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) && - (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) { - goto not_ip; - } - - *trans_off = skb_transport_offset(skb); + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip = ip_hdr(skb); - if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) { - struct iphdr *iphdr = ip_hdr(skb); - - if (iphdr->protocol == IPPROTO_TCP) - ret_val = TRANSPORT_INFO_IPV4_TCP; - else if (iphdr->protocol == IPPROTO_UDP) - ret_val = TRANSPORT_INFO_IPV4_UDP; + if (ip->protocol == IPPROTO_TCP) + return TRANSPORT_INFO_IPV4_TCP; + else if (ip->protocol == IPPROTO_UDP) + return TRANSPORT_INFO_IPV4_UDP; } else { - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - ret_val = TRANSPORT_INFO_IPV6_TCP; + struct ipv6hdr *ip6 = ipv6_hdr(skb); + + if (ip6->nexthdr == IPPROTO_TCP) + return TRANSPORT_INFO_IPV6_TCP; else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) - ret_val = TRANSPORT_INFO_IPV6_UDP; + return TRANSPORT_INFO_IPV6_UDP; } -not_ip: - return 
ret_val; + return TRANSPORT_INFO_NOT_IP; } static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) @@ -362,9 +346,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct rndis_packet *rndis_pkt; u32 rndis_msg_size; struct rndis_per_packet_info *ppi; - struct ndis_tcp_ip_checksum_info *csum_info; - int hdr_offset; - u32 net_trans_info; u32 hash; u32 skb_length; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; @@ -445,13 +426,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) VLAN_PRIO_SHIFT; } - net_trans_info = get_net_transport_info(skb, &hdr_offset); - - /* - * Setup the sendside checksum offload only if this is not a - * GSO packet. - */ - if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) { + if (skb_is_gso(skb)) { struct ndis_tcp_lso_info *lso_info; rndis_msg_size += NDIS_LSO_PPI_SIZE; @@ -462,7 +437,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ppi->ppi_offset); lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; - if (net_trans_info & (INFO_IPV4 << 16)) { + if (skb->protocol == htons(ETH_P_IP)) { lso_info->lso_v2_transmit.ip_version = NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; ip_hdr(skb)->tot_len = 0; @@ -478,10 +453,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } - lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; + lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb); lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (net_trans_info & INFO_TCP) { + if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) { + struct ndis_tcp_ip_checksum_info *csum_info; + rndis_msg_size += NDIS_CSUM_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, TCPIP_CHKSUM_PKTINFO); @@ -489,15 +466,25 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi + ppi->ppi_offset); - if (net_trans_info & (INFO_IPV4 << 16)) + csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); + + if (skb->protocol == htons(ETH_P_IP)) { csum_info->transmit.is_ipv4 = 1; - else + + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + csum_info->transmit.tcp_checksum = 1; + else + csum_info->transmit.udp_checksum = 1; + } else { csum_info->transmit.is_ipv6 = 1; - csum_info->transmit.tcp_checksum = 1; - csum_info->transmit.tcp_header_offset = hdr_offset; + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + csum_info->transmit.tcp_checksum = 1; + else + csum_info->transmit.udp_checksum = 1; + } } else { - /* UDP checksum (and other) offload is not supported. 
*/ + /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) goto drop; } @@ -1372,10 +1359,6 @@ static int netvsc_probe(struct hv_device *dev, INIT_LIST_HEAD(&net_device_ctx->reconfig_events); net->netdev_ops = &device_ops; - - net->hw_features = NETVSC_HW_FEATURES; - net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX; - net->ethtool_ops = ðtool_ops; SET_NETDEV_DEV(net, &dev->device); @@ -1395,10 +1378,15 @@ static int netvsc_probe(struct hv_device *dev, } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + /* hw_features computed in rndis_filter_device_add */ + net->features = net->hw_features | + NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + net->vlan_features = net->features; + nvdev = net_device_ctx->nvdev; netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); - netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); /* MTU range: 68 - 1500 or 65521 */ net->min_mtu = NETVSC_MTU_MIN; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 113c7f4d1590..8f04be1d7421 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -485,7 +485,35 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, query->info_buflen = 0; query->dev_vc_handle = 0; - if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) { + if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { + struct net_device_context *ndevctx = netdev_priv(dev->ndev); + struct netvsc_device *nvdev = ndevctx->nvdev; + struct ndis_offload *hwcaps; + u32 nvsp_version = nvdev->nvsp_version; + u8 ndis_rev; + size_t size; + + if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3; + size = NDIS_OFFLOAD_SIZE; + } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2; + size = NDIS_OFFLOAD_SIZE_6_1; + } else { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1; + size = NDIS_OFFLOAD_SIZE_6_0; + } + + request->request_msg.msg_len += size; + query->info_buflen = size; + hwcaps = (struct ndis_offload *) + ((unsigned long)query + query->info_buf_offset); + + hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD; + hwcaps->header.revision = ndis_rev; + hwcaps->header.size = size; + + } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) { struct ndis_recv_scale_cap *cap; request->request_msg.msg_len += @@ -526,6 +554,44 @@ cleanup: return ret; } +/* Get the hardware offload capabilities */ +static int +rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) +{ + u32 caps_len = sizeof(*caps); + int ret; + + memset(caps, 0, sizeof(*caps)); + + ret = rndis_filter_query_device(dev, + OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, + caps, &caps_len); + if (ret) + return ret; + + if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) { + netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n", + caps->header.type); + return -EINVAL; + } + + if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) { + netdev_warn(dev->ndev, "invalid NDIS objrev %x\n", + caps->header.revision); + return -EINVAL; + } + + if (caps->header.size > caps_len || + caps->header.size < NDIS_OFFLOAD_SIZE_6_0) { + netdev_warn(dev->ndev, + "invalid NDIS objsize %u, data size %u\n", + caps->header.size, caps_len); + return -EINVAL; + } + + return 0; +} + static int rndis_filter_query_device_mac(struct rndis_device *dev) { u32 size = ETH_ALEN; @@ -974,10 +1040,12 @@ int rndis_filter_device_add(struct hv_device *dev, 
struct netvsc_device *net_device; struct rndis_device *rndis_device; struct netvsc_device_info *device_info = additional_info; + struct ndis_offload hwcaps; struct ndis_offload_params offloads; struct nvsp_message *init_packet; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); + unsigned int gso_max_size = GSO_MAX_SIZE; u32 mtu, size; u32 num_rss_qs; u32 sc_delta; @@ -1034,19 +1102,65 @@ int rndis_filter_device_add(struct hv_device *dev, memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); - /* Turn on the offloads; the host supports all of the relevant - * offloads. - */ + /* Find HW offload capabilities */ + ret = rndis_query_hwcaps(rndis_device, &hwcaps); + if (ret != 0) { + rndis_filter_device_remove(dev); + return ret; + } + + /* A value of zero means "no change"; now turn on what we want. */ memset(&offloads, 0, sizeof(struct ndis_offload_params)); - /* A value of zero means "no change"; now turn on what we - * want. - */ - offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + + /* Linux does not care about IP checksum, always does in kernel */ + offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED; + + /* Compute tx offload settings based on hw capabilities */ + net->hw_features = NETIF_F_RXCSUM; + + if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { + /* Can checksum TCP */ + net->hw_features |= NETIF_F_IP_CSUM; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP; + + offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + + if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) { + offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + net->hw_features |= NETIF_F_TSO; + + if (hwcaps.lsov2.ip4_maxsz < gso_max_size) + gso_max_size = hwcaps.lsov2.ip4_maxsz; + } + + if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) { + offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP; + } + } + + if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) { + net->hw_features |= NETIF_F_IPV6_CSUM; + + offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP; + + if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) && + (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) { + offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + net->hw_features |= NETIF_F_TSO6; + + if (hwcaps.lsov2.ip6_maxsz < gso_max_size) + gso_max_size = hwcaps.lsov2.ip6_maxsz; + } + + if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) { + offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP; + } + } + + netif_set_gso_max_size(net, gso_max_size); ret = rndis_filter_set_offload_params(net, &offloads); if (ret) -- cgit v1.2.3 From b448f4e89272ba26a73f28b0b3dd93e749b30c86 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:00 -0800 Subject: netvsc: report number of rx queues in ethtool Report actual number of receive queues to ethtool. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
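Stepping back to the offload negotiation added to rndis_filter_device_add() above: each transmit offload is now enabled only when the host advertises the matching NDIS capability, and the GSO limit is clamped to the smallest segment size either address family accepts. A standalone sketch of that mapping, with simplified types standing in for the NDIS and netdev structures (names here are illustrative only, not driver code):

#include <stdint.h>

struct hwcaps_model {
	int tcp4_csum, tcp6_csum;          /* host can checksum all TCP segments */
	uint32_t lso4_maxsz, lso6_maxsz;   /* 0 means no LSOv2 for that family */
};

/* Returns the gso_max_size to program; *can_tso4/6 stand in for setting
 * the TSO feature flags on the net device. */
static uint32_t map_tx_offloads(const struct hwcaps_model *hw, uint32_t gso_max,
				int *can_tso4, int *can_tso6)
{
	*can_tso4 = hw->tcp4_csum && hw->lso4_maxsz != 0;
	if (*can_tso4 && hw->lso4_maxsz < gso_max)
		gso_max = hw->lso4_maxsz;

	*can_tso6 = hw->tcp6_csum && hw->lso6_maxsz != 0;
	if (*can_tso6 && hw->lso6_maxsz < gso_max)
		gso_max = hw->lso6_maxsz;

	return gso_max;
}

In the driver the same decisions also feed net->hw_features and the per-family tx_checksum_mask consulted on the transmit path.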
Miller --- drivers/net/hyperv/netvsc_drv.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f26d9d2d475f..b158cab678b3 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1008,6 +1008,21 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } +static int +netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = nvdev->num_chn; + return 0; + } + return -EOPNOTSUPP; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void netvsc_poll_controller(struct net_device *net) { @@ -1028,6 +1043,7 @@ static const struct ethtool_ops ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_settings = netvsc_get_settings, .set_settings = netvsc_set_settings, + .get_rxnfc = netvsc_get_rxnfc, }; static const struct net_device_ops device_ops = { -- cgit v1.2.3 From b5a5dc8dc8ac311c5ee1cf7fa4c6ef2f4e1743b6 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:01 -0800 Subject: netvsc: report rss field values Report current components used in RSS hash. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index b158cab678b3..6ab8b52af965 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1008,6 +1008,30 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } +static int +netvsc_get_rss_hash_opts(struct netvsc_device *nvdev, + struct ethtool_rxnfc *info) +{ + info->data = RXH_IP_SRC | RXH_IP_DST; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case UDP_V4_FLOW: + case UDP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + info->data = 0; + break; + } + + return 0; +} + static int netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) @@ -1019,6 +1043,9 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = nvdev->num_chn; return 0; + + case ETHTOOL_GRXFH: + return netvsc_get_rss_hash_opts(nvdev, info); } return -EOPNOTSUPP; } -- cgit v1.2.3 From 962f3fee83a4ef9010ae84dc43ae7aecb572e2a9 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:02 -0800 Subject: netvsc: add ethtool ops to get/set RSS key For some cases it is useful to be able to change RSS key value. For example, replacing RSS key with a symmetric hash. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
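The use case mentioned above, replacing the key with a symmetric hash, usually means loading a Toeplitz key built from the repeating byte pair 0x6d, 0x5a, which is commonly cited as making the hash identical for forward and reverse flows. A small, self-contained sketch of building such a 40-byte key before handing it to the new set_rxfh path (illustration only, not driver code):

#include <stdint.h>
#include <stddef.h>

#define RSS_KEY_LEN 40	/* same length as NETVSC_HASH_KEYLEN below */

static void build_symmetric_rss_key(uint8_t key[RSS_KEY_LEN])
{
	static const uint8_t pattern[2] = { 0x6d, 0x5a };
	size_t i;

	/* Repeat 0x6d5a across the whole key; flows A->B and B->A then
	 * hash to the same value, so both directions land on one queue. */
	for (i = 0; i < RSS_KEY_LEN; i++)
		key[i] = pattern[i % 2];
}

From user space the same effect becomes reachable through ethtool's rxfh interface once this patch is in place.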
Miller --- drivers/net/hyperv/hyperv_net.h | 7 +++++- drivers/net/hyperv/netvsc_drv.c | 46 +++++++++++++++++++++++++++++++++++++++ drivers/net/hyperv/rndis_filter.c | 34 +++++++++++++++-------------- 3 files changed, 70 insertions(+), 17 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ccf94c16084c..5bf21418bbe5 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -156,6 +156,8 @@ enum rndis_device_state { RNDIS_DEV_DATAINITIALIZED, }; +#define NETVSC_HASH_KEYLEN 40 + struct rndis_device { struct net_device *ndev; @@ -166,7 +168,8 @@ struct rndis_device { spinlock_t request_lock; struct list_head req_list; - unsigned char hw_mac_adr[ETH_ALEN]; + u8 hw_mac_adr[ETH_ALEN]; + u8 rss_key[NETVSC_HASH_KEYLEN]; }; @@ -194,6 +197,8 @@ int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, void *additional_info); void rndis_filter_device_remove(struct hv_device *dev); +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *key, int num_queue); int rndis_filter_receive(struct hv_device *dev, struct hv_netvsc_packet *pkt, void **data, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 6ab8b52af965..40a88387a8f5 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1059,6 +1059,48 @@ static void netvsc_poll_controller(struct net_device *net) } #endif +static u32 netvsc_get_rxfh_key_size(struct net_device *dev) +{ + return NETVSC_HASH_KEYLEN; +} + +static u32 netvsc_rss_indir_size(struct net_device *dev) +{ + return 0; +} + +static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev = ndc->nvdev; + struct rndis_device *rndis_dev = ndev->extension; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + + if (key) + memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); + + return 0; +} + +static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev = ndc->nvdev; + struct rndis_device *rndis_dev = ndev->extension; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!key || memcmp(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN) == 0) + return 0; /* no change */ + + return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn); +} + static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1071,6 +1113,10 @@ static const struct ethtool_ops ethtool_ops = { .get_settings = netvsc_get_settings, .set_settings = netvsc_set_settings, .get_rxnfc = netvsc_get_rxnfc, + .get_rxfh_key_size = netvsc_get_rxfh_key_size, + .get_rxfh_indir_size = netvsc_rss_indir_size, + .get_rxfh = netvsc_get_rxfh, + .set_rxfh = netvsc_set_rxfh, }; static const struct net_device_ops device_ops = { diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8f04be1d7421..ac08aa1b089a 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -57,6 +57,14 @@ struct rndis_request { u8 request_ext[RNDIS_EXT_LEN]; }; +static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 
0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa +}; + static struct rndis_device *get_rndis_device(void) { struct rndis_device *device; @@ -729,23 +737,15 @@ cleanup: return ret; } -static const u8 netvsc_hash_key[] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa -}; -#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key) - -static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *rss_key, int num_queue) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_recv_scale_param) + - 4*ITAB_NUM + HASH_KEYLEN; + 4 * ITAB_NUM + NETVSC_HASH_KEYLEN; struct ndis_recv_scale_param *rssp; u32 *itab; u8 *keyp; @@ -773,7 +773,7 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) NDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); - rssp->hashkey_size = HASH_KEYLEN; + rssp->hashkey_size = NETVSC_HASH_KEYLEN; rssp->kashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; @@ -784,8 +784,7 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); - for (i = 0; i < HASH_KEYLEN; i++) - keyp[i] = netvsc_hash_key[i]; + memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN); ret = rndis_filter_send_request(rdev, request); if (ret != 0) @@ -793,7 +792,9 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { + if (set_complete->status == RNDIS_STATUS_SUCCESS) + memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); + else { netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", set_complete->status); ret = -EINVAL; @@ -1235,7 +1236,8 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = 1 + init_packet->msg.v5_msg.subchn_comp.num_subchannels; - ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); + ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, + net_device->num_chn); /* * Set the number of sub-channels to be received. -- cgit v1.2.3 From 2b01888d1b453096b5b13c0d4b73e630411198b4 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:03 -0800 Subject: netvsc: allow more flexible setting of number of channels This allows for number of channels to be managed in a manner similar to existing hardware drivers. It also removes the restriction of maximum 8 channels and allows as many as the host will allow. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
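The new netvsc_set_channels() in the diff below accepts only a combined channel count and bounds it by both the queues reserved at probe time and the host's maximum. A condensed, standalone model of that validation (simplified request type, not the ethtool structure):

struct channels_req_model { unsigned int rx, tx, other, combined; };

/* 0 = acceptable request, -1 = reject (the driver returns -EINVAL). */
static int validate_channels(const struct channels_req_model *req,
			     unsigned int host_max_chn,
			     unsigned int real_num_queues)
{
	if (req->combined == 0 || req->rx || req->tx || req->other)
		return -1;	/* only "combined" channels are supported */

	if (req->combined > host_max_chn || req->combined > real_num_queues)
		return -1;

	return 0;
}

This is the path exercised by ethtool -L on the device; on failure the driver now simply re-adds the device with the previous channel count instead of the old multi-step recovery loop.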
Miller --- drivers/net/hyperv/hyperv_net.h | 1 + drivers/net/hyperv/netvsc_drv.c | 128 +++++++++++++++------------------------- 2 files changed, 50 insertions(+), 79 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5bf21418bbe5..5a652eb8a619 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -628,6 +628,7 @@ struct nvsp_message { #define VRSS_SEND_TAB_SIZE 16 #define VRSS_CHANNEL_MAX 64 +#define VRSS_CHANNEL_DEFAULT 8 #define RNDIS_MAX_PKT_DEFAULT 8 #define RNDIS_PKT_ALIGN_DEFAULT 8 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 40a88387a8f5..ead472150742 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -47,8 +47,6 @@ static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); -static int max_num_vrss_chns = 8; - static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | @@ -709,102 +707,76 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, + u32 num_chn) +{ + struct netvsc_device_info device_info; + int ret; + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = num_chn; + device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = num_chn; + + ret = rndis_filter_device_add(dev, &device_info); + if (ret) + return ret; + + ret = netif_set_real_num_tx_queues(net, num_chn); + if (ret) + return ret; + + ret = netif_set_real_num_rx_queues(net, num_chn); + + return ret; +} + static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = net_device_ctx->nvdev; - struct netvsc_device_info device_info; - u32 num_chn; - u32 max_chn; - int ret = 0; - bool recovering = false; + unsigned int count = channels->combined_count; + int ret; + + /* We do not support separate count for rx, tx, or other */ + if (count == 0 || + channels->rx_count || channels->tx_count || channels->other_count) + return -EINVAL; + + if (count > net->num_tx_queues || count > net->num_rx_queues) + return -EINVAL; if (net_device_ctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; - num_chn = nvdev->num_chn; - max_chn = min_t(u32, nvdev->max_chn, num_online_cpus()); - - if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) { - pr_info("vRSS unsupported before NVSP Version 5\n"); + if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) return -EINVAL; - } - /* We do not support rx, tx, or other */ - if (!channels || - channels->rx_count || - channels->tx_count || - channels->other_count || - (channels->combined_count < 1)) + if (count > nvdev->max_chn) return -EINVAL; - if (channels->combined_count > max_chn) { - pr_info("combined channels too high, using %d\n", max_chn); - channels->combined_count = max_chn; - } - ret = netvsc_close(net); if (ret) - goto out; + return ret; - do_set: net_device_ctx->start_remove = true; rndis_filter_device_remove(dev); - nvdev->num_chn = channels->combined_count; - - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */ - device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = max_num_vrss_chns; - - ret = rndis_filter_device_add(dev, 
&device_info); - if (ret) { - if (recovering) { - netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - return ret; - } - goto recover; - } - - nvdev = net_device_ctx->nvdev; - - ret = netif_set_real_num_tx_queues(net, nvdev->num_chn); - if (ret) { - if (recovering) { - netdev_err(net, "could not set tx queue count (ret %d)\n", ret); - return ret; - } - goto recover; - } - - ret = netif_set_real_num_rx_queues(net, nvdev->num_chn); - if (ret) { - if (recovering) { - netdev_err(net, "could not set rx queue count (ret %d)\n", ret); - return ret; - } - goto recover; - } + ret = netvsc_set_queues(net, dev, count); + if (ret == 0) + nvdev->num_chn = count; + else + netvsc_set_queues(net, dev, nvdev->num_chn); - out: netvsc_open(net); net_device_ctx->start_remove = false; + /* We may have missed link change notifications */ schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; - - recover: - /* If the above failed, we attempt to recover through the same - * process but with the original number of channels. - */ - netdev_err(net, "could not set channels, recovering\n"); - recovering = true; - channels->combined_count = num_chn; - goto do_set; } static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd) @@ -865,8 +837,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct netvsc_device *nvdev = ndevctx->nvdev; struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; - u32 num_chn; - int ret = 0; + int ret; if (ndevctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; @@ -875,8 +846,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (ret) goto out; - num_chn = nvdev->num_chn; - ndevctx->start_remove = true; rndis_filter_device_remove(hdev); @@ -884,8 +853,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; - device_info.num_chn = num_chn; - device_info.max_num_vrss_chns = max_num_vrss_chns; + device_info.num_chn = nvdev->num_chn; + device_info.max_num_vrss_chns = nvdev->num_chn; rndis_filter_device_add(hdev, &device_info); out: @@ -1410,7 +1379,7 @@ static int netvsc_probe(struct hv_device *dev, int ret; net = alloc_etherdev_mq(sizeof(struct net_device_context), - num_online_cpus()); + VRSS_CHANNEL_MAX); if (!net) return -ENOMEM; @@ -1457,7 +1426,8 @@ static int netvsc_probe(struct hv_device *dev, /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = max_num_vrss_chns; + device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT, + num_online_cpus()); ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); -- cgit v1.2.3 From ff4a44199012ee32839278cb84f82ae32c01dbc9 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:04 -0800 Subject: netvsc: allow get/set of RSS indirection table Allow setting receive indirection table. Also uses the system standard for initialization. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
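The "system standard for initialization" mentioned above is ethtool_rxfh_indir_default(), which spreads table slots across the receive queues with a simple modulo. A standalone model of how the table is seeded and how a user-supplied table is validated, mirroring the diff below (ITAB_NUM is the driver's table size; plain C types are used here for illustration):

#include <stdint.h>

#define ITAB_NUM 128	/* indirection table entries used by this driver */

static void indir_table_default(uint16_t table[ITAB_NUM], unsigned int num_chn)
{
	unsigned int i;

	for (i = 0; i < ITAB_NUM; i++)
		table[i] = i % num_chn;	/* what ethtool_rxfh_indir_default() computes */
}

/* set_rxfh path: every entry must reference an existing rx queue. */
static int indir_table_validate(const uint32_t *indir, unsigned int num_rx_queues)
{
	unsigned int i;

	for (i = 0; i < ITAB_NUM; i++)
		if (indir[i] >= num_rx_queues)
			return -1;	/* driver returns -EINVAL */

	return 0;
}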
Miller --- drivers/net/hyperv/hyperv_net.h | 1 + drivers/net/hyperv/netvsc_drv.c | 26 +++++++++++++++++++++++--- drivers/net/hyperv/rndis_filter.c | 9 +++++++-- 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5a652eb8a619..db11f7ab67a8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -170,6 +170,7 @@ struct rndis_device { u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; + u16 ind_table[ITAB_NUM]; }; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ead472150742..a09602e59cf5 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1035,7 +1035,7 @@ static u32 netvsc_get_rxfh_key_size(struct net_device *dev) static u32 netvsc_rss_indir_size(struct net_device *dev) { - return 0; + return ITAB_NUM; } static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, @@ -1044,10 +1044,16 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, struct net_device_context *ndc = netdev_priv(dev); struct netvsc_device *ndev = ndc->nvdev; struct rndis_device *rndis_dev = ndev->extension; + int i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + if (indir) { + for (i = 0; i < ITAB_NUM; i++) + indir[i] = rndis_dev->ind_table[i]; + } + if (key) memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); @@ -1060,12 +1066,26 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, struct net_device_context *ndc = netdev_priv(dev); struct netvsc_device *ndev = ndc->nvdev; struct rndis_device *rndis_dev = ndev->extension; + int i; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!key || memcmp(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN) == 0) - return 0; /* no change */ + if (indir) { + for (i = 0; i < ITAB_NUM; i++) + if (indir[i] >= dev->num_rx_queues) + return -EINVAL; + + for (i = 0; i < ITAB_NUM; i++) + rndis_dev->ind_table[i] = indir[i]; + } + + if (!key) { + if (!indir) + return 0; + + key = rndis_dev->rss_key; + } return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn); } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index ac08aa1b089a..8d2b078403de 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -780,7 +780,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = i % num_queue; + itab[i] = rdev->ind_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); @@ -1035,7 +1035,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) int rndis_filter_device_add(struct hv_device *dev, void *additional_info) { - int ret; struct net_device *net = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *net_device; @@ -1053,6 +1052,7 @@ int rndis_filter_device_add(struct hv_device *dev, const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; unsigned long flags; + int i, ret; rndis_device = get_rndis_device(); if (!rndis_device) @@ -1206,6 +1206,11 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); num_rss_qs = net_device->num_chn - 1; + + for (i = 0; i < ITAB_NUM; i++) + rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, + net_device->num_chn); + 
net_device->num_sc_offered = num_rss_qs; if (net_device->num_chn == 1) -- cgit v1.2.3 From d8e18ee0fa9679e24aa1dacb1edc7bf934c8ddd4 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:05 -0800 Subject: netvsc: enhance transmit select_queue The netvsc select queue function was missing many of the flow caching features that exist in default tx queue selection. Add the same logic to remember queue based on socket and implement two level mapping (like RSS). Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a09602e59cf5..86feab3f5443 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -191,22 +191,41 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } +/* + * Select queue for transmit. + * + * If a valid queue has already been assigned, then use that. + * Otherwise compute tx queue based on hash and the send table. + * + * This is basically similar to default (__netdev_pick_tx) with the added step + * of using the host send_table when no other queue has been assigned. + * + * TODO support XPS - but get_xps_queue not exported + */ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct netvsc_device *nvsc_dev = net_device_ctx->nvdev; - u32 hash; - u16 q_idx = 0; + struct sock *sk = skb->sk; + int q_idx = sk_tx_queue_get(sk); - if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) - return 0; + if (q_idx < 0 || skb->ooo_okay || + q_idx >= ndev->real_num_tx_queues) { + u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE); + int new_idx; + + new_idx = nvsc_dev->send_table[hash] + % nvsc_dev->num_chn; - hash = skb_get_hash(skb); - q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % - ndev->real_num_tx_queues; + if (q_idx != new_idx && sk && + sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, new_idx); + + q_idx = new_idx; + } - if (!nvsc_dev->chn_table[q_idx]) + if (unlikely(!nvsc_dev->chn_table[q_idx])) q_idx = 0; return q_idx; -- cgit v1.2.3 From ceaaea0483cf82a2c817781f3cbacf29f2c5cc97 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:06 -0800 Subject: netvsc: remove unused variables Fixes set but never used warnings Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
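For the netvsc_select_queue() rewrite above, the two-level mapping works like RSS in reverse: the skb hash picks a slot in the 16-entry host send table, and that slot's value is reduced to an active channel; the result is then cached on the socket so later packets on the same flow skip the computation. A minimal standalone model of the lookup (VRSS_SEND_TAB_SIZE comes from the driver headers; the function name is illustrative):

#include <stdint.h>

#define SEND_TAB_SIZE 16	/* VRSS_SEND_TAB_SIZE */

static uint16_t pick_tx_queue(uint32_t skb_hash,
			      const uint32_t send_table[SEND_TAB_SIZE],
			      uint16_t num_chn)
{
	uint32_t slot = skb_hash % SEND_TAB_SIZE;	/* level 1: hash -> table slot */

	return (uint16_t)(send_table[slot] % num_chn);	/* level 2: slot -> channel */
}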
Miller --- drivers/net/hyperv/netvsc_drv.c | 4 ---- drivers/net/hyperv/rndis_filter.c | 3 --- 2 files changed, 7 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 86feab3f5443..f96b44eee270 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1390,7 +1390,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev) static int netvsc_unregister_vf(struct net_device *vf_netdev) { struct net_device *ndev; - struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; ndev = get_netvsc_byref(vf_netdev); @@ -1398,7 +1397,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - netvsc_dev = net_device_ctx->nvdev; netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); @@ -1507,7 +1505,6 @@ static int netvsc_remove(struct hv_device *dev) { struct net_device *net; struct net_device_context *ndev_ctx; - struct netvsc_device *net_device; net = hv_get_drvdata(dev); @@ -1517,7 +1514,6 @@ static int netvsc_remove(struct hv_device *dev) } ndev_ctx = netdev_priv(net); - net_device = ndev_ctx->nvdev; /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels() * removing the device. diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8d2b078403de..fc98d2a16ddd 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -845,7 +845,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; - u32 status; int ret; request = get_rndis_request(dev, RNDIS_MSG_SET, @@ -872,8 +871,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - status = set_complete->status; - cleanup: if (request) put_rndis_request(dev, request); -- cgit v1.2.3 From b8b835a89b2f7a7fe681983dfe5c489cb9ad9500 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:07 -0800 Subject: netvsc: group all per-channel state together Put all the per-channel state together in one data struct. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
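The restructuring below replaces four parallel per-queue arrays (channel pointer, outstanding-send counter, multi-send batching state, receive-completion state) with a single array of per-channel structs, so one index lookup yields everything a queue needs and related fields share locality. Schematically (a simplified stand-in for struct netvsc_channel, not the real layout):

struct per_channel_model {
	void *channel;		/* vmbus sub-channel for this queue */
	int queue_sends;	/* outstanding transmits on this queue */
	/* multi-send and receive-completion state live alongside these */
};

/* One lookup replaces several parallel array accesses: */
static int channel_busy(const struct per_channel_model *chan_table, unsigned int q)
{
	return chan_table[q].queue_sends > 0;
}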
Miller --- drivers/net/hyperv/hyperv_net.h | 14 +++++--- drivers/net/hyperv/netvsc.c | 73 +++++++++++++++++++-------------------- drivers/net/hyperv/netvsc_drv.c | 5 ++- drivers/net/hyperv/rndis_filter.c | 6 ++-- 4 files changed, 51 insertions(+), 47 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index db11f7ab67a8..fb73caad0965 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -714,6 +714,14 @@ struct net_device_context { u32 vf_serial; }; +/* Per channel data */ +struct netvsc_channel { + struct vmbus_channel *channel; + struct multi_send_data msd; + struct multi_recv_comp mrc; + atomic_t queue_sends; +}; + /* Per netvsc device */ struct netvsc_device { u32 nvsp_version; @@ -744,27 +752,25 @@ struct netvsc_device { struct nvsp_message revoke_packet; - struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX]; u32 send_table[VRSS_SEND_TAB_SIZE]; u32 max_chn; u32 num_chn; spinlock_t sc_lock; /* Protects num_sc_offered variable */ u32 num_sc_offered; - atomic_t queue_sends[VRSS_CHANNEL_MAX]; /* Holds rndis device info */ void *extension; int ring_size; - struct multi_send_data msd[VRSS_CHANNEL_MAX]; u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ - struct multi_recv_comp mrc[VRSS_CHANNEL_MAX]; atomic_t num_outstanding_recvs; atomic_t open_cnt; + + struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; }; static inline struct netvsc_device * diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5e90c7fb1bd2..4dd2a1f2da11 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -67,8 +67,8 @@ static struct netvsc_device *alloc_net_device(void) if (!net_device) return NULL; - net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX * - sizeof(struct recv_comp_data)); + net_device->chan_table[0].mrc.buf + = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; @@ -85,7 +85,7 @@ static void free_netvsc_device(struct netvsc_device *nvdev) int i; for (i = 0; i < VRSS_CHANNEL_MAX; i++) - vfree(nvdev->mrc[i].buf); + vfree(nvdev->chan_table[i].mrc.buf); kfree(nvdev); } @@ -632,7 +632,9 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, num_outstanding_sends = atomic_dec_return(&net_device->num_outstanding_sends); - queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]); + + queue_sends = + atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); if (net_device->destroy && num_outstanding_sends == 0) wake_up(&net_device->wait_drain); @@ -757,9 +759,11 @@ static inline int netvsc_send_pkt( struct sk_buff *skb) { struct nvsp_message nvmsg; - u16 q_idx = packet->q_idx; - struct vmbus_channel *out_channel = net_device->chn_table[q_idx]; + struct netvsc_channel *nvchan + = &net_device->chan_table[packet->q_idx]; + struct vmbus_channel *out_channel = nvchan->channel; struct net_device *ndev = hv_get_drvdata(device); + struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); u64 req_id; int ret; struct hv_page_buffer *pgbuf; @@ -820,22 +824,18 @@ static inline int netvsc_send_pkt( if (ret == 0) { atomic_inc(&net_device->num_outstanding_sends); - atomic_inc(&net_device->queue_sends[q_idx]); + atomic_inc_return(&nvchan->queue_sends); if (ring_avail < RING_AVAIL_PERCENT_LOWATER) { - netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx)); + netif_tx_stop_queue(txq); - if (atomic_read(&net_device-> - queue_sends[q_idx]) < 
1) - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); + if (atomic_read(&nvchan->queue_sends) < 1) + netif_tx_wake_queue(txq); } } else if (ret == -EAGAIN) { - netif_tx_stop_queue(netdev_get_tx_queue( - ndev, q_idx)); - if (atomic_read(&net_device->queue_sends[q_idx]) < 1) { - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); + netif_tx_stop_queue(txq); + if (atomic_read(&nvchan->queue_sends) < 1) { + netif_tx_wake_queue(txq); ret = -ENOSPC; } } else { @@ -866,8 +866,7 @@ int netvsc_send(struct hv_device *device, { struct netvsc_device *net_device; int ret = 0; - struct vmbus_channel *out_channel; - u16 q_idx = packet->q_idx; + struct netvsc_channel *nvchan; u32 pktlen = packet->total_data_buflen, msd_len = 0; unsigned int section_index = NETVSC_INVALID_INDEX; struct multi_send_data *msdp; @@ -887,8 +886,7 @@ int netvsc_send(struct hv_device *device, if (!net_device->send_section_map) return -EAGAIN; - out_channel = net_device->chn_table[q_idx]; - + nvchan = &net_device->chan_table[packet->q_idx]; packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; @@ -900,9 +898,8 @@ int netvsc_send(struct hv_device *device, goto send_now; } - msdp = &net_device->msd[q_idx]; - /* batch packets in send buffer if possible */ + msdp = &nvchan->msd; if (msdp->pkt) msd_len = msdp->pkt->total_data_buflen; @@ -1003,8 +1000,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel, static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, u32 *filled, u32 *avail) { - u32 first = nvdev->mrc[q_idx].first; - u32 next = nvdev->mrc[q_idx].next; + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; + u32 first = mrc->first; + u32 next = mrc->next; *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next : next - first; @@ -1016,26 +1014,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; u32 filled, avail; - if (!nvdev->mrc[q_idx].buf) + if (unlikely(!mrc->buf)) return NULL; count_recv_comp_slot(nvdev, q_idx, &filled, &avail); if (!filled) return NULL; - return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first * - sizeof(struct recv_comp_data); + return mrc->buf + mrc->first * sizeof(struct recv_comp_data); } /* Put the first filled slot back to available pool */ static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; int num_recv; - nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) % - NETVSC_RECVSLOT_MAX; + mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX; num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); @@ -1070,13 +1068,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, static inline struct recv_comp_data *get_recv_comp_slot( struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; u32 filled, avail, next; struct recv_comp_data *rcd; - if (!nvdev->recv_section) + if (unlikely(!nvdev->recv_section)) return NULL; - if (!nvdev->mrc[q_idx].buf) + if (unlikely(!mrc->buf)) return NULL; if (atomic_read(&nvdev->num_outstanding_recvs) > @@ -1087,9 +1086,9 @@ static inline struct recv_comp_data *get_recv_comp_slot( if (!avail) return NULL; - next = nvdev->mrc[q_idx].next; - rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data); - 
nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX; + next = mrc->next; + rcd = mrc->buf + next * sizeof(struct recv_comp_data); + mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX; atomic_inc(&nvdev->num_outstanding_recvs); @@ -1159,7 +1158,7 @@ static void netvsc_receive(struct netvsc_device *net_device, channel); } - if (!net_device->mrc[q_idx].buf) { + if (!net_device->chan_table[q_idx].mrc.buf) { ret = netvsc_send_recv_completion(channel, vmxferpage_packet->d.trans_id, status); @@ -1333,7 +1332,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) * opened. */ for (i = 0; i < VRSS_CHANNEL_MAX; i++) - net_device->chn_table[i] = device->channel; + net_device->chan_table[i].channel = device->channel; /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is * populated. diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f96b44eee270..da5863641703 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -135,7 +135,7 @@ static int netvsc_close(struct net_device *net) while (true) { aread = 0; for (i = 0; i < nvdev->num_chn; i++) { - chn = nvdev->chn_table[i]; + chn = nvdev->chan_table[i].channel; if (!chn) continue; @@ -225,7 +225,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, q_idx = new_idx; } - if (unlikely(!nvsc_dev->chn_table[q_idx])) + if (unlikely(!nvsc_dev->chan_table[q_idx].channel)) q_idx = 0; return q_idx; @@ -545,7 +545,6 @@ no_memory: ++net_device_ctx->eth_stats.tx_no_memory; goto drop; } - /* * netvsc_linkstatus_callback - Link up/down notification */ diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index fc98d2a16ddd..0df02c9808e8 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1012,15 +1012,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) if (chn_index >= nvscdev->num_chn) return; - nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX * - sizeof(struct recv_comp_data)); + nvscdev->chan_table[chn_index].mrc.buf + = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, netvsc_channel_cb, new_sc); if (ret == 0) - nvscdev->chn_table[chn_index] = new_sc; + nvscdev->chan_table[chn_index].channel = new_sc; spin_lock_irqsave(&nvscdev->sc_lock, flags); nvscdev->num_sc_offered--; -- cgit v1.2.3 From dc54a08cd3620e6457382c0cd0c8f03513dd749a Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:08 -0800 Subject: netvsc: optimize receive path Do manual optimizations of receive path: - remove checks for impossible conditions (but keep checks for bad data from host) - pass argument down, rather than having callee recompute what is already known - remove indirection about receive buffer datalength - remove dependence on VLAN_TAG_PRESENCE - use _hot/_cold and likely/unlikely Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
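One detail of the receive-path rework below: the driver no longer encodes "VLAN tag present" into the TCI itself; presence is simply "the 802.1Q per-packet info pointer is non-NULL", and the TCI is rebuilt from the vlanid and priority fields when it is. A standalone model of that reconstruction (VLAN_PRIO_SHIFT is the usual 802.1Q priority shift of 13; the struct is a simplified stand-in):

#include <stdint.h>

#define VLAN_PRIO_SHIFT 13

struct pkt_8021q_model { uint16_t vlanid; uint8_t pri; };

/* Called only when the NDIS 8021q per-packet info was actually present. */
static uint16_t vlan_tci_from_ppi(const struct pkt_8021q_model *vlan)
{
	return (uint16_t)(vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT));
}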
Miller --- drivers/net/hyperv/hyperv_net.h | 21 +++++---- drivers/net/hyperv/netvsc.c | 74 ++++++++++++----------------- drivers/net/hyperv/netvsc_drv.c | 32 ++++++------- drivers/net/hyperv/rndis_filter.c | 99 +++++++++++++++------------------------ 4 files changed, 93 insertions(+), 133 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index fb73caad0965..4b91e37c85ca 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -119,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */ /* Fwd declaration */ struct ndis_tcp_ip_checksum_info; +struct ndis_pkt_8021q_info; /* * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame @@ -186,12 +187,11 @@ int netvsc_send(struct hv_device *device, struct sk_buff *skb); void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp); -int netvsc_recv_callback(struct hv_device *device_obj, - struct hv_netvsc_packet *packet, - void **data, - struct ndis_tcp_ip_checksum_info *csum_info, - struct vmbus_channel *channel, - u16 vlan_tci); +int netvsc_recv_callback(struct net_device *net, + struct vmbus_channel *channel, + void *data, u32 len, + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); @@ -200,10 +200,11 @@ int rndis_filter_device_add(struct hv_device *dev, void rndis_filter_device_remove(struct hv_device *dev); int rndis_filter_set_rss_param(struct rndis_device *rdev, const u8 *key, int num_queue); -int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel); +int rndis_filter_receive(struct net_device *ndev, + struct netvsc_device *net_dev, + struct hv_device *dev, + struct vmbus_channel *channel, + void *data, u32 buflen); int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 4dd2a1f2da11..80eecb4f9456 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1095,51 +1095,35 @@ static inline struct recv_comp_data *get_recv_comp_slot( return rcd; } -static void netvsc_receive(struct netvsc_device *net_device, - struct vmbus_channel *channel, - struct hv_device *device, - struct vmpacket_descriptor *packet) +static void netvsc_receive(struct net_device *ndev, + struct netvsc_device *net_device, + struct net_device_context *net_device_ctx, + struct hv_device *device, + struct vmbus_channel *channel, + struct vmtransfer_page_packet_header *vmxferpage_packet, + struct nvsp_message *nvsp) { - struct vmtransfer_page_packet_header *vmxferpage_packet; - struct nvsp_message *nvsp_packet; - struct hv_netvsc_packet nv_pkt; - struct hv_netvsc_packet *netvsc_packet = &nv_pkt; + char *recv_buf = net_device->recv_buf; u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; - struct net_device *ndev = hv_get_drvdata(device); - void *data; int ret; struct recv_comp_data *rcd; u16 q_idx = channel->offermsg.offer.sub_channel_index; - /* - * All inbound packets other than send completion should be xfer page - * packet - */ - if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) { - netdev_err(ndev, "Unknown packet type received - %d\n", - packet->type); - return; - } - - nvsp_packet = (struct 
nvsp_message *)((unsigned long)packet + - (packet->offset8 << 3)); - /* Make sure this is a valid nvsp packet */ - if (nvsp_packet->hdr.msg_type != - NVSP_MSG1_TYPE_SEND_RNDIS_PKT) { - netdev_err(ndev, "Unknown nvsp packet type received-" - " %d\n", nvsp_packet->hdr.msg_type); + if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { + netif_err(net_device_ctx, rx_err, ndev, + "Unknown nvsp packet type received %u\n", + nvsp->hdr.msg_type); return; } - vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet; - - if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) { - netdev_err(ndev, "Invalid xfer page set id - " - "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID, - vmxferpage_packet->xfer_pageset_id); + if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) { + netif_err(net_device_ctx, rx_err, ndev, + "Invalid xfer page set id - expecting %x got %x\n", + NETVSC_RECEIVE_BUFFER_ID, + vmxferpage_packet->xfer_pageset_id); return; } @@ -1147,15 +1131,13 @@ static void netvsc_receive(struct netvsc_device *net_device, /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < count; i++) { - /* Initialize the netvsc packet */ - data = (void *)((unsigned long)net_device-> - recv_buf + vmxferpage_packet->ranges[i].byte_offset); - netvsc_packet->total_data_buflen = - vmxferpage_packet->ranges[i].byte_count; + void *data = recv_buf + + vmxferpage_packet->ranges[i].byte_offset; + u32 buflen = vmxferpage_packet->ranges[i].byte_count; /* Pass it to the upper layer */ - status = rndis_filter_receive(device, netvsc_packet, &data, - channel); + status = rndis_filter_receive(ndev, net_device, device, + channel, data, buflen); } if (!net_device->chan_table[q_idx].mrc.buf) { @@ -1234,11 +1216,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device, u64 request_id, struct vmpacket_descriptor *desc) { - struct nvsp_message *nvmsg; struct net_device_context *net_device_ctx = netdev_priv(ndev); - - nvmsg = (struct nvsp_message *)((unsigned long) - desc + (desc->offset8 << 3)); + struct nvsp_message *nvmsg + = (struct nvsp_message *)((unsigned long)desc + + (desc->offset8 << 3)); switch (desc->type) { case VM_PKT_COMP: @@ -1246,7 +1227,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device, break; case VM_PKT_DATA_USING_XFER_PAGES: - netvsc_receive(net_device, channel, device, desc); + netvsc_receive(ndev, net_device, net_device_ctx, + device, channel, + (struct vmtransfer_page_packet_header *)desc, + nvmsg); break; case VM_PKT_DATA_INBAND: diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index da5863641703..4bc1fdbc8cd7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -596,13 +596,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, } static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, - struct hv_netvsc_packet *packet, - struct ndis_tcp_ip_checksum_info *csum_info, - void *data, u16 vlan_tci) + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan, + void *data, u32 buflen) { struct sk_buff *skb; - skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen); + skb = netdev_alloc_skb_ip_align(net, buflen); if (!skb) return skb; @@ -610,8 +610,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * Copy to skb. 
This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ - memcpy(skb_put(skb, packet->total_data_buflen), data, - packet->total_data_buflen); + memcpy(skb_put(skb, buflen), data, buflen); skb->protocol = eth_type_trans(skb, net); @@ -628,9 +627,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, skb->ip_summed = CHECKSUM_UNNECESSARY; } - if (vlan_tci & VLAN_TAG_PRESENT) + if (vlan) { + u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + } return skb; } @@ -639,14 +641,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * netvsc_recv_callback - Callback when we receive a packet from the * "wire" on the specified device. */ -int netvsc_recv_callback(struct hv_device *device_obj, - struct hv_netvsc_packet *packet, - void **data, - struct ndis_tcp_ip_checksum_info *csum_info, - struct vmbus_channel *channel, - u16 vlan_tci) +int netvsc_recv_callback(struct net_device *net, + struct vmbus_channel *channel, + void *data, u32 len, + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan) { - struct net_device *net = hv_get_drvdata(device_obj); struct net_device_context *net_device_ctx = netdev_priv(net); struct net_device *vf_netdev; struct sk_buff *skb; @@ -668,7 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, net = vf_netdev; /* Allocate a skb - TODO direct I/O to pages? */ - skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); + skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len); if (unlikely(!skb)) { ++net->stats.rx_dropped; rcu_read_unlock(); @@ -687,7 +687,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; - rx_stats->bytes += packet->total_data_buflen; + rx_stats->bytes += len; if (skb->pkt_type == PACKET_BROADCAST) ++rx_stats->broadcast; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 0df02c9808e8..decce4f8fda8 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -132,7 +132,7 @@ static void put_rndis_request(struct rndis_device *dev, } static void dump_rndis_message(struct hv_device *hv_dev, - struct rndis_message *rndis_msg) + const struct rndis_message *rndis_msg) { struct net_device *netdev = hv_get_drvdata(hv_dev); @@ -347,102 +347,78 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) return NULL; } -static int rndis_filter_receive_data(struct rndis_device *dev, - struct rndis_message *msg, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel) +static int rndis_filter_receive_data(struct net_device *ndev, + struct rndis_device *dev, + struct rndis_message *msg, + struct vmbus_channel *channel, + void *data, u32 data_buflen) { - struct rndis_packet *rndis_pkt; + struct rndis_packet *rndis_pkt = &msg->msg.pkt; + const struct ndis_tcp_ip_checksum_info *csum_info; + const struct ndis_pkt_8021q_info *vlan; u32 data_offset; - struct ndis_pkt_8021q_info *vlan; - struct ndis_tcp_ip_checksum_info *csum_info; - u16 vlan_tci = 0; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - - rndis_pkt = &msg->msg.pkt; /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; - pkt->total_data_buflen -= data_offset; + data_buflen -= 
data_offset; /* * Make sure we got a valid RNDIS message, now total_data_buflen * should be the data packet size plus the trailer padding size */ - if (pkt->total_data_buflen < rndis_pkt->data_len) { + if (unlikely(data_buflen < rndis_pkt->data_len)) { netdev_err(dev->ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", - pkt->total_data_buflen, rndis_pkt->data_len); + data_buflen, rndis_pkt->data_len); return NVSP_STAT_FAIL; } + vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); + /* * Remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ - pkt->total_data_buflen = rndis_pkt->data_len; - *data = (void *)((unsigned long)(*data) + data_offset); - - vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); - if (vlan) { - vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid | - (vlan->pri << VLAN_PRIO_SHIFT); - } - + data = (void *)((unsigned long)data + data_offset); csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data, - csum_info, channel, vlan_tci); + return netvsc_recv_callback(ndev, channel, + data, rndis_pkt->data_len, + csum_info, vlan); } -int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel) +int rndis_filter_receive(struct net_device *ndev, + struct netvsc_device *net_dev, + struct hv_device *dev, + struct vmbus_channel *channel, + void *data, u32 buflen) { - struct net_device *ndev = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_dev = net_device_ctx->nvdev; - struct rndis_device *rndis_dev; - struct rndis_message *rndis_msg; - int ret = 0; - - if (!net_dev) { - ret = NVSP_STAT_FAIL; - goto exit; - } + struct rndis_device *rndis_dev = net_dev->extension; + struct rndis_message *rndis_msg = data; /* Make sure the rndis device state is initialized */ - if (!net_dev->extension) { - netdev_err(ndev, "got rndis message but no rndis device - " - "dropping this message!\n"); - ret = NVSP_STAT_FAIL; - goto exit; + if (unlikely(!rndis_dev)) { + netif_err(net_device_ctx, rx_err, ndev, + "got rndis message but no rndis device!\n"); + return NVSP_STAT_FAIL; } - rndis_dev = (struct rndis_device *)net_dev->extension; - if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) { - netdev_err(ndev, "got rndis message but rndis device " - "uninitialized...dropping this message!\n"); - ret = NVSP_STAT_FAIL; - goto exit; + if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { + netif_err(net_device_ctx, rx_err, ndev, + "got rndis message uninitialized\n"); + return NVSP_STAT_FAIL; } - rndis_msg = *data; - - if (netif_msg_rx_err(net_device_ctx)) + if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - /* data msg */ - ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt, - data, channel); - break; - + return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg, + channel, data, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: @@ -462,8 +438,7 @@ int rndis_filter_receive(struct hv_device *dev, break; } -exit: - return ret; + return 0; } static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, -- cgit v1.2.3 From 2c7f83ca713fa0ac5c4698e4134b09a355f60263 Mon Sep 17 00:00:00 2001 From: stephen hemminger 
Date: Tue, 24 Jan 2017 13:06:09 -0800 Subject: netvsc: don't pass void * to internal device_add All the caller's/callee's know that the format of the device_add parameter is a netvsc_device_info struct. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 5 +++-- drivers/net/hyperv/netvsc.c | 6 +++--- drivers/net/hyperv/rndis_filter.c | 5 ++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4b91e37c85ca..28cbd6a2ecf8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -178,7 +178,8 @@ struct rndis_device { /* Interface */ struct rndis_message; struct netvsc_device; -int netvsc_device_add(struct hv_device *device, void *additional_info); +int netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *info); void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, @@ -196,7 +197,7 @@ void netvsc_channel_cb(void *context); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, - void *additional_info); + struct netvsc_device_info *info); void rndis_filter_device_remove(struct hv_device *dev); int rndis_filter_set_rss_param(struct rndis_device *rdev, const u8 *key, int num_queue); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 80eecb4f9456..359e7ef7040b 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1283,11 +1283,11 @@ void netvsc_channel_cb(void *context) * netvsc_device_add - Callback when the device belonging to this * driver is added */ -int netvsc_device_add(struct hv_device *device, void *additional_info) +int netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *device_info) { int i, ret = 0; - int ring_size = - ((struct netvsc_device_info *)additional_info)->ring_size; + int ring_size = device_info->ring_size; struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index decce4f8fda8..e3b29f35366c 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1005,13 +1005,12 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) } int rndis_filter_device_add(struct hv_device *dev, - void *additional_info) + struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *net_device; struct rndis_device *rndis_device; - struct netvsc_device_info *device_info = additional_info; struct ndis_offload hwcaps; struct ndis_offload_params offloads; struct nvsp_message *init_packet; @@ -1035,7 +1034,7 @@ int rndis_filter_device_add(struct hv_device *dev, * NOTE! 
Once the channel is created, we may get a receive callback * (RndisFilterOnReceive()) before this call is completed */ - ret = netvsc_device_add(dev, additional_info); + ret = netvsc_device_add(dev, device_info); if (ret != 0) { kfree(rndis_device); return ret; -- cgit v1.2.3 From 2289f0aa706e5160e078f73c32fcbfb56a3ff1e2 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:10 -0800 Subject: netvsc: simplify rndis_filter_remove All caller's already have pointer to netvsc_device so pass it. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 3 ++- drivers/net/hyperv/netvsc_drv.c | 8 ++++---- drivers/net/hyperv/rndis_filter.c | 12 ++++++------ 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 28cbd6a2ecf8..757205c9cb93 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -198,7 +198,8 @@ int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *info); -void rndis_filter_device_remove(struct hv_device *dev); +void rndis_filter_device_remove(struct hv_device *dev, + struct netvsc_device *nvdev); int rndis_filter_set_rss_param(struct rndis_device *rdev, const u8 *key, int num_queue); int rndis_filter_receive(struct net_device *ndev, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 4bc1fdbc8cd7..11755783c2f6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -780,7 +780,7 @@ static int netvsc_set_channels(struct net_device *net, return ret; net_device_ctx->start_remove = true; - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, nvdev); ret = netvsc_set_queues(net, dev, count); if (ret == 0) @@ -865,7 +865,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) goto out; ndevctx->start_remove = true; - rndis_filter_device_remove(hdev); + rndis_filter_device_remove(hdev, nvdev); ndev->mtu = mtu; @@ -1493,7 +1493,7 @@ static int netvsc_probe(struct hv_device *dev, ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, nvdev); netvsc_free_netdev(net); } @@ -1533,7 +1533,7 @@ static int netvsc_remove(struct hv_device *dev) * Call to the vsc driver to let it know that the device is being * removed */ - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, ndev_ctx->nvdev); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index e3b29f35366c..70b099a731a9 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1053,7 +1053,7 @@ int rndis_filter_device_add(struct hv_device *dev, /* Send the rndis initialization message */ ret = rndis_filter_init_device(rndis_device); if (ret != 0) { - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } @@ -1068,7 +1068,7 @@ int rndis_filter_device_add(struct hv_device *dev, /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device); if (ret != 0) { - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } @@ -1077,7 +1077,7 @@ int rndis_filter_device_add(struct hv_device *dev, /* Find HW offload capabilities */ ret = rndis_query_hwcaps(rndis_device, &hwcaps); if 
(ret != 0) { - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } @@ -1233,13 +1233,13 @@ out: return 0; /* return 0 because primary channel can be used alone */ err_dev_remv: - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } -void rndis_filter_device_remove(struct hv_device *dev) +void rndis_filter_device_remove(struct hv_device *dev, + struct netvsc_device *net_dev) { - struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev); struct rndis_device *rndis_dev = net_dev->extension; /* If not all subchannel offers are complete, wait for them until -- cgit v1.2.3 From 46b4f7f5d1f7410de48128540ef2d1aab913a619 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:11 -0800 Subject: netvsc: eliminate per-device outstanding send counter Since now keep track of per-queue outstanding sends, we can avoid one atomic update by removing no longer needed per-device atomic. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 1 - drivers/net/hyperv/netvsc.c | 44 ++++++++++++++------------------------- drivers/net/hyperv/rndis_filter.c | 21 ++++++++++++++++--- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 757205c9cb93..fec365241f37 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -729,7 +729,6 @@ struct netvsc_channel { struct netvsc_device { u32 nvsp_version; - atomic_t num_outstanding_sends; wait_queue_head_t wait_drain; bool destroy; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 359e7ef7040b..bd055146f098 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -90,29 +90,23 @@ static void free_netvsc_device(struct netvsc_device *nvdev) kfree(nvdev); } -static struct netvsc_device *get_outbound_net_device(struct hv_device *device) -{ - struct netvsc_device *net_device = hv_device_to_netvsc_device(device); - if (net_device && net_device->destroy) - net_device = NULL; +static inline bool netvsc_channel_idle(const struct netvsc_device *net_device, + u16 q_idx) +{ + const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; - return net_device; + return atomic_read(&net_device->num_outstanding_recvs) == 0 && + atomic_read(&nvchan->queue_sends) == 0; } -static struct netvsc_device *get_inbound_net_device(struct hv_device *device) +static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { struct netvsc_device *net_device = hv_device_to_netvsc_device(device); - if (!net_device) - goto get_in_err; - - if (net_device->destroy && - atomic_read(&net_device->num_outstanding_sends) == 0 && - atomic_read(&net_device->num_outstanding_recvs) == 0) + if (net_device && net_device->destroy) net_device = NULL; -get_in_err: return net_device; } @@ -612,7 +606,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); struct vmbus_channel *channel = device->channel; - int num_outstanding_sends; u16 q_idx = 0; int queue_sends; @@ -630,13 +623,10 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, dev_consume_skb_any(skb); } - num_outstanding_sends = - atomic_dec_return(&net_device->num_outstanding_sends); - queue_sends = atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); - if 
(net_device->destroy && num_outstanding_sends == 0) + if (net_device->destroy && queue_sends == 0) wake_up(&net_device->wait_drain); if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && @@ -823,15 +813,10 @@ static inline int netvsc_send_pkt( } if (ret == 0) { - atomic_inc(&net_device->num_outstanding_sends); atomic_inc_return(&nvchan->queue_sends); - if (ring_avail < RING_AVAIL_PERCENT_LOWATER) { + if (ring_avail < RING_AVAIL_PERCENT_LOWATER) netif_tx_stop_queue(txq); - - if (atomic_read(&nvchan->queue_sends) < 1) - netif_tx_wake_queue(txq); - } } else if (ret == -EAGAIN) { netif_tx_stop_queue(txq); if (atomic_read(&nvchan->queue_sends) < 1) { @@ -1259,11 +1244,14 @@ void netvsc_channel_cb(void *context) else device = channel->device_obj; - net_device = get_inbound_net_device(device); - if (!net_device) + ndev = hv_get_drvdata(device); + if (unlikely(!ndev)) return; - ndev = hv_get_drvdata(device); + net_device = net_device_to_netvsc_device(ndev); + if (unlikely(net_device->destroy) && + netvsc_channel_idle(net_device, q_idx)) + return; while ((desc = get_next_pkt_raw(channel)) != NULL) { netvsc_process_raw_pkt(device, channel, net_device, diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 70b099a731a9..19356f56b7b1 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -903,6 +903,23 @@ cleanup: return ret; } +static bool netvsc_device_idle(const struct netvsc_device *nvdev) +{ + int i; + + if (atomic_read(&nvdev->num_outstanding_recvs) > 0) + return false; + + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + + if (atomic_read(&nvchan->queue_sends) > 0) + return false; + } + + return true; +} + static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; @@ -933,9 +950,7 @@ cleanup: spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); /* Wait for all send completions */ - wait_event(nvdev->wait_drain, - atomic_read(&nvdev->num_outstanding_sends) == 0 && - atomic_read(&nvdev->num_outstanding_recvs) == 0); + wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev)); if (request) put_rndis_request(dev, request); -- cgit v1.2.3 From 793e39555511bccd73308c41205b72448d0077db Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:12 -0800 Subject: netvsc: account for packets/bytes transmitted after completion Most drivers do not increment transmit statistics until after the transmit is completed. This will also be necessary for BQL support. Slight additional complexity because the netvsc driver aggregates multiple packets into one transmit. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 4 +++- drivers/net/hyperv/netvsc.c | 19 ++++++++++++++++--- drivers/net/hyperv/netvsc_drv.c | 13 +++---------- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index fec365241f37..340f64233e2a 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -137,8 +137,10 @@ struct hv_netvsc_packet { u8 page_buf_cnt; u16 q_idx; - u32 send_buf_index; + u16 total_packets; + u32 total_bytes; + u32 send_buf_index; u32 total_data_buflen; }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index bd055146f098..397aa1d88a6f 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -611,15 +611,23 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, /* Notify the layer above us */ if (likely(skb)) { - struct hv_netvsc_packet *nvsc_packet + const struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)skb->cb; - u32 send_index = nvsc_packet->send_buf_index; + u32 send_index = packet->send_buf_index; + struct netvsc_stats *tx_stats; if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); - q_idx = nvsc_packet->q_idx; + q_idx = packet->q_idx; channel = incoming_channel; + tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->packets += packet->total_packets; + tx_stats->bytes += packet->total_bytes; + u64_stats_update_end(&tx_stats->syncp); + dev_consume_skb_any(skb); } @@ -924,6 +932,11 @@ int netvsc_send(struct hv_device *device, packet->total_data_buflen += msd_len; } + if (msdp->pkt) { + packet->total_packets += msdp->pkt->total_packets; + packet->total_bytes += msdp->pkt->total_bytes; + } + if (msdp->skb) dev_consume_skb_any(msdp->skb); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 11755783c2f6..1dd13da79f02 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -364,7 +364,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 rndis_msg_size; struct rndis_per_packet_info *ppi; u32 hash; - u32 skb_length; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf; @@ -374,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) * more pages we try linearizing it. 
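(Illustrative aside on the deferred-accounting pattern this commit describes: each outgoing packet records its own byte/packet totals at transmit time, including anything it aggregated, and the per-queue counters are only updated from the completion handler under the u64_stats seqcount. A minimal sketch of that completion-side update, assuming the netvsc_stats layout used in this series and <linux/u64_stats_sync.h>; struct my_packet and my_tx_complete() are made-up names, not driver API.)

	/* stand-in for the totals carried in hv_netvsc_packet */
	struct my_packet {
		u16 total_packets;	/* >1 when several sends were batched */
		u32 total_bytes;
	};

	static void my_tx_complete(struct netvsc_stats *tx_stats,
				   const struct my_packet *pkt)
	{
		/* fold the per-send totals into the per-queue counters only
		 * now, i.e. after the host has acknowledged the send
		 */
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += pkt->total_packets;
		tx_stats->bytes   += pkt->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);
	}
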
*/ - skb_length = skb->len; num_data_pgs = netvsc_get_slots(skb) + 2; if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { @@ -407,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) packet->q_idx = skb_get_queue_mapping(skb); packet->total_data_buflen = skb->len; + packet->total_bytes = skb->len; + packet->total_packets = 1; rndis_msg = (struct rndis_message *)skb->head; @@ -517,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) skb_tx_timestamp(skb); ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb, skb); - if (likely(ret == 0)) { - struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); - - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->packets++; - tx_stats->bytes += skb_length; - u64_stats_update_end(&tx_stats->syncp); + if (likely(ret == 0)) return NETDEV_TX_OK; - } if (ret == -EAGAIN) { ++net_device_ctx->eth_stats.tx_busy; -- cgit v1.2.3 From 6c80f3fc2398aef22798e8ac4258454b1062f3fb Mon Sep 17 00:00:00 2001 From: Simon Xiao Date: Tue, 24 Jan 2017 13:06:13 -0800 Subject: netvsc: report per-channel stats in ethtool statistics Report packets and bytes transferred through a vmbus channel via ethtool. This supersedes need for per-cpu statistics. Example: $ ethtool -S eth0 NIC statistics: ... tx_queue_0_packets: 3523179 tx_queue_0_bytes: 505370920 rx_queue_0_packets: 41430490 rx_queue_0_bytes: 62714661254 tx_queue_1_packets: 0 tx_queue_1_bytes: 0 rx_queue_1_packets: 0 rx_queue_1_bytes: 0 ... Reviewed-by: Long Li Reviewed-by: K. Y. Srinivasan Reviewed-by: Haiyang Zhang Signed-off-by: Simon Xiao Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 5 +- drivers/net/hyperv/netvsc.c | 2 +- drivers/net/hyperv/netvsc_drv.c | 143 +++++++++++++++++++++++++--------------- 3 files changed, 93 insertions(+), 57 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 340f64233e2a..d3e73ac158ae 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -699,8 +699,6 @@ struct net_device_context { u32 msg_enable; /* debug level */ u32 tx_checksum_mask; - struct netvsc_stats __percpu *tx_stats; - struct netvsc_stats __percpu *rx_stats; /* Ethtool settings */ u8 duplex; @@ -725,6 +723,9 @@ struct netvsc_channel { struct multi_send_data msd; struct multi_recv_comp mrc; atomic_t queue_sends; + + struct netvsc_stats tx_stats; + struct netvsc_stats rx_stats; }; /* Per netvsc device */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 397aa1d88a6f..c70c4ac77b74 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -621,7 +621,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, q_idx = packet->q_idx; channel = incoming_channel; - tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); + tx_stats = &net_device->chan_table[q_idx].tx_stats; u64_stats_update_begin(&tx_stats->syncp); tx_stats->packets += packet->total_packets; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1dd13da79f02..fe0df72532a3 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -641,9 +641,12 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_pkt_8021q_info *vlan) { struct net_device_context *net_device_ctx = netdev_priv(net); + struct netvsc_device *net_device = net_device_ctx->nvdev; struct net_device *vf_netdev; struct sk_buff *skb; struct netvsc_stats 
*rx_stats; + u16 q_idx = channel->offermsg.offer.sub_channel_index; + if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; @@ -669,15 +672,14 @@ int netvsc_recv_callback(struct net_device *net, } if (net != vf_netdev) - skb_record_rx_queue(skb, - channel->offermsg.offer.sub_channel_index); + skb_record_rx_queue(skb, q_idx); /* * Even if injecting the packet, record the statistics * on the synthetic device because modifying the VF device * statistics will not work correctly. */ - rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); + rx_stats = &net_device->chan_table[q_idx].rx_stats; u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; rx_stats->bytes += len; @@ -882,34 +884,39 @@ static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - int cpu; - - for_each_possible_cpu(cpu) { - struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats, - cpu); - struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats, - cpu); - u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast; + struct netvsc_device *nvdev = ndev_ctx->nvdev; + int i; + + if (!nvdev) + return; + + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + const struct netvsc_stats *stats; + u64 packets, bytes, multicast; unsigned int start; + stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); - tx_packets = tx_stats->packets; - tx_bytes = tx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + t->tx_bytes += bytes; + t->tx_packets += packets; + stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); - rx_packets = rx_stats->packets; - rx_bytes = rx_stats->bytes; - rx_multicast = rx_stats->multicast + rx_stats->broadcast; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); - - t->tx_bytes += tx_bytes; - t->tx_packets += tx_packets; - t->rx_bytes += rx_bytes; - t->rx_packets += rx_packets; - t->multicast += rx_multicast; + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + multicast = stats->multicast + stats->broadcast; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + t->rx_bytes += bytes; + t->rx_packets += packets; + t->multicast += multicast; } t->tx_dropped = net->stats.tx_dropped; @@ -954,11 +961,19 @@ static const struct { { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, }; +#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) + +/* 4 statistics per queue (rx/tx packets/bytes) */ +#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) + static int netvsc_get_sset_count(struct net_device *dev, int string_set) { + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + switch (string_set) { case ETH_SS_STATS: - return ARRAY_SIZE(netvsc_stats); + return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev); default: return -EINVAL; } @@ -968,22 +983,63 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; const void *nds = &ndc->eth_stats; - int i; + const struct netvsc_stats *qstats; + unsigned int start; + u64 packets, 
bytes; + int i, j; - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); + + for (j = 0; j < nvdev->num_chn; j++) { + qstats = &nvdev->chan_table[j].tx_stats; + + do { + start = u64_stats_fetch_begin_irq(&qstats->syncp); + packets = qstats->packets; + bytes = qstats->bytes; + } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + data[i++] = packets; + data[i++] = bytes; + + qstats = &nvdev->chan_table[j].rx_stats; + do { + start = u64_stats_fetch_begin_irq(&qstats->syncp); + packets = qstats->packets; + bytes = qstats->bytes; + } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + data[i++] = packets; + data[i++] = bytes; + } } static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) - memcpy(data + i * ETH_GSTRING_LEN, + memcpy(p + i * ETH_GSTRING_LEN, netvsc_stats[i].name, ETH_GSTRING_LEN); + + p += i * ETH_GSTRING_LEN; + for (i = 0; i < nvdev->num_chn; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + break; } } @@ -1237,15 +1293,6 @@ out_unlock: rtnl_unlock(); } -static void netvsc_free_netdev(struct net_device *netdev) -{ - struct net_device_context *net_device_ctx = netdev_priv(netdev); - - free_percpu(net_device_ctx->tx_stats); - free_percpu(net_device_ctx->rx_stats); - free_netdev(netdev); -} - static struct net_device *get_netvsc_bymac(const u8 *mac) { struct net_device *dev; @@ -1423,18 +1470,6 @@ static int netvsc_probe(struct hv_device *dev, netdev_dbg(net, "netvsc msg_enable: %d\n", net_device_ctx->msg_enable); - net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); - if (!net_device_ctx->tx_stats) { - free_netdev(net); - return -ENOMEM; - } - net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); - if (!net_device_ctx->rx_stats) { - free_percpu(net_device_ctx->tx_stats); - free_netdev(net); - return -ENOMEM; - } - hv_set_drvdata(dev, net); net_device_ctx->start_remove = false; @@ -1460,7 +1495,7 @@ static int netvsc_probe(struct hv_device *dev, ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - netvsc_free_netdev(net); + free_netdev(net); hv_set_drvdata(dev, NULL); return ret; } @@ -1487,7 +1522,7 @@ static int netvsc_probe(struct hv_device *dev, if (ret != 0) { pr_err("Unable to register netdev.\n"); rndis_filter_device_remove(dev, nvdev); - netvsc_free_netdev(net); + free_netdev(net); } return ret; @@ -1530,7 +1565,7 @@ static int netvsc_remove(struct hv_device *dev) hv_set_drvdata(dev, NULL); - netvsc_free_netdev(net); + free_netdev(net); return 0; } -- cgit v1.2.3 From b58a185801dab4eefccad868c466296795c5bae7 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:14 -0800 Subject: netvsc: simplify get next send section Use kernel for_each_clear_bit macro to simplify finding next available send section. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc.c | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index c70c4ac77b74..5cfdb1a1b4c1 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -680,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device, static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) { - unsigned long index; - u32 max_words = net_device->map_words; - unsigned long *map_addr = (unsigned long *)net_device->send_section_map; - u32 section_cnt = net_device->send_section_cnt; - int ret_val = NETVSC_INVALID_INDEX; - int i; - int prev_val; - - for (i = 0; i < max_words; i++) { - if (!~(map_addr[i])) - continue; - index = ffz(map_addr[i]); - prev_val = sync_test_and_set_bit(index, &map_addr[i]); - if (prev_val) - continue; - if ((index + (i * BITS_PER_LONG)) >= section_cnt) - break; - ret_val = (index + (i * BITS_PER_LONG)); - break; + unsigned long *map_addr = net_device->send_section_map; + unsigned int i; + + for_each_clear_bit(i, map_addr, net_device->map_words) { + if (sync_test_and_set_bit(i, map_addr) == 0) + return i; } - return ret_val; + + return NETVSC_INVALID_INDEX; } static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, -- cgit v1.2.3 From 1130383c174499826a3f01486e574e89be17e2d2 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 24 Jan 2017 13:06:15 -0800 Subject: netvsc: call netif_receive_skb To improve performance, netvsc can call network stack directly and avoid the local backlog queue. This is safe since incoming packets are handled in softirq context already because the receive function callback is called from a tasklet. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fe0df72532a3..72b0c1f7496e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -695,7 +695,7 @@ int netvsc_recv_callback(struct net_device *net, * is done. * TODO - use NAPI? */ - netif_rx(skb); + netif_receive_skb(skb); rcu_read_unlock(); return 0; -- cgit v1.2.3 From 42f82e2e62ae748a27741e63dbb035bfbe3353c9 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 25 Jan 2017 15:36:57 +0100 Subject: wireless: radiotap: rewrite the radiotap header file The header file has grown a lot of #define's etc, but they are nicer as enums, so rewrite the file from the documentation as such. Signed-off-by: Johannes Berg --- include/net/ieee80211_radiotap.h | 455 ++++++++++++++------------------------- 1 file changed, 157 insertions(+), 298 deletions(-) diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index d0e7e3f8e67a..d91f9e7f4d71 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -1,201 +1,54 @@ /* - * Copyright (c) 2003, 2004 David Young. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of David Young may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID - * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. - */ - -/* - * Modifications to fit into the linux IEEE 802.11 stack, - * Mike Kershaw (dragorn@kismetwireless.net) + * Copyright (c) 2017 Intel Deutschland GmbH + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#ifndef __RADIOTAP_H +#define __RADIOTAP_H -#ifndef IEEE80211RADIOTAP_H -#define IEEE80211RADIOTAP_H - -#include #include #include -/* Base version of the radiotap packet header data */ -#define PKTHDR_RADIOTAP_VERSION 0 - -/* A generic radio capture format is desirable. There is one for - * Linux, but it is neither rigidly defined (there were not even - * units given for some fields) nor easily extensible. - * - * I suggest the following extensible radio capture format. It is - * based on a bitmap indicating which fields are present. - * - * I am trying to describe precisely what the application programmer - * should expect in the following, and for that reason I tell the - * units and origin of each measurement (where it applies), or else I - * use sufficiently weaselly language ("is a monotonically nondecreasing - * function of...") that I cannot set false expectations for lawyerly - * readers. - */ - -/* - * The radio capture header precedes the 802.11 header. - * All data in the header is little endian on all platforms. +/** + * struct ieee82011_radiotap_header - base radiotap header */ struct ieee80211_radiotap_header { - u8 it_version; /* Version 0. Only increases - * for drastic changes, - * introduction of compatible - * new fields does not count. - */ - u8 it_pad; - __le16 it_len; /* length of the whole - * header in bytes, including - * it_version, it_pad, - * it_len, and data fields. - */ - __le32 it_present; /* A bitmap telling which - * fields are present. 
Set bit 31 - * (0x80000000) to extend the - * bitmap by another 32 bits. - * Additional extensions are made - * by setting bit 31. - */ + /** + * @it_version: radiotap version, always 0 + */ + uint8_t it_version; + + /** + * @it_pad: padding (or alignment) + */ + uint8_t it_pad; + + /** + * @it_len: overall radiotap header length + */ + __le16 it_len; + + /** + * @it_present: (first) present word + */ + __le32 it_present; } __packed; -/* Name Data type Units - * ---- --------- ----- - * - * IEEE80211_RADIOTAP_TSFT __le64 microseconds - * - * Value in microseconds of the MAC's 64-bit 802.11 Time - * Synchronization Function timer when the first bit of the - * MPDU arrived at the MAC. For received frames, only. - * - * IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap - * - * Tx/Rx frequency in MHz, followed by flags (see below). - * - * IEEE80211_RADIOTAP_FHSS __le16 see below - * - * For frequency-hopping radios, the hop set (first byte) - * and pattern (second byte). - * - * IEEE80211_RADIOTAP_RATE u8 500kb/s - * - * Tx/Rx data rate - * - * IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from - * one milliwatt (dBm) - * - * RF signal power at the antenna, decibel difference from - * one milliwatt. - * - * IEEE80211_RADIOTAP_DBM_ANTNOISE s8 decibels from - * one milliwatt (dBm) - * - * RF noise power at the antenna, decibel difference from one - * milliwatt. - * - * IEEE80211_RADIOTAP_DB_ANTSIGNAL u8 decibel (dB) - * - * RF signal power at the antenna, decibel difference from an - * arbitrary, fixed reference. - * - * IEEE80211_RADIOTAP_DB_ANTNOISE u8 decibel (dB) - * - * RF noise power at the antenna, decibel difference from an - * arbitrary, fixed reference point. - * - * IEEE80211_RADIOTAP_LOCK_QUALITY __le16 unitless - * - * Quality of Barker code lock. Unitless. Monotonically - * nondecreasing with "better" lock strength. Called "Signal - * Quality" in datasheets. (Is there a standard way to measure - * this?) - * - * IEEE80211_RADIOTAP_TX_ATTENUATION __le16 unitless - * - * Transmit power expressed as unitless distance from max - * power set at factory calibration. 0 is max power. - * Monotonically nondecreasing with lower power levels. - * - * IEEE80211_RADIOTAP_DB_TX_ATTENUATION __le16 decibels (dB) - * - * Transmit power expressed as decibel distance from max power - * set at factory calibration. 0 is max power. Monotonically - * nondecreasing with lower power levels. - * - * IEEE80211_RADIOTAP_DBM_TX_POWER s8 decibels from - * one milliwatt (dBm) - * - * Transmit power expressed as dBm (decibels from a 1 milliwatt - * reference). This is the absolute power level measured at - * the antenna port. - * - * IEEE80211_RADIOTAP_FLAGS u8 bitmap - * - * Properties of transmitted and received frames. See flags - * defined below. - * - * IEEE80211_RADIOTAP_ANTENNA u8 antenna index - * - * Unitless indication of the Rx/Tx antenna for this packet. - * The first antenna is antenna 0. - * - * IEEE80211_RADIOTAP_RX_FLAGS __le16 bitmap - * - * Properties of received frames. See flags defined below. - * - * IEEE80211_RADIOTAP_TX_FLAGS __le16 bitmap - * - * Properties of transmitted frames. See flags defined below. - * - * IEEE80211_RADIOTAP_RTS_RETRIES u8 data - * - * Number of rts retries a transmitted frame used. - * - * IEEE80211_RADIOTAP_DATA_RETRIES u8 data - * - * Number of unicast retries a transmitted frame used. - * - * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless - * - * Contains a bitmap of known fields/flags, the flags, and - * the MCS index. 
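(Illustrative aside, not part of the patch: how a consumer reads the fixed header described here. it_version must be 0, it_len is the little-endian length of the whole radiotap area, and bit 31 of it_present, IEEE80211_RADIOTAP_EXT, extends the present bitmap by another word. A minimal sketch assuming <asm/unaligned.h>; radiotap_hdr_ok() is a made-up helper name.)

	static bool radiotap_hdr_ok(const u8 *buf, size_t buflen)
	{
		const struct ieee80211_radiotap_header *hdr = (const void *)buf;
		u16 it_len;

		if (buflen < sizeof(*hdr))
			return false;

		/* version is always 0, anything else is incompatible */
		if (hdr->it_version != PKTHDR_RADIOTAP_VERSION)
			return false;

		/* length of the whole radiotap header, little endian */
		it_len = get_unaligned_le16(&hdr->it_len);
		if (it_len < sizeof(*hdr) || it_len > buflen)
			return false;

		/* the 802.11 frame starts at buf + it_len; if bit 31
		 * (IEEE80211_RADIOTAP_EXT) is set in it_present, further
		 * present words precede the data fields
		 */
		return true;
	}
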
- * - * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless - * - * Contains the AMPDU information for the subframe. - * - * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 - * - * Contains VHT information about this frame. - * - * IEEE80211_RADIOTAP_TIMESTAMP u64, u16, u8, u8 variable - * - * Contains timestamp information for this frame. - */ -enum ieee80211_radiotap_type { +/* version is always 0 */ +#define PKTHDR_RADIOTAP_VERSION 0 + +/* see the radiotap website for the descriptions */ +enum ieee80211_radiotap_presence { IEEE80211_RADIOTAP_TSFT = 0, IEEE80211_RADIOTAP_FLAGS = 1, IEEE80211_RADIOTAP_RATE = 2, @@ -214,7 +67,7 @@ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_TX_FLAGS = 15, IEEE80211_RADIOTAP_RTS_RETRIES = 16, IEEE80211_RADIOTAP_DATA_RETRIES = 17, - + /* 18 is XChannel, but it's not defined yet */ IEEE80211_RADIOTAP_MCS = 19, IEEE80211_RADIOTAP_AMPDU_STATUS = 20, IEEE80211_RADIOTAP_VHT = 21, @@ -226,129 +79,135 @@ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_EXT = 31 }; -/* Channel flags. */ -#define IEEE80211_CHAN_TURBO 0x0010 /* Turbo channel */ -#define IEEE80211_CHAN_CCK 0x0020 /* CCK channel */ -#define IEEE80211_CHAN_OFDM 0x0040 /* OFDM channel */ -#define IEEE80211_CHAN_2GHZ 0x0080 /* 2 GHz spectrum channel. */ -#define IEEE80211_CHAN_5GHZ 0x0100 /* 5 GHz spectrum channel */ -#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */ -#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */ -#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */ -#define IEEE80211_CHAN_GSM 0x1000 /* GSM (900 MHz) */ -#define IEEE80211_CHAN_STURBO 0x2000 /* Static Turbo */ -#define IEEE80211_CHAN_HALF 0x4000 /* Half channel (10 MHz wide) */ -#define IEEE80211_CHAN_QUARTER 0x8000 /* Quarter channel (5 MHz wide) */ - -/* For IEEE80211_RADIOTAP_FLAGS */ -#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received - * during CFP - */ -#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received - * with short - * preamble - */ -#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received - * with WEP encryption - */ -#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received - * with fragmentation - */ -#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */ -#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between - * 802.11 header and payload - * (to 32-bit boundary) - */ -#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* bad FCS */ - -/* For IEEE80211_RADIOTAP_RX_FLAGS */ -#define IEEE80211_RADIOTAP_F_RX_BADPLCP 0x0002 /* frame has bad PLCP */ +/* for IEEE80211_RADIOTAP_FLAGS */ +enum ieee80211_radiotap_flags { + IEEE80211_RADIOTAP_F_CFP = 0x01, + IEEE80211_RADIOTAP_F_SHORTPRE = 0x02, + IEEE80211_RADIOTAP_F_WEP = 0x04, + IEEE80211_RADIOTAP_F_FRAG = 0x08, + IEEE80211_RADIOTAP_F_FCS = 0x10, + IEEE80211_RADIOTAP_F_DATAPAD = 0x20, + IEEE80211_RADIOTAP_F_BADFCS = 0x40, +}; -/* For IEEE80211_RADIOTAP_TX_FLAGS */ -#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive - * retries */ -#define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */ -#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */ -#define IEEE80211_RADIOTAP_F_TX_NOACK 0x0008 /* don't expect an ack */ +/* for IEEE80211_RADIOTAP_CHANNEL */ +enum ieee80211_radiotap_channel_flags { + IEEE80211_CHAN_CCK = 0x0020, + IEEE80211_CHAN_OFDM = 0x0040, + IEEE80211_CHAN_2GHZ = 0x0080, + IEEE80211_CHAN_5GHZ = 0x0100, + IEEE80211_CHAN_DYN = 0x0400, + IEEE80211_CHAN_HALF = 0x4000, + IEEE80211_CHAN_QUARTER = 0x8000, +}; +/* for 
IEEE80211_RADIOTAP_RX_FLAGS */ +enum ieee80211_radiotap_rx_flags { + IEEE80211_RADIOTAP_F_RX_BADPLCP = 0x0002, +}; -/* For IEEE80211_RADIOTAP_MCS */ -#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01 -#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02 -#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04 -#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08 -#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10 -#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20 +/* for IEEE80211_RADIOTAP_TX_FLAGS */ +enum ieee80211_radiotap_tx_flags { + IEEE80211_RADIOTAP_F_TX_FAIL = 0x0001, + IEEE80211_RADIOTAP_F_TX_CTS = 0x0002, + IEEE80211_RADIOTAP_F_TX_RTS = 0x0004, + IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008, +}; -#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03 -#define IEEE80211_RADIOTAP_MCS_BW_20 0 -#define IEEE80211_RADIOTAP_MCS_BW_40 1 -#define IEEE80211_RADIOTAP_MCS_BW_20L 2 -#define IEEE80211_RADIOTAP_MCS_BW_20U 3 -#define IEEE80211_RADIOTAP_MCS_SGI 0x04 -#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 -#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 -#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 -#define IEEE80211_RADIOTAP_MCS_STBC_1 1 -#define IEEE80211_RADIOTAP_MCS_STBC_2 2 -#define IEEE80211_RADIOTAP_MCS_STBC_3 3 +/* for IEEE80211_RADIOTAP_MCS "have" flags */ +enum ieee80211_radiotap_mcs_have { + IEEE80211_RADIOTAP_MCS_HAVE_BW = 0x01, + IEEE80211_RADIOTAP_MCS_HAVE_MCS = 0x02, + IEEE80211_RADIOTAP_MCS_HAVE_GI = 0x04, + IEEE80211_RADIOTAP_MCS_HAVE_FMT = 0x08, + IEEE80211_RADIOTAP_MCS_HAVE_FEC = 0x10, + IEEE80211_RADIOTAP_MCS_HAVE_STBC = 0x20, +}; -#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 +enum ieee80211_radiotap_mcs_flags { + IEEE80211_RADIOTAP_MCS_BW_MASK = 0x03, + IEEE80211_RADIOTAP_MCS_BW_20 = 0, + IEEE80211_RADIOTAP_MCS_BW_40 = 1, + IEEE80211_RADIOTAP_MCS_BW_20L = 2, + IEEE80211_RADIOTAP_MCS_BW_20U = 3, + + IEEE80211_RADIOTAP_MCS_SGI = 0x04, + IEEE80211_RADIOTAP_MCS_FMT_GF = 0x08, + IEEE80211_RADIOTAP_MCS_FEC_LDPC = 0x10, + IEEE80211_RADIOTAP_MCS_STBC_MASK = 0x60, + IEEE80211_RADIOTAP_MCS_STBC_1 = 1, + IEEE80211_RADIOTAP_MCS_STBC_2 = 2, + IEEE80211_RADIOTAP_MCS_STBC_3 = 3, + IEEE80211_RADIOTAP_MCS_STBC_SHIFT = 5, +}; -/* For IEEE80211_RADIOTAP_AMPDU_STATUS */ -#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 -#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002 -#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004 -#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008 -#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010 -#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020 +/* for IEEE80211_RADIOTAP_AMPDU_STATUS */ +enum ieee80211_radiotap_ampdu_flags { + IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN = 0x0001, + IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN = 0x0002, + IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN = 0x0004, + IEEE80211_RADIOTAP_AMPDU_IS_LAST = 0x0008, + IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR = 0x0010, + IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN = 0x0020, +}; -/* For IEEE80211_RADIOTAP_VHT */ -#define IEEE80211_RADIOTAP_VHT_KNOWN_STBC 0x0001 -#define IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA 0x0002 -#define IEEE80211_RADIOTAP_VHT_KNOWN_GI 0x0004 -#define IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS 0x0008 -#define IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM 0x0010 -#define IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED 0x0020 -#define IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH 0x0040 -#define IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID 0x0080 -#define IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID 0x0100 +/* for IEEE80211_RADIOTAP_VHT */ +enum ieee80211_radiotap_vht_known { + IEEE80211_RADIOTAP_VHT_KNOWN_STBC = 0x0001, + IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA = 0x0002, + 
IEEE80211_RADIOTAP_VHT_KNOWN_GI = 0x0004, + IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS = 0x0008, + IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM = 0x0010, + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED = 0x0020, + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH = 0x0040, + IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID = 0x0080, + IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID = 0x0100, +}; -#define IEEE80211_RADIOTAP_VHT_FLAG_STBC 0x01 -#define IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA 0x02 -#define IEEE80211_RADIOTAP_VHT_FLAG_SGI 0x04 -#define IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 0x08 -#define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM 0x10 -#define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED 0x20 +enum ieee80211_radiotap_vht_flags { + IEEE80211_RADIOTAP_VHT_FLAG_STBC = 0x01, + IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA = 0x02, + IEEE80211_RADIOTAP_VHT_FLAG_SGI = 0x04, + IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 = 0x08, + IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM = 0x10, + IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED = 0x20, +}; -#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01 -#define IEEE80211_RADIOTAP_CODING_LDPC_USER1 0x02 -#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04 -#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08 +enum ieee80211_radiotap_vht_coding { + IEEE80211_RADIOTAP_CODING_LDPC_USER0 = 0x01, + IEEE80211_RADIOTAP_CODING_LDPC_USER1 = 0x02, + IEEE80211_RADIOTAP_CODING_LDPC_USER2 = 0x04, + IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 0x08, +}; -/* For IEEE80211_RADIOTAP_TIMESTAMP */ -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK 0x000F -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS 0x0000 -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US 0x0001 -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0010 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0030 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0 +/* for IEEE80211_RADIOTAP_TIMESTAMP */ +enum ieee80211_radiotap_timestamp_unit_spos { + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 0x000F, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS = 0x0000, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US = 0x0001, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS = 0x0003, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK = 0x00F0, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU = 0x0000, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ = 0x0010, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU = 0x0020, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU = 0x0030, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN = 0x00F0, +}; -#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00 -#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT 0x01 -#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY 0x02 +enum ieee80211_radiotap_timestamp_flags { + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT = 0x00, + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT = 0x01, + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02, +}; -/* helpers */ -static inline int ieee80211_get_radiotap_len(unsigned char *data) +/** + * ieee80211_get_radiotap_len - get radiotap header length + */ +static inline u16 ieee80211_get_radiotap_len(const char *data) { - struct ieee80211_radiotap_header *hdr = - (struct ieee80211_radiotap_header *)data; + struct ieee80211_radiotap_header *hdr = (void *)data; return get_unaligned_le16(&hdr->it_len); } -#endif /* IEEE80211_RADIOTAP_H */ +#endif /* __RADIOTAP_H */ -- cgit v1.2.3 From 
1045ba77a5962a22bce7777678ef46714107ea63 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Tue, 24 Jan 2017 07:02:41 -0500 Subject: net sched actions: Add support for user cookies Introduce optional 128-bit action cookie. Like all other cookie schemes in the networking world (eg in protocols like http or existing kernel fib protocol field, etc) the idea is to save user state that when retrieved serves as a correlator. The kernel _should not_ intepret it. The user can store whatever they wish in the 128 bits. Sample exercise(showing variable length use of cookie) .. create an accept action with cookie a1b2c3d4 sudo $TC actions add action ok index 1 cookie a1b2c3d4 .. dump all gact actions.. sudo $TC -s actions ls action gact action order 0: gact action pass random type none pass val 0 index 1 ref 1 bind 0 installed 5 sec used 5 sec Action statistics: Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0) backlog 0b 0p requeues 0 cookie a1b2c3d4 .. bind the accept action to a filter.. sudo $TC filter add dev lo parent ffff: protocol ip prio 1 \ u32 match ip dst 127.0.0.1/32 flowid 1:1 action gact index 1 ... send some traffic.. $ ping 127.0.0.1 -c 3 PING 127.0.0.1 (127.0.0.1) 56(84) bytes of data. 64 bytes from 127.0.0.1: icmp_seq=1 ttl=64 time=0.020 ms 64 bytes from 127.0.0.1: icmp_seq=2 ttl=64 time=0.027 ms 64 bytes from 127.0.0.1: icmp_seq=3 ttl=64 time=0.038 ms Signed-off-by: David S. Miller --- include/net/act_api.h | 1 + include/net/pkt_cls.h | 8 ++++++++ include/uapi/linux/pkt_cls.h | 3 +++ net/sched/act_api.c | 45 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 57 insertions(+) diff --git a/include/net/act_api.h b/include/net/act_api.h index 1d716449209e..cfa2ae33da9a 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -41,6 +41,7 @@ struct tc_action { struct rcu_head tcfa_rcu; struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; + struct tc_cookie *act_cookie; }; #define tcf_head common.tcfa_head #define tcf_index common.tcfa_index diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index f0a051480c6c..b43077e47d35 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -515,4 +515,12 @@ struct tc_cls_bpf_offload { u32 gen_flags; }; + +/* This structure holds cookie structure that is passed from user + * to the kernel for actions and classifiers + */ +struct tc_cookie { + u8 *data; + u32 len; +}; #endif diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index fd373ebd5a44..345551e71410 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -4,6 +4,8 @@ #include #include +#define TC_COOKIE_MAX_SIZE 16 + /* Action attributes */ enum { TCA_ACT_UNSPEC, @@ -12,6 +14,7 @@ enum { TCA_ACT_INDEX, TCA_ACT_STATS, TCA_ACT_PAD, + TCA_ACT_COOKIE, __TCA_ACT_MAX }; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index cd08df91351d..3c5e29ba6594 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,12 @@ static void free_tcf(struct rcu_head *head) free_percpu(p->cpu_bstats); free_percpu(p->cpu_qstats); + + if (p->act_cookie) { + kfree(p->act_cookie->data); + kfree(p->act_cookie); + } + kfree(p); } @@ -475,6 +482,12 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) goto nla_put_failure; if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; + if (a->act_cookie) { + if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len, + 
a->act_cookie->data)) + goto nla_put_failure; + } + nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; @@ -516,6 +529,22 @@ errout: return err; } +int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb) +{ + a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL); + if (!a->act_cookie) + return -ENOMEM; + + a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); + if (!a->act_cookie->data) { + kfree(a->act_cookie); + return -ENOMEM; + } + a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]); + + return 0; +} + struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) @@ -575,6 +604,22 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, if (err < 0) goto err_mod; + if (tb[TCA_ACT_COOKIE]) { + int cklen = nla_len(tb[TCA_ACT_COOKIE]); + + if (cklen > TC_COOKIE_MAX_SIZE) { + err = -EINVAL; + tcf_hash_release(a, bind); + goto err_mod; + } + + err = nla_memdup_cookie(a, tb); + if (err < 0) { + tcf_hash_release(a, bind); + goto err_mod; + } + } + /* module count goes up only when brand new policy is created * if it exists and is only bound to in a_o->init() then * ACT_P_CREATED is not returned (a zero is). -- cgit v1.2.3 From a08ef4768f479a712f80426fba42d16253feb37c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 24 Jan 2017 12:49:35 +0300 Subject: tipc: uninitialized return code in tipc_setsockopt() We shuffled some code around and added some new case statements here and now "res" isn't initialized on all paths. Fixes: 01fd12bb189a ("tipc: make replicast a user selectable option") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- net/tipc/socket.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 5bec8aac5008..103d1fd058c0 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2348,7 +2348,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); u32 value = 0; - int res; + int res = 0; if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) return 0; @@ -2388,7 +2388,6 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, break; case TIPC_CONN_TIMEOUT: tipc_sk(sk)->conn_timeout = value; - /* no need to set "res", since already 0 at this point */ break; case TIPC_MCAST_BROADCAST: tsk->mc_method.rcast = false; -- cgit v1.2.3 From 60b1af3300724d211bb0b420c1fbe6bf5b87b013 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 24 Jan 2017 14:57:36 -0800 Subject: tcp: reduce skb overhead in selected places tcp_add_backlog() can use skb_condense() helper to get better gains and less SKB_TRUESIZE() magic. This only happens when socket backlog has to be used. Some attacks involve specially crafted out of order tiny TCP packets, clogging the ofo queue of (many) sockets. Then later, expensive collapse happens, trying to copy all these skbs into single ones. This unfortunately does not work if each skb has no neighbor in TCP sequence order. By using skb_condense() if the skb could not be coalesced to a prior one, we defeat these kind of threats, potentially saving 4K per skb (or more, since this is one page fragment). A typical NAPI driver allocates gro packets with GRO_MAX_HEAD bytes in skb->head, meaning the copy done by skb_condense() is limited to about 200 bytes. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 1 + net/ipv4/tcp_ipv4.c | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index bfa165cc455a..3de6eba378ad 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4507,6 +4507,7 @@ add_sack: end: if (skb) { tcp_grow_window(sk, skb); + skb_condense(skb); skb_set_owner_r(skb, sk); } } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f7325b25b06e..a90b4540c11e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1556,8 +1556,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) * It has been noticed pure SACK packets were sometimes dropped * (if cooked by drivers without copybreak feature). */ - if (!skb->data_len) - skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); + skb_condense(skb); if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); -- cgit v1.2.3 From 2acae0d5b0f73a8fb4b180bd13491feb96e55fc6 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 25 Jan 2017 02:28:16 +0100 Subject: trace: add variant without spacing in trace_print_hex_seq For upcoming tracepoint support for BPF, we want to dump the program's tag. Format should be similar to __print_hex(), but without spacing. Add a __print_hex_str() variant for exactly that purpose that reuses trace_print_hex_seq(). Signed-off-by: Daniel Borkmann Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/trace_events.h | 3 ++- include/trace/trace_events.h | 8 +++++++- kernel/trace/trace_output.c | 7 ++++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index be007610ceb0..cfa475a0e9ca 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -33,7 +33,8 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, unsigned int bitmask_size); const char *trace_print_hex_seq(struct trace_seq *p, - const unsigned char *buf, int len); + const unsigned char *buf, int len, + bool spacing); const char *trace_print_array_seq(struct trace_seq *p, const void *buf, int count, diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 467e12f780d8..9f684629c3cf 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -297,7 +297,12 @@ TRACE_MAKE_SYSTEM_STR(); #endif #undef __print_hex -#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len) +#define __print_hex(buf, buf_len) \ + trace_print_hex_seq(p, buf, buf_len, true) + +#undef __print_hex_str +#define __print_hex_str(buf, buf_len) \ + trace_print_hex_seq(p, buf, buf_len, false) #undef __print_array #define __print_array(array, count, el_size) \ @@ -711,6 +716,7 @@ static inline void ftrace_test_probe_##call(void) \ #undef __print_flags #undef __print_symbolic #undef __print_hex +#undef __print_hex_str #undef __get_dynamic_array #undef __get_dynamic_array_len #undef __get_str diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 5d33a7352919..30a144b1b9ee 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -163,14 +163,15 @@ trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, EXPORT_SYMBOL_GPL(trace_print_bitmask_seq); const char * -trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) +trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len, + bool spacing) { int i; const char *ret = trace_seq_buffer_ptr(p); for (i = 0; i < 
buf_len; i++) - trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]); - + trace_seq_printf(p, "%s%2.2x", !spacing || i == 0 ? "" : " ", + buf[i]); trace_seq_putc(p, 0); return ret; -- cgit v1.2.3 From 0fe05591790e953f3ef9cf4f1bce08b6dd7b771f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 25 Jan 2017 02:28:17 +0100 Subject: lib, traceevent: add PRINT_HEX_STR variant Add support for the __print_hex_str() macro that was added for tracing, so that user space tools such as perf can understand it as well. Signed-off-by: Daniel Borkmann Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- tools/lib/traceevent/event-parse.c | 34 ++++++++++++++++++++-- tools/lib/traceevent/event-parse.h | 1 + .../perf/util/scripting-engines/trace-event-perl.c | 1 + .../util/scripting-engines/trace-event-python.c | 1 + 4 files changed, 34 insertions(+), 3 deletions(-) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 14a4f623c1a5..f2ea78021450 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -831,6 +831,7 @@ static void free_arg(struct print_arg *arg) free_flag_sym(arg->symbol.symbols); break; case PRINT_HEX: + case PRINT_HEX_STR: free_arg(arg->hex.field); free_arg(arg->hex.size); break; @@ -2629,10 +2630,11 @@ out_free: } static enum event_type -process_hex(struct event_format *event, struct print_arg *arg, char **tok) +process_hex_common(struct event_format *event, struct print_arg *arg, + char **tok, enum print_arg_type type) { memset(arg, 0, sizeof(*arg)); - arg->type = PRINT_HEX; + arg->type = type; if (alloc_and_process_delim(event, ",", &arg->hex.field)) goto out; @@ -2650,6 +2652,19 @@ out: return EVENT_ERROR; } +static enum event_type +process_hex(struct event_format *event, struct print_arg *arg, char **tok) +{ + return process_hex_common(event, arg, tok, PRINT_HEX); +} + +static enum event_type +process_hex_str(struct event_format *event, struct print_arg *arg, + char **tok) +{ + return process_hex_common(event, arg, tok, PRINT_HEX_STR); +} + static enum event_type process_int_array(struct event_format *event, struct print_arg *arg, char **tok) { @@ -3009,6 +3024,10 @@ process_function(struct event_format *event, struct print_arg *arg, free_token(token); return process_hex(event, arg, tok); } + if (strcmp(token, "__print_hex_str") == 0) { + free_token(token); + return process_hex_str(event, arg, tok); + } if (strcmp(token, "__print_array") == 0) { free_token(token); return process_int_array(event, arg, tok); @@ -3547,6 +3566,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg case PRINT_SYMBOL: case PRINT_INT_ARRAY: case PRINT_HEX: + case PRINT_HEX_STR: break; case PRINT_TYPE: val = eval_num_arg(data, size, event, arg->typecast.item); @@ -3962,6 +3982,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, } break; case PRINT_HEX: + case PRINT_HEX_STR: if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) { unsigned long offset; offset = pevent_read_number(pevent, @@ -3981,7 +4002,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, } len = eval_num_arg(data, size, event, arg->hex.size); for (i = 0; i < len; i++) { - if (i) + if (i && arg->type == PRINT_HEX) trace_seq_putc(s, ' '); trace_seq_printf(s, "%02x", hex[i]); } @@ -5727,6 +5748,13 @@ static void print_args(struct print_arg *args) print_args(args->hex.size); printf(")"); break; + case PRINT_HEX_STR: + printf("__print_hex_str("); + 
print_args(args->hex.field); + printf(", "); + print_args(args->hex.size); + printf(")"); + break; case PRINT_INT_ARRAY: printf("__print_array("); print_args(args->int_array.field); diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 7aae746ec2fe..74cecba87daa 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -292,6 +292,7 @@ enum print_arg_type { PRINT_FUNC, PRINT_BITMASK, PRINT_DYNAMIC_ARRAY_LEN, + PRINT_HEX_STR, }; struct print_arg { diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index e55a132f69b7..e74adfbd6a2e 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -217,6 +217,7 @@ static void define_event_symbols(struct event_format *event, cur_field_name); break; case PRINT_HEX: + case PRINT_HEX_STR: define_event_symbols(event, ev_name, args->hex.field); define_event_symbols(event, ev_name, args->hex.size); break; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 089438da1f7f..581e0efd6356 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -236,6 +236,7 @@ static void define_event_symbols(struct event_format *event, cur_field_name); break; case PRINT_HEX: + case PRINT_HEX_STR: define_event_symbols(event, ev_name, args->hex.field); define_event_symbols(event, ev_name, args->hex.size); break; -- cgit v1.2.3 From a67edbf4fb6deadcfe57a04a134abed4a5ba3bb5 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 25 Jan 2017 02:28:18 +0100 Subject: bpf: add initial bpf tracepoints This work adds a number of tracepoints to paths that are either considered slow-path or exception-like states, where monitoring or inspecting them would be desirable. For bpf(2) syscall, tracepoints have been placed for main commands when they succeed. In XDP case, tracepoint is for exceptions, that is, f.e. on abnormal BPF program exit such as unknown or XDP_ABORTED return code, or when error occurs during XDP_TX action and the packet could not be forwarded. Both have been split into separate event headers, and can be further extended. Worst case, if they unexpectedly should get into our way in future, they can also removed [1]. Of course, these tracepoints (like any other) can be analyzed by eBPF itself, etc. Example output: # ./perf record -a -e bpf:* sleep 10 # ./perf script sock_example 6197 [005] 283.980322: bpf:bpf_map_create: map type=ARRAY ufd=4 key=4 val=8 max=256 flags=0 sock_example 6197 [005] 283.980721: bpf:bpf_prog_load: prog=a5ea8fa30ea6849c type=SOCKET_FILTER ufd=5 sock_example 6197 [005] 283.988423: bpf:bpf_prog_get_type: prog=a5ea8fa30ea6849c type=SOCKET_FILTER sock_example 6197 [005] 283.988443: bpf:bpf_map_lookup_elem: map type=ARRAY ufd=4 key=[06 00 00 00] val=[00 00 00 00 00 00 00 00] [...] sock_example 6197 [005] 288.990868: bpf:bpf_map_lookup_elem: map type=ARRAY ufd=4 key=[01 00 00 00] val=[14 00 00 00 00 00 00 00] swapper 0 [005] 289.338243: bpf:bpf_prog_put_rcu: prog=a5ea8fa30ea6849c type=SOCKET_FILTER [1] https://lwn.net/Articles/705270/ Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3 + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 12 +- .../net/ethernet/netronome/nfp/nfp_net_common.c | 15 +- drivers/net/ethernet/qlogic/qede/qede_fp.c | 4 + drivers/net/virtio_net.c | 12 +- include/linux/bpf_trace.h | 7 + include/trace/events/bpf.h | 347 +++++++++++++++++++++ include/trace/events/xdp.h | 53 ++++ kernel/bpf/core.c | 9 + kernel/bpf/inode.c | 17 +- kernel/bpf/syscall.c | 19 +- 11 files changed, 483 insertions(+), 15 deletions(-) create mode 100644 include/linux/bpf_trace.h create mode 100644 include/trace/events/bpf.h create mode 100644 include/trace/events/xdp.h diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index e362f99334d0..f15ddba3659a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -926,10 +927,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud length, cq->ring, &doorbell_pending))) goto consumed; + trace_xdp_exception(dev, xdp_prog, act); goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(dev, xdp_prog, act); case XDP_DROP: ring->xdp_drop++; xdp_drop_no_cnt: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ba50583ea3ed..3d2e1a1886a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "en.h" #include "en_tc.h" @@ -640,7 +641,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq) mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); } -static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, +static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, const struct xdp_buff *xdp) { @@ -662,7 +663,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) { rq->stats.xdp_drop++; mlx5e_page_release(rq, di, true); - return; + return false; } if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) { @@ -673,7 +674,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, } rq->stats.xdp_tx_full++; mlx5e_page_release(rq, di, true); - return; + return false; } dma_len -= MLX5E_XDP_MIN_INLINE; @@ -703,6 +704,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, sq->db.xdp.doorbell = true; rq->stats.xdp_tx++; + return true; } /* returns true if packet was consumed by xdp */ @@ -728,11 +730,13 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, *len = xdp.data_end - xdp.data; return false; case XDP_TX: - mlx5e_xmit_xdp_frame(rq, di, &xdp); + if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) + trace_xdp_exception(rq->netdev, prog, act); return true; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: rq->stats.xdp_drop++; mlx5e_page_release(rq, di, true); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 67afd95ffb93..6ac43abf561b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -42,6 +42,7 @@ */ #include +#include #include #include #include @@ -1459,7 +1460,7 @@ nfp_net_rx_drop(struct 
nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, dev_kfree_skb_any(skb); } -static void +static bool nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, struct nfp_net_tx_ring *tx_ring, struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, @@ -1473,13 +1474,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, if (unlikely(nfp_net_tx_full(tx_ring, 1))) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); if (unlikely(!new_frag)) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); @@ -1509,6 +1510,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, tx_ring->wr_p++; tx_ring->wr_ptr_add++; + return true; } static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) @@ -1613,12 +1615,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) case XDP_PASS: break; case XDP_TX: - nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf, - pkt_off, pkt_len); + if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, + tx_ring, rxbuf, + pkt_off, pkt_len))) + trace_xdp_exception(nn->netdev, xdp_prog, act); continue; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(nn->netdev, xdp_prog, act); case XDP_DROP: nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 1a6ca4884fad..445d4d2492c3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -1016,6 +1017,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, /* We need the replacement buffer before transmit. 
*/ if (qede_alloc_rx_buffer(rxq, true)) { qede_recycle_rx_bd_ring(rxq, 1); + trace_xdp_exception(edev->ndev, prog, act); return false; } @@ -1026,6 +1028,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(bd->data); + trace_xdp_exception(edev->ndev, prog, act); } /* Regardless, we've consumed an Rx BD */ @@ -1035,6 +1038,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(edev->ndev, prog, act); case XDP_DROP: qede_recycle_rx_bd_ring(rxq, cqe->bd_num); } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 37db91d1a0a3..f9bf94887ff1 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -330,7 +331,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, return skb; } -static void virtnet_xdp_xmit(struct virtnet_info *vi, +static bool virtnet_xdp_xmit(struct virtnet_info *vi, struct receive_queue *rq, struct send_queue *sq, struct xdp_buff *xdp, @@ -382,10 +383,12 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi, put_page(page); } else /* small buffer */ kfree_skb(data); - return; // On error abort to avoid unnecessary kick + /* On error abort to avoid unnecessary kick */ + return false; } virtqueue_kick(sq->vq); + return true; } static u32 do_xdp_prog(struct virtnet_info *vi, @@ -421,11 +424,14 @@ static u32 do_xdp_prog(struct virtnet_info *vi, vi->xdp_queue_pairs + smp_processor_id(); xdp.data = buf; - virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data); + if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, + data))) + trace_xdp_exception(vi->dev, xdp_prog, act); return XDP_TX; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); case XDP_DROP: return XDP_DROP; } diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h new file mode 100644 index 000000000000..b22efbdd2eb4 --- /dev/null +++ b/include/linux/bpf_trace.h @@ -0,0 +1,7 @@ +#ifndef __LINUX_BPF_TRACE_H__ +#define __LINUX_BPF_TRACE_H__ + +#include +#include + +#endif /* __LINUX_BPF_TRACE_H__ */ diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h new file mode 100644 index 000000000000..c3a53fd47ff1 --- /dev/null +++ b/include/trace/events/bpf.h @@ -0,0 +1,347 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bpf + +#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BPF_H + +#include +#include +#include +#include + +#define __PROG_TYPE_MAP(FN) \ + FN(SOCKET_FILTER) \ + FN(KPROBE) \ + FN(SCHED_CLS) \ + FN(SCHED_ACT) \ + FN(TRACEPOINT) \ + FN(XDP) \ + FN(PERF_EVENT) \ + FN(CGROUP_SKB) \ + FN(CGROUP_SOCK) \ + FN(LWT_IN) \ + FN(LWT_OUT) \ + FN(LWT_XMIT) + +#define __MAP_TYPE_MAP(FN) \ + FN(HASH) \ + FN(ARRAY) \ + FN(PROG_ARRAY) \ + FN(PERF_EVENT_ARRAY) \ + FN(PERCPU_HASH) \ + FN(PERCPU_ARRAY) \ + FN(STACK_TRACE) \ + FN(CGROUP_ARRAY) \ + FN(LRU_HASH) \ + FN(LRU_PERCPU_HASH) \ + FN(LPM_TRIE) + +#define __PROG_TYPE_TP_FN(x) \ + TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x); +#define __PROG_TYPE_SYM_FN(x) \ + { BPF_PROG_TYPE_##x, #x }, +#define __PROG_TYPE_SYM_TAB \ + __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 } +__PROG_TYPE_MAP(__PROG_TYPE_TP_FN) + +#define __MAP_TYPE_TP_FN(x) \ + TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x); +#define __MAP_TYPE_SYM_FN(x) \ + { BPF_MAP_TYPE_##x, #x }, +#define __MAP_TYPE_SYM_TAB \ + __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 
0 } +__MAP_TYPE_MAP(__MAP_TYPE_TP_FN) + +DECLARE_EVENT_CLASS(bpf_prog_event, + + TP_PROTO(const struct bpf_prog *prg), + + TP_ARGS(prg), + + TP_STRUCT__entry( + __array(u8, prog_tag, 8) + __field(u32, type) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); + __entry->type = prg->type; + ), + + TP_printk("prog=%s type=%s", + __print_hex_str(__entry->prog_tag, 8), + __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB)) +); + +DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type, + + TP_PROTO(const struct bpf_prog *prg), + + TP_ARGS(prg) +); + +DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu, + + TP_PROTO(const struct bpf_prog *prg), + + TP_ARGS(prg) +); + +TRACE_EVENT(bpf_prog_load, + + TP_PROTO(const struct bpf_prog *prg, int ufd), + + TP_ARGS(prg, ufd), + + TP_STRUCT__entry( + __array(u8, prog_tag, 8) + __field(u32, type) + __field(int, ufd) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); + __entry->type = prg->type; + __entry->ufd = ufd; + ), + + TP_printk("prog=%s type=%s ufd=%d", + __print_hex_str(__entry->prog_tag, 8), + __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB), + __entry->ufd) +); + +TRACE_EVENT(bpf_map_create, + + TP_PROTO(const struct bpf_map *map, int ufd), + + TP_ARGS(map, ufd), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, size_key) + __field(u32, size_value) + __field(u32, max_entries) + __field(u32, flags) + __field(int, ufd) + ), + + TP_fast_assign( + __entry->type = map->map_type; + __entry->size_key = map->key_size; + __entry->size_value = map->value_size; + __entry->max_entries = map->max_entries; + __entry->flags = map->map_flags; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, __entry->size_key, __entry->size_value, + __entry->max_entries, __entry->flags) +); + +DECLARE_EVENT_CLASS(bpf_obj_prog, + + TP_PROTO(const struct bpf_prog *prg, int ufd, + const struct filename *pname), + + TP_ARGS(prg, ufd, pname), + + TP_STRUCT__entry( + __array(u8, prog_tag, 8) + __field(int, ufd) + __string(path, pname->name) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); + __assign_str(path, pname->name); + __entry->ufd = ufd; + ), + + TP_printk("prog=%s path=%s ufd=%d", + __print_hex_str(__entry->prog_tag, 8), + __get_str(path), __entry->ufd) +); + +DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog, + + TP_PROTO(const struct bpf_prog *prg, int ufd, + const struct filename *pname), + + TP_ARGS(prg, ufd, pname) +); + +DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog, + + TP_PROTO(const struct bpf_prog *prg, int ufd, + const struct filename *pname), + + TP_ARGS(prg, ufd, pname) +); + +DECLARE_EVENT_CLASS(bpf_obj_map, + + TP_PROTO(const struct bpf_map *map, int ufd, + const struct filename *pname), + + TP_ARGS(map, ufd, pname), + + TP_STRUCT__entry( + __field(u32, type) + __field(int, ufd) + __string(path, pname->name) + ), + + TP_fast_assign( + __assign_str(path, pname->name); + __entry->type = map->map_type; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d path=%s", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, __get_str(path)) +); + +DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map, + + TP_PROTO(const struct bpf_map *map, int ufd, + const struct filename 
*pname), + + TP_ARGS(map, ufd, pname) +); + +DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map, + + TP_PROTO(const struct bpf_map *map, int ufd, + const struct filename *pname), + + TP_ARGS(map, ufd, pname) +); + +DECLARE_EVENT_CLASS(bpf_map_keyval, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *val), + + TP_ARGS(map, ufd, key, val), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, key_len) + __dynamic_array(u8, key, map->key_size) + __field(bool, key_trunc) + __field(u32, val_len) + __dynamic_array(u8, val, map->value_size) + __field(bool, val_trunc) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(key), key, map->key_size); + memcpy(__get_dynamic_array(val), val, map->value_size); + __entry->type = map->map_type; + __entry->key_len = min(map->key_size, 16U); + __entry->key_trunc = map->key_size != __entry->key_len; + __entry->val_len = min(map->value_size, 16U); + __entry->val_trunc = map->value_size != __entry->val_len; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, + __print_hex(__get_dynamic_array(key), __entry->key_len), + __entry->key_trunc ? " ..." : "", + __print_hex(__get_dynamic_array(val), __entry->val_len), + __entry->val_trunc ? " ..." : "") +); + +DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *val), + + TP_ARGS(map, ufd, key, val) +); + +DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *val), + + TP_ARGS(map, ufd, key, val) +); + +TRACE_EVENT(bpf_map_delete_elem, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key), + + TP_ARGS(map, ufd, key), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, key_len) + __dynamic_array(u8, key, map->key_size) + __field(bool, key_trunc) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(key), key, map->key_size); + __entry->type = map->map_type; + __entry->key_len = min(map->key_size, 16U); + __entry->key_trunc = map->key_size != __entry->key_len; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=[%s%s]", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, + __print_hex(__get_dynamic_array(key), __entry->key_len), + __entry->key_trunc ? " ..." : "") +); + +TRACE_EVENT(bpf_map_next_key, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *key_next), + + TP_ARGS(map, ufd, key, key_next), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, key_len) + __dynamic_array(u8, key, map->key_size) + __dynamic_array(u8, nxt, map->key_size) + __field(bool, key_trunc) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(key), key, map->key_size); + memcpy(__get_dynamic_array(nxt), key_next, map->key_size); + __entry->type = map->map_type; + __entry->key_len = min(map->key_size, 16U); + __entry->key_trunc = map->key_size != __entry->key_len; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, + __print_hex(__get_dynamic_array(key), __entry->key_len), + __entry->key_trunc ? " ..." : "", + __print_hex(__get_dynamic_array(nxt), __entry->key_len), + __entry->key_trunc ? " ..." 
: "") +); + +#endif /* _TRACE_BPF_H */ + +#include diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h new file mode 100644 index 000000000000..1b61357d3f57 --- /dev/null +++ b/include/trace/events/xdp.h @@ -0,0 +1,53 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xdp + +#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XDP_H + +#include +#include +#include + +#define __XDP_ACT_MAP(FN) \ + FN(ABORTED) \ + FN(DROP) \ + FN(PASS) \ + FN(TX) + +#define __XDP_ACT_TP_FN(x) \ + TRACE_DEFINE_ENUM(XDP_##x); +#define __XDP_ACT_SYM_FN(x) \ + { XDP_##x, #x }, +#define __XDP_ACT_SYM_TAB \ + __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 } +__XDP_ACT_MAP(__XDP_ACT_TP_FN) + +TRACE_EVENT(xdp_exception, + + TP_PROTO(const struct net_device *dev, + const struct bpf_prog *xdp, u32 act), + + TP_ARGS(dev, xdp, act), + + TP_STRUCT__entry( + __string(name, dev->name) + __array(u8, prog_tag, 8) + __field(u32, act) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag)); + memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag)); + __assign_str(name, dev->name); + __entry->act = act; + ), + + TP_printk("prog=%s device=%s action=%s", + __print_hex_str(__entry->prog_tag, 8), + __get_str(name), + __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB)) +); + +#endif /* _TRACE_XDP_H */ + +#include diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 503d4211988a..fddd76b1b627 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1173,3 +1173,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, { return -EFAULT; } + +/* All definitions of tracepoints related to BPF. */ +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); + +EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type); +EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu); diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 0b030c9126d3..fddcae801724 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -21,6 +21,7 @@ #include #include #include +#include enum bpf_type { BPF_TYPE_UNSPEC = 0, @@ -281,6 +282,13 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname) ret = bpf_obj_do_pin(pname, raw, type); if (ret != 0) bpf_any_put(raw, type); + if ((trace_bpf_obj_pin_prog_enabled() || + trace_bpf_obj_pin_map_enabled()) && !ret) { + if (type == BPF_TYPE_PROG) + trace_bpf_obj_pin_prog(raw, ufd, pname); + if (type == BPF_TYPE_MAP) + trace_bpf_obj_pin_map(raw, ufd, pname); + } out: putname(pname); return ret; @@ -342,8 +350,15 @@ int bpf_obj_get_user(const char __user *pathname) else goto out; - if (ret < 0) + if (ret < 0) { bpf_any_put(raw, type); + } else if (trace_bpf_obj_get_prog_enabled() || + trace_bpf_obj_get_map_enabled()) { + if (type == BPF_TYPE_PROG) + trace_bpf_obj_get_prog(raw, ret, pname); + if (type == BPF_TYPE_MAP) + trace_bpf_obj_get_map(raw, ret, pname); + } out: putname(pname); return ret; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1d6b29e4e2c3..05ad086ab71d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -10,6 +10,7 @@ * General Public License for more details. 
*/ #include +#include #include #include #include @@ -215,6 +216,7 @@ static int map_create(union bpf_attr *attr) /* failed to allocate fd */ goto free_map; + trace_bpf_map_create(map, err); return err; free_map: @@ -339,6 +341,7 @@ static int map_lookup_elem(union bpf_attr *attr) if (copy_to_user(uvalue, value, value_size) != 0) goto free_value; + trace_bpf_map_lookup_elem(map, ufd, key, value); err = 0; free_value: @@ -421,6 +424,8 @@ static int map_update_elem(union bpf_attr *attr) __this_cpu_dec(bpf_prog_active); preempt_enable(); + if (!err) + trace_bpf_map_update_elem(map, ufd, key, value); free_value: kfree(value); free_key: @@ -466,6 +471,8 @@ static int map_delete_elem(union bpf_attr *attr) __this_cpu_dec(bpf_prog_active); preempt_enable(); + if (!err) + trace_bpf_map_delete_elem(map, ufd, key); free_key: kfree(key); err_put: @@ -518,6 +525,7 @@ static int map_get_next_key(union bpf_attr *attr) if (copy_to_user(unext_key, next_key, map->key_size) != 0) goto free_next_key; + trace_bpf_map_next_key(map, ufd, key, next_key); err = 0; free_next_key: @@ -671,8 +679,10 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu) void bpf_prog_put(struct bpf_prog *prog) { - if (atomic_dec_and_test(&prog->aux->refcnt)) + if (atomic_dec_and_test(&prog->aux->refcnt)) { + trace_bpf_prog_put_rcu(prog); call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); + } } EXPORT_SYMBOL_GPL(bpf_prog_put); @@ -781,7 +791,11 @@ struct bpf_prog *bpf_prog_get(u32 ufd) struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) { - return __bpf_prog_get(ufd, &type); + struct bpf_prog *prog = __bpf_prog_get(ufd, &type); + + if (!IS_ERR(prog)) + trace_bpf_prog_get_type(prog); + return prog; } EXPORT_SYMBOL_GPL(bpf_prog_get_type); @@ -863,6 +877,7 @@ static int bpf_prog_load(union bpf_attr *attr) /* failed to allocate fd */ goto free_used_maps; + trace_bpf_prog_load(prog, err); return err; free_used_maps: -- cgit v1.2.3 From d234559952f4f2b1bf89229da0d54be5281613d7 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 25 Jan 2017 02:44:48 +0100 Subject: Doc: DT: bindings: net: dsa: marvell.txt: Tabification Replace spaces with tabs. Fix indentation to be multiples of tabs, not a mixture or tabs and spaces. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- .../devicetree/bindings/net/dsa/marvell.txt | 112 ++++++++++----------- 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt index 64dbdd825981..7ef9dbb08957 100644 --- a/Documentation/devicetree/bindings/net/dsa/marvell.txt +++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt @@ -14,9 +14,9 @@ The properties described here are those specific to Marvell devices. Additional required and optional properties can be found in dsa.txt. Required properties: -- compatible : Should be one of "marvell,mv88e6085" or - "marvell,mv88e6190" -- reg : Address on the MII bus for the switch. +- compatible : Should be one of "marvell,mv88e6085" or + "marvell,mv88e6190" +- reg : Address on the MII bus for the switch. Optional properties: @@ -28,65 +28,65 @@ Optional properties: #interrupt-cells = <2> : Controller uses two cells, number and flag - mdio : Container of PHY and devices on the switches MDIO bus. -- mdio? : Container of PHYs and devices on the external MDIO +- mdio? : Container of PHYs and devices on the external MDIO bus. 
The node must contains a compatible string of "marvell,mv88e6xxx-mdio-external" Example: - mdio { - #address-cells = <1>; - #size-cells = <0>; - interrupt-parent = <&gpio0>; - interrupts = <27 IRQ_TYPE_LEVEL_LOW>; - interrupt-controller; - #interrupt-cells = <2>; + mdio { + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&gpio0>; + interrupts = <27 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #interrupt-cells = <2>; - switch0: switch@0 { - compatible = "marvell,mv88e6085"; - reg = <0>; - reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; - }; - mdio { - #address-cells = <1>; - #size-cells = <0>; - switch1phy0: switch1phy0@0 { - reg = <0>; - interrupt-parent = <&switch0>; - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; - }; - }; - }; + switch0: switch@0 { + compatible = "marvell,mv88e6085"; + reg = <0>; + reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; + }; + mdio { + #address-cells = <1>; + #size-cells = <0>; + switch1phy0: switch1phy0@0 { + reg = <0>; + interrupt-parent = <&switch0>; + interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + }; - mdio { - #address-cells = <1>; - #size-cells = <0>; - interrupt-parent = <&gpio0>; - interrupts = <27 IRQ_TYPE_LEVEL_LOW>; - interrupt-controller; - #interrupt-cells = <2>; + mdio { + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&gpio0>; + interrupts = <27 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #interrupt-cells = <2>; - switch0: switch@0 { - compatible = "marvell,mv88e6390"; - reg = <0>; - reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; - }; - mdio { - #address-cells = <1>; - #size-cells = <0>; - switch1phy0: switch1phy0@0 { - reg = <0>; - interrupt-parent = <&switch0>; - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; - }; - }; + switch0: switch@0 { + compatible = "marvell,mv88e6390"; + reg = <0>; + reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; + }; + mdio { + #address-cells = <1>; + #size-cells = <0>; + switch1phy0: switch1phy0@0 { + reg = <0>; + interrupt-parent = <&switch0>; + interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + }; + }; - mdio1 { - compatible = "marvell,mv88e6xxx-mdio-external"; - #address-cells = <1>; - #size-cells = <0>; - switch1phy9: switch1phy0@9 { - reg = <9>; - }; - }; - }; + mdio1 { + compatible = "marvell,mv88e6xxx-mdio-external"; + #address-cells = <1>; + #size-cells = <0>; + switch1phy9: switch1phy0@9 { + reg = <9>; + }; + }; + }; -- cgit v1.2.3 From a9c54ad2c737853b1f404b10b09da4ca7fa18597 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Wed, 25 Jan 2017 13:41:45 +0800 Subject: r8152: fix the wrong spelling Replace rumtime with runtime. Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller --- drivers/net/usb/r8152.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index f3b48ad90865..d59d7737708b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3576,7 +3576,7 @@ static bool delay_autosuspend(struct r8152 *tp) return false; } -static int rtl8152_rumtime_suspend(struct r8152 *tp) +static int rtl8152_runtime_suspend(struct r8152 *tp) { struct net_device *netdev = tp->netdev; int ret = 0; @@ -3653,7 +3653,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) mutex_lock(&tp->control); if (PMSG_IS_AUTO(message)) - ret = rtl8152_rumtime_suspend(tp); + ret = rtl8152_runtime_suspend(tp); else ret = rtl8152_system_suspend(tp); -- cgit v1.2.3 From 065263f40f0972d5f1cd294bb0242bd5aa5f06b2 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Mon, 23 Jan 2017 10:59:20 -0800 Subject: net/tcp-fastopen: refactor cookie check logic Refactor the cookie check logic in tcp_send_syn_data() into a function. This function will be called else where in later changes. Signed-off-by: Wei Wang Acked-by: Eric Dumazet Acked-by: Yuchung Cheng Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ net/ipv4/tcp_fastopen.c | 21 +++++++++++++++++++++ net/ipv4/tcp_output.c | 16 ++-------------- 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index c55d65f74f7f..de67541d7adf 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1493,6 +1493,8 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct tcp_fastopen_cookie *foc, struct dst_entry *dst); void tcp_fastopen_init_key_once(bool publish); +bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, + struct tcp_fastopen_cookie *cookie); #define TCP_FASTOPEN_KEY_LENGTH 16 /* Fastopen key context */ diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index f51919535ca7..f90e09e1ff4c 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -325,3 +325,24 @@ fastopen: *foc = valid_foc; return NULL; } + +bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, + struct tcp_fastopen_cookie *cookie) +{ + unsigned long last_syn_loss = 0; + int syn_loss = 0; + + tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss); + + /* Recurring FO SYN losses: no cookie or data in SYN */ + if (syn_loss > 1 && + time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) { + cookie->len = -1; + return false; + } + if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) { + cookie->len = -1; + return true; + } + return cookie->len > 0; +} diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9a1a1494b9dd..671c69535671 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3267,23 +3267,11 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_request *fo = tp->fastopen_req; - int syn_loss = 0, space, err = 0; - unsigned long last_syn_loss = 0; + int space, err = 0; struct sk_buff *syn_data; tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ - tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, - &syn_loss, &last_syn_loss); - /* Recurring FO SYN losses: revert to regular handshake temporarily */ - if (syn_loss > 1 && - time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) { - fo->cookie.len = -1; - goto fallback; - } - - if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) - fo->cookie.len = -1; - else if (fo->cookie.len <= 0) + if 
(!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) goto fallback; /* MSS for SYN-data is based on cached MSS and bounded by PMTU and -- cgit v1.2.3 From 25776aa943401662617437841b3d3ea4693ee98a Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Mon, 23 Jan 2017 10:59:21 -0800 Subject: net: Remove __sk_dst_reset() in tcp_v6_connect() Remove __sk_dst_reset() in the failure handling because __sk_dst_reset() will eventually get called when sk is released. No need to handle it in the protocol specific connect call. This is also to make the code path consistent with ipv4. Signed-off-by: Wei Wang Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv6/tcp_ipv6.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f72100eedd5d..0b7cd3d009b6 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -295,7 +295,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, late_failure: tcp_set_state(sk, TCP_CLOSE); - __sk_dst_reset(sk); failure: inet->inet_dport = 0; sk->sk_route_caps = 0; -- cgit v1.2.3 From 19f6d3f3c8422d65b5e3d2162e30ef07c6e21ea2 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Mon, 23 Jan 2017 10:59:22 -0800 Subject: net/tcp-fastopen: Add new API support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a new socket option, TCP_FASTOPEN_CONNECT, as an alternative way to perform Fast Open on the active side (client). Prior to this patch, a client needs to replace the connect() call with sendto(MSG_FASTOPEN). This can be cumbersome for applications who want to use Fast Open: these socket operations are often done in lower layer libraries used by many other applications. Changing these libraries and/or the socket call sequences are not trivial. A more convenient approach is to perform Fast Open by simply enabling a socket option when the socket is created w/o changing other socket calls sequence: s = socket() create a new socket setsockopt(s, IPPROTO_TCP, TCP_FASTOPEN_CONNECT …); newly introduced sockopt If set, new functionality described below will be used. Return ENOTSUPP if TFO is not supported or not enabled in the kernel. connect() With cookie present, return 0 immediately. With no cookie, initiate 3WHS with TFO cookie-request option and return -1 with errno = EINPROGRESS. write()/sendmsg() With cookie present, send out SYN with data and return the number of bytes buffered. With no cookie, and 3WHS not yet completed, return -1 with errno = EINPROGRESS. No MSG_FASTOPEN flag is needed. read() Return -1 with errno = EWOULDBLOCK/EAGAIN if connect() is called but write() is not called yet. Return -1 with errno = EWOULDBLOCK/EAGAIN if connection is established but no msg is received yet. Return number of bytes read if socket is established and there is msg received. The new API simplifies life for applications that always perform a write() immediately after a successful connect(). Such applications can now take advantage of Fast Open by merely making one new setsockopt() call at the time of creating the socket. Nothing else about the application's socket call sequence needs to change. Signed-off-by: Wei Wang Acked-by: Eric Dumazet Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 3 ++- include/net/inet_sock.h | 6 +++++- include/net/tcp.h | 1 + include/uapi/linux/tcp.h | 1 + net/ipv4/af_inet.c | 31 ++++++++++++++++++++++++------- net/ipv4/tcp.c | 35 ++++++++++++++++++++++++++++++++++- net/ipv4/tcp_fastopen.c | 33 +++++++++++++++++++++++++++++++++ net/ipv4/tcp_ipv4.c | 7 ++++++- net/ipv6/tcp_ipv6.c | 5 +++++ 9 files changed, 111 insertions(+), 11 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 5371b3d70cfe..f88f4649ba6f 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -222,7 +222,8 @@ struct tcp_sock { u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ - unused:5; + fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ + unused:4; u8 nonagle : 4,/* Disable Nagle algorithm? */ thin_lto : 1,/* Use linear timeouts for thin streams */ unused1 : 1, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index c9cff977a7fb..aa95053dfc78 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -206,7 +206,11 @@ struct inet_sock { transparent:1, mc_all:1, nodefrag:1; - __u8 bind_address_no_port:1; + __u8 bind_address_no_port:1, + defer_connect:1; /* Indicates that fastopen_connect is set + * and cookie exists so we defer connect + * until first data frame is written + */ __u8 rcv_tos; __u8 convert_csum; int uc_index; diff --git a/include/net/tcp.h b/include/net/tcp.h index de67541d7adf..6ec4ea652f3f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1495,6 +1495,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, void tcp_fastopen_init_key_once(bool publish); bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie); +bool tcp_fastopen_defer_connect(struct sock *sk, int *err); #define TCP_FASTOPEN_KEY_LENGTH 16 /* Fastopen key context */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index c53de2691cec..6ff35eb48d10 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -116,6 +116,7 @@ enum { #define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */ #define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */ #define TCP_REPAIR_WINDOW 29 /* Get/set window parameters */ +#define TCP_FASTOPEN_CONNECT 30 /* Attempt FastOpen with connect */ struct tcp_repair_opt { __u32 opt_code; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 28fe8da4e1ac..92e7f3e957fa 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -576,13 +576,24 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int err; long timeo; - if (addr_len < sizeof(uaddr->sa_family)) - return -EINVAL; + /* + * uaddr can be NULL and addr_len can be 0 if: + * sk is a TCP fastopen active socket and + * TCP_FASTOPEN_CONNECT sockopt is set and + * we already have a valid cookie for this socket. + * In this case, user can call write() after connect(). + * write() will invoke tcp_sendmsg_fastopen() which calls + * __inet_stream_connect(). + */ + if (uaddr) { + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; - if (uaddr->sa_family == AF_UNSPEC) { - err = sk->sk_prot->disconnect(sk, flags); - sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED; - goto out; + if (uaddr->sa_family == AF_UNSPEC) { + err = sk->sk_prot->disconnect(sk, flags); + sock->state = err ? 
SS_DISCONNECTING : SS_UNCONNECTED; + goto out; + } } switch (sock->state) { @@ -593,7 +604,10 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, err = -EISCONN; goto out; case SS_CONNECTING: - err = -EALREADY; + if (inet_sk(sk)->defer_connect) + err = -EINPROGRESS; + else + err = -EALREADY; /* Fall out of switch with err, set for this state */ break; case SS_UNCONNECTED: @@ -607,6 +621,9 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, sock->state = SS_CONNECTING; + if (!err && inet_sk(sk)->defer_connect) + goto out; + /* Just entered SS_CONNECTING state; the only * difference is that return value in non-blocking * case is EINPROGRESS, rather than EALREADY. diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c43eb1a831d7..d9735b76d073 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -533,6 +533,12 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (tp->urg_data & TCP_URG_VALID) mask |= POLLPRI; + } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { + /* Active TCP fastopen socket with defer_connect + * Return POLLOUT so application can call write() + * in order for kernel to generate SYN+data + */ + mask |= POLLOUT | POLLWRNORM; } /* This barrier is coupled with smp_wmb() in tcp_reset() */ smp_rmb(); @@ -1071,6 +1077,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, size_t size) { struct tcp_sock *tp = tcp_sk(sk); + struct inet_sock *inet = inet_sk(sk); int err, flags; if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) @@ -1085,9 +1092,19 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, tp->fastopen_req->data = msg; tp->fastopen_req->size = size; + if (inet->defer_connect) { + err = tcp_connect(sk); + /* Same failure procedure as in tcp_v4/6_connect */ + if (err) { + tcp_set_state(sk, TCP_CLOSE); + inet->inet_dport = 0; + sk->sk_route_caps = 0; + } + } flags = (msg->msg_flags & MSG_DONTWAIT) ? 
O_NONBLOCK : 0; err = __inet_stream_connect(sk->sk_socket, msg->msg_name, msg->msg_namelen, flags); + inet->defer_connect = 0; *copied = tp->fastopen_req->copied; tcp_free_fastopen_req(tp); return err; @@ -1107,7 +1124,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) lock_sock(sk); flags = msg->msg_flags; - if (flags & MSG_FASTOPEN) { + if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); if (err == -EINPROGRESS && copied_syn > 0) goto out; @@ -2656,6 +2673,18 @@ static int do_tcp_setsockopt(struct sock *sk, int level, err = -EINVAL; } break; + case TCP_FASTOPEN_CONNECT: + if (val > 1 || val < 0) { + err = -EINVAL; + } else if (sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) { + if (sk->sk_state == TCP_CLOSE) + tp->fastopen_connect = val; + else + err = -EINVAL; + } else { + err = -EOPNOTSUPP; + } + break; case TCP_TIMESTAMP: if (!tp->repair) err = -EPERM; @@ -3016,6 +3045,10 @@ static int do_tcp_getsockopt(struct sock *sk, int level, val = icsk->icsk_accept_queue.fastopenq.max_qlen; break; + case TCP_FASTOPEN_CONNECT: + val = tp->fastopen_connect; + break; + case TCP_TIMESTAMP: val = tcp_time_stamp + tp->tsoffset; break; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index f90e09e1ff4c..9674bec4a0f8 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -346,3 +346,36 @@ bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, } return cookie->len > 0; } + +/* This function checks if we want to defer sending SYN until the first + * write(). We defer under the following conditions: + * 1. fastopen_connect sockopt is set + * 2. we have a valid cookie + * Return value: return true if we want to defer until application writes data + * return false if we want to send out SYN immediately + */ +bool tcp_fastopen_defer_connect(struct sock *sk, int *err) +{ + struct tcp_fastopen_cookie cookie = { .len = 0 }; + struct tcp_sock *tp = tcp_sk(sk); + u16 mss; + + if (tp->fastopen_connect && !tp->fastopen_req) { + if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) { + inet_sk(sk)->defer_connect = 1; + return true; + } + + /* Alloc fastopen_req in order for FO option to be included + * in SYN + */ + tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req), + sk->sk_allocation); + if (tp->fastopen_req) + tp->fastopen_req->cookie = cookie; + else + *err = -ENOBUFS; + } + return false; +} +EXPORT_SYMBOL(tcp_fastopen_defer_connect); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a90b4540c11e..8c9e9aa17d66 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -232,6 +232,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) /* OK, now commit destination to socket. 
*/ sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); + rt = NULL; if (!tp->write_seq && likely(!tp->repair)) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, @@ -242,9 +243,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_id = tp->write_seq ^ jiffies; + if (tcp_fastopen_defer_connect(sk, &err)) + return err; + if (err) + goto failure; + err = tcp_connect(sk); - rt = NULL; if (err) goto failure; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0b7cd3d009b6..95c05e5293b1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -287,6 +287,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, inet->inet_dport, &tp->tsoffset); + if (tcp_fastopen_defer_connect(sk, &err)) + return err; + if (err) + goto late_failure; + err = tcp_connect(sk); if (err) goto late_failure; -- cgit v1.2.3 From 3979ad7e82dfe3fb94a51c3915e64ec64afa45c3 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Wed, 25 Jan 2017 14:42:46 +0100 Subject: net/tcp-fastopen: make connect()'s return case more consistent with non-TFO Without TFO, any subsequent connect() call after a successful one returns -1 EISCONN. The last API update ensured that __inet_stream_connect() can return -1 EINPROGRESS in response to sendmsg() when TFO is in use to indicate that the connection is now in progress. Unfortunately since this function is used both for connect() and sendmsg(), it has the undesired side effect of making connect() now return -1 EINPROGRESS as well after a successful call, while at the same time poll() returns POLLOUT. This can confuse some applications which happen to call connect() and to check for -1 EISCONN to ensure the connection is usable, and for which EINPROGRESS indicates a need to poll, causing a loop. This problem was encountered in haproxy where a call to connect() is precisely used in certain cases to confirm a connection's readiness. While arguably haproxy's behaviour should be improved here, it seems important to aim at a more robust behaviour when the goal of the new API is to make it easier to implement TFO in existing applications. This patch simply ensures that we preserve the same semantics as in the non-TFO case on the connect() syscall when using TFO, while still returning -1 EINPROGRESS on sendmsg(). For this we simply tell __inet_stream_connect() whether we're doing a regular connect() or in fact connecting for a sendmsg() call. Cc: Wei Wang Cc: Yuchung Cheng Cc: Eric Dumazet Signed-off-by: Willy Tarreau Signed-off-by: David S. 
Miller --- include/net/inet_common.h | 2 +- net/ipv4/af_inet.c | 6 +++--- net/ipv4/tcp.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 5d683428fced..b7952d55b9c0 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -17,7 +17,7 @@ int inet_release(struct socket *sock); int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, - int addr_len, int flags); + int addr_len, int flags, int is_sendmsg); int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int inet_accept(struct socket *sock, struct socket *newsock, int flags); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 92e7f3e957fa..685ba53df2d1 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -570,7 +570,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) * TCP 'magic' in here. */ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, - int addr_len, int flags) + int addr_len, int flags, int is_sendmsg) { struct sock *sk = sock->sk; int err; @@ -605,7 +605,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, goto out; case SS_CONNECTING: if (inet_sk(sk)->defer_connect) - err = -EINPROGRESS; + err = is_sendmsg ? -EINPROGRESS : -EISCONN; else err = -EALREADY; /* Fall out of switch with err, set for this state */ @@ -679,7 +679,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int err; lock_sock(sock->sk); - err = __inet_stream_connect(sock, uaddr, addr_len, flags); + err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0); release_sock(sock->sk); return err; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d9735b76d073..2ed472ebf3b5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1103,7 +1103,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, } flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; err = __inet_stream_connect(sk->sk_socket, msg->msg_name, - msg->msg_namelen, flags); + msg->msg_namelen, flags, 1); inet->defer_connect = 0; *copied = tp->fastopen_req->copied; tcp_free_fastopen_req(tp); -- cgit v1.2.3 From fff4ffdde175bfa4516394db95ae56153224664b Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Wed, 25 Jan 2017 17:36:23 +0530 Subject: net: thunderx: Support to configure queue sizes from ethtool Adds support to set Rx/Tx queue sizes from ethtool. Fixes an issue with retrieving queue size. Also sets SQ's CQ_LIMIT based on configured Tx queue size such that HW doesn't process SQEs when there is no sufficient space in CQ. Signed-off-by: Sunil Goutham Signed-off-by: David S. 
Miller --- .../net/ethernet/cavium/thunder/nicvf_ethtool.c | 39 ++++++++++++++++++++-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 19 +++++++++-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 16 ++++++--- 3 files changed, 65 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 5ac474683c98..02a986cdbb39 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev, struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; - ring->rx_max_pending = MAX_RCV_BUF_COUNT; - ring->rx_pending = qs->rbdr_len; + ring->rx_max_pending = MAX_CMP_QUEUE_LEN; + ring->rx_pending = qs->cq_len; ring->tx_max_pending = MAX_SND_QUEUE_LEN; ring->tx_pending = qs->sq_len; } +static int nicvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + u32 rx_count, tx_count; + + /* Due to HW errata this is not supported on T88 pass 1.x silicon */ + if (pass1_silicon(nic->pdev)) + return -EINVAL; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + tx_count = clamp_t(u32, ring->tx_pending, + MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN); + rx_count = clamp_t(u32, ring->rx_pending, + MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN); + + if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len)) + return 0; + + /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */ + qs->sq_len = rounddown_pow_of_two(tx_count); + qs->cq_len = rounddown_pow_of_two(rx_count); + + if (netif_running(netdev)) { + nicvf_stop(netdev); + nicvf_open(netdev); + } + + return 0; +} + static int nicvf_get_rss_hash_opts(struct nicvf *nic, struct ethtool_rxnfc *info) { @@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .get_regs = nicvf_get_regs, .get_coalesce = nicvf_get_coalesce, .get_ringparam = nicvf_get_ringparam, + .set_ringparam = nicvf_set_ringparam, .get_rxnfc = nicvf_get_rxnfc, .set_rxnfc = nicvf_set_rxnfc, .get_rxfh_key_size = nicvf_get_rxfh_key_size, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d2ac133e36f1..ac0390be3b12 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; - cq_cfg.qsize = CMP_QSIZE; + cq_cfg.qsize = ilog2(qs->cq_len >> 10); cq_cfg.avg_con = 0; nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); @@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; - sq_cfg.qsize = SND_QSIZE; + sq_cfg.qsize = ilog2(qs->sq_len >> 10); sq_cfg.tstmp_bgx_intf = 0; - sq_cfg.cq_limit = 0; + /* CQ's level at which HW will stop processing SQEs to avoid + * transmitting a pkt with no space in CQ to post CQE_TX. 
+ */ + sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); /* Set threshold value for interrupt generation */ @@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable) { bool disable = false; struct queue_set *qs = nic->qs; + struct queue_set *pqs = nic->pnicvf->qs; int qidx; if (!qs) return 0; + /* Take primary VF's queue lengths. + * This is needed to take queue lengths set from ethtool + * into consideration. + */ + if (nic->sqs_mode && pqs) { + qs->cq_len = pqs->cq_len; + qs->sq_len = pqs->sq_len; + } + if (enable) { if (nicvf_alloc_resources(nic)) return -ENOMEM; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 9e2104675bc9..5cb84da99a2d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -59,8 +59,9 @@ /* Default queue count per QS, its lengths and threshold values */ #define DEFAULT_RBDR_CNT 1 -#define SND_QSIZE SND_QUEUE_SIZE2 +#define SND_QSIZE SND_QUEUE_SIZE0 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) +#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10)) #define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) #define SND_QUEUE_THRESH 2ULL #define MIN_SQ_DESC_PER_PKT_XMIT 2 @@ -70,11 +71,18 @@ /* Keep CQ and SQ sizes same, if timestamping * is enabled this equation will change. */ -#define CMP_QSIZE CMP_QUEUE_SIZE2 +#define CMP_QSIZE CMP_QUEUE_SIZE0 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) +#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10)) +#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10)) #define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2) #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ +/* No of CQEs that might anyway gets used by HW due to pipelining + * effects irrespective of PASS/DROP/LEVELS being configured + */ +#define CMP_QUEUE_PIPELINE_RSVD 544 + #define RBDR_SIZE RBDR_SIZE0 #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) @@ -93,8 +101,8 @@ * RED accepts pkt if unused CQE < 2304 & >= 2560 * DROPs pkts if unused CQE < 2304 */ -#define RQ_PASS_CQ_LVL 160ULL -#define RQ_DROP_CQ_LVL 144ULL +#define RQ_PASS_CQ_LVL 192ULL +#define RQ_DROP_CQ_LVL 184ULL /* RED and Backpressure levels of RBDR for pkt reception * For RBDR, level is a measure of fullness i.e 0x0 means empty -- cgit v1.2.3 From fff37fdad9df3b214294ef83943d92aa9e1c7ecc Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Wed, 25 Jan 2017 17:36:24 +0530 Subject: net: thunderx: Leave serdes lane config on 81/83xx to firmware For DLMs and SLMs on 80/81/83xx, many lane configurations across different boards are coming up. Also kernel doesn't have any way to identify board type/info and since firmware does, just get rid of figuring out lane to serdes config and take whatever has been programmed by low level firmware. Signed-off-by: Sunil Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 95 +++++------------------ 1 file changed, 18 insertions(+), 77 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 2f85b64f01fa..dfb2bad7ced5 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -894,17 +894,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) struct device *dev = &bgx->pdev->dev; struct lmac *lmac; char str[20]; - u8 dlm; - if (lmacid > bgx->max_lmac) + if (!bgx->is_dlm && lmacid) return; lmac = &bgx->lmac[lmacid]; - dlm = (lmacid / 2) + (bgx->bgx_id * 2); if (!bgx->is_dlm) sprintf(str, "BGX%d QLM mode", bgx->bgx_id); else - sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm); + sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid); switch (lmac->lmac_type) { case BGX_MODE_SGMII: @@ -990,7 +988,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) { struct lmac *lmac; - struct lmac *olmac; u64 cmr_cfg; u8 lmac_type; u8 lane_to_sds; @@ -1010,62 +1007,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) return; } - /* On 81xx BGX can be split across 2 DLMs - * firmware programs lmac_type of LMAC0 and LMAC2 + /* For DLMs or SLMs on 80/81/83xx so many lane configurations + * are possible and vary across boards. Also Kernel doesn't have + * any way to identify board type/info and since firmware does, + * just take lmac type and serdes lane config as is. */ - if ((idx == 0) || (idx == 2)) { - cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - /* Check if config is not reset value */ - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) - lmac->lmac_type = BGX_MODE_INVALID; - else - lmac->lmac_type = lmac_type; - lmac_set_training(bgx, lmac, lmac->lmacid); - lmac_set_lane2sds(bgx, lmac); - - olmac = &bgx->lmac[idx + 1]; - /* Check if other LMAC on the same DLM is already configured by - * firmware, if so use the same config or else set as same, as - * that of LMAC 0/2. - * This check is needed as on 80xx only one lane of each of the - * DLM of BGX0 is used, so have to rely on firmware for - * distingushing 80xx from 81xx. 
- */ - cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) { - olmac->lmac_type = lmac->lmac_type; - lmac_set_lane2sds(bgx, olmac); - } else { - olmac->lmac_type = lmac_type; - olmac->lane_to_sds = lane_to_sds; - } - lmac_set_training(bgx, olmac, olmac->lmacid); - } -} - -static bool is_dlm0_in_bgx_mode(struct bgx *bgx) -{ - struct lmac *lmac; - - if (!bgx->is_dlm) - return true; - - lmac = &bgx->lmac[0]; - if (lmac->lmac_type == BGX_MODE_INVALID) - return false; - - return true; + cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); + lmac_type = (u8)((cmr_cfg >> 8) & 0x07); + lane_to_sds = (u8)(cmr_cfg & 0xFF); + /* Check if config is reset value */ + if ((lmac_type == 0) && (lane_to_sds == 0xE4)) + lmac->lmac_type = BGX_MODE_INVALID; + else + lmac->lmac_type = lmac_type; + lmac->lane_to_sds = lane_to_sds; + lmac_set_training(bgx, lmac, lmac->lmacid); } static void bgx_get_qlm_mode(struct bgx *bgx) { struct lmac *lmac; - struct lmac *lmac01; - struct lmac *lmac23; u8 idx; /* Init all LMAC's type to invalid */ @@ -1081,29 +1042,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx) if (bgx->lmac_count > bgx->max_lmac) bgx->lmac_count = bgx->max_lmac; - for (idx = 0; idx < bgx->max_lmac; idx++) - bgx_set_lmac_config(bgx, idx); - - if (!bgx->is_dlm || bgx->is_rgx) { - bgx_print_qlm_mode(bgx, 0); - return; - } - - if (bgx->lmac_count) { - bgx_print_qlm_mode(bgx, 0); - bgx_print_qlm_mode(bgx, 2); - } - - /* If DLM0 is not in BGX mode then LMAC0/1 have - * to be configured with serdes lanes of DLM1 - */ - if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2)) - return; for (idx = 0; idx < bgx->lmac_count; idx++) { - lmac01 = &bgx->lmac[idx]; - lmac23 = &bgx->lmac[idx + 2]; - lmac01->lmac_type = lmac23->lmac_type; - lmac01->lane_to_sds = lmac23->lane_to_sds; + bgx_set_lmac_config(bgx, idx); + bgx_print_qlm_mode(bgx, idx); } } -- cgit v1.2.3 From 6eacfb54ea1825a3f1062bbfd71669b594168646 Mon Sep 17 00:00:00 2001 From: Tomáš Pilař Date: Wed, 25 Jan 2017 13:48:17 +0000 Subject: sfc: reduce severity of PIO buffer alloc failures PIO buffer allocation can fail for two valid reasons: - we've run out of them (results in -ENOSPC) - the NIC configuration doesn't support them (results in -EPERM) Since both these failures are expected netif_err is excessive. Signed-off-by: Bert Kenward Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index dccbbd323616..7c53da28ad64 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1170,7 +1170,13 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) nic_data->piobuf_size / efx_piobuf_size); rc = efx_ef10_alloc_piobufs(efx, n_piobufs); - if (rc) + if (rc == -ENOSPC) + netif_dbg(efx, probe, efx->net_dev, + "out of PIO buffers; cannot allocate more\n"); + else if (rc == -EPERM) + netif_dbg(efx, probe, efx->net_dev, + "not permitted to allocate PIO buffers\n"); + else if (rc) netif_err(efx, probe, efx->net_dev, "failed to allocate PIO buffers (%d)\n", rc); else @@ -1317,8 +1323,14 @@ static int efx_ef10_init_nic(struct efx_nic *efx) efx_ef10_free_piobufs(efx); } - /* Log an error on failure, but this is non-fatal */ - if (rc) + /* Log an error on failure, but this is non-fatal. 
+ * Permission errors are less important - we've presumably + * had the PIO buffer licence removed. + */ + if (rc == -EPERM) + netif_dbg(efx, drv, efx->net_dev, + "not permitted to restore PIO buffers\n"); + else if (rc) netif_err(efx, drv, efx->net_dev, "failed to restore PIO buffers (%d)\n", rc); nic_data->must_restore_piobufs = false; -- cgit v1.2.3 From 434502930f59995f37fcc2c02cab79e059fb5043 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 25 Jan 2017 15:04:17 +0100 Subject: net: dsa: Mop up remaining NET_DSA_HWMON references Previous patches have moved the temperature sensor code into the Marvell PHYs. A few now dead references to NET_DSA_HWMON were left behind. Go reap them. Reported-by: Valentin Rothberg Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- Documentation/networking/dsa/dsa.txt | 24 ------------------------ include/net/dsa.h | 8 -------- 2 files changed, 32 deletions(-) diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 63912ef34606..b8b40753133e 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -295,7 +295,6 @@ DSA currently leverages the following subsystems: - MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c - Switchdev: net/switchdev/* - Device Tree for various of_* functions -- HWMON: drivers/hwmon/* MDIO/PHY library ---------------- @@ -349,12 +348,6 @@ Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper functions such as of_get_phy_mode(), of_phy_connect() are also used to query per-port PHY specific details: interface connection, MDIO bus location etc.. -HWMON ------ - -Some switch drivers feature internal temperature sensors which are exposed as -regular HWMON devices in /sys/class/hwmon/. - Driver development ================== @@ -495,23 +488,6 @@ Power management BR_STATE_DISABLED and propagating changes to the hardware if this port is disabled while being a bridge member -Hardware monitoring -------------------- - -These callbacks are only available if CONFIG_NET_DSA_HWMON is enabled: - -- get_temp: this function queries the given switch for its temperature - -- get_temp_limit: this function returns the switch current maximum temperature - limit - -- set_temp_limit: this function configures the maximum temperature limit allowed - -- get_temp_alarm: this function returns the critical temperature threshold - returning an alarm notification - -See Documentation/hwmon/sysfs-interface for details. - Bridge layer ------------ diff --git a/include/net/dsa.h b/include/net/dsa.h index 9d6cd923c48c..08b340403927 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -178,14 +178,6 @@ struct dsa_switch { */ s8 rtable[DSA_MAX_SWITCHES]; -#ifdef CONFIG_NET_DSA_HWMON - /* - * Hardware monitoring information - */ - char hwmon_name[IFNAMSIZ + 8]; - struct device *hwmon_dev; -#endif - /* * The lower device this switch uses to talk to the host */ -- cgit v1.2.3 From 85b4685da52f6680635370c2cfb9a42f04ac9652 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 25 Jan 2017 21:00:25 +0100 Subject: net: phy: broadcom: use auxctl reading helper in BCM54612E code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Starting with commit 5b4e29005123 ("net: phy: broadcom: add bcm54xx_auxctl_read") we have a reading helper so use it and avoid code duplication. 
It also means we don't need MII_BCM54XX_AUXCTL_SHDWSEL_MISC define as it's the same as MII_BCM54XX_AUXCTL_SHDWSEL_MISC just for reading needs (same value shifted by 12 bits). Signed-off-by: Rafał Miłecki Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/broadcom.c | 6 ++---- include/linux/brcmphy.h | 1 - 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 4223e35490b0..25c6e6cea2dc 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -395,10 +395,8 @@ static int bcm54612e_config_aneg(struct phy_device *phydev) (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { u16 reg; - /* Errata: reads require filling in the write selector field */ - bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, - MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC); - reg = phy_read(phydev, MII_BCM54XX_AUX_CTL); + reg = bcm54xx_auxctl_read(phydev, + MII_BCM54XX_AUXCTL_SHDWSEL_MISC); /* Disable RXD to RXC delay (default set) */ reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW; /* Clear shadow selector field */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 295fb3e73de5..34e61004b9dc 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -111,7 +111,6 @@ #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 #define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100 #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 -#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 #define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8) -- cgit v1.2.3 From 8293c7bcdef1b866610bf21dee3eb0d181cee753 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 25 Jan 2017 21:00:26 +0100 Subject: net: phy: broadcom: drop duplicated define for RGMII SKEW delay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We had two defines for the same bit (both were used with the MII_BCM54XX_AUXCTL_SHDWSEL_MISC register). Signed-off-by: Rafał Miłecki Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/broadcom.c | 2 +- include/linux/brcmphy.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 25c6e6cea2dc..97d1c057c0a1 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -398,7 +398,7 @@ static int bcm54612e_config_aneg(struct phy_device *phydev) reg = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); /* Disable RXD to RXC delay (default set) */ - reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW; + reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; /* Clear shadow selector field */ reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK; bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 34e61004b9dc..f9cb73df127e 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -109,7 +109,6 @@ #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 -#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100 #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 #define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 -- cgit v1.2.3 From 5e7bfa6cb0a94c673f6d565e58353366d88e2aa5 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 25 Jan 2017 21:00:27 +0100 Subject: net: phy: bcm-phy-lib: clean up remaining AUXCTL register defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1) Use 0x%02x format for register number. This follows some other defines and makes it easier to distinct register from values. 2) Put register define above values and sort the values. It makes reading header code easier. 3) Use 0x%04x format for all values. It's about consistency with other values (and most of the header) not a personal preference. 4) Separate define for reading shift value with an extre empty line. It's user for all AUXCTL registers in a bcm54xx_auxctl_read. Signed-off-by: Rafał Miłecki Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/brcmphy.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index f9cb73df127e..cf93f1399d3e 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -104,17 +104,17 @@ /* * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. 
(PHY REG 0x18) */ -#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 -#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 -#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 -#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8) -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4) +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100 +#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 +#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 #define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 /* -- cgit v1.2.3 From 0901df3aeaee6cb5dde2fd51089786aff6225ebf Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 26 Jan 2017 00:42:49 +0100 Subject: bpf: use prefix_len in test_tag when reading fdinfo We currently used len instead of prefix_len for the strncmp() in fdinfo on the prog_tag. It still worked as we matched on the correct output line also with first 8 instead of 10 chars, but lets fix it properly to use the intended length. Fixes: 62b64660262a ("bpf: add prog tag test case to bpf selftests") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_tag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index 6ab4793b3d16..5f7c602f47d1 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -99,7 +99,7 @@ static void tag_from_fdinfo(int fd_prog, uint8_t *tag, uint32_t len) assert(fp); while (fgets(buff, sizeof(buff), fp)) { - if (strncmp(buff, "prog_tag:\t", len)) + if (strncmp(buff, "prog_tag:\t", prefix_len)) continue; ret = hex2bin(tag, buff + prefix_len, len); break; -- cgit v1.2.3 From 5b9d6b154a622d0481123c1b149afdf377264268 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 25 Jan 2017 23:29:33 +0100 Subject: bridge: move maybe_deliver_addr() inside #ifdef The only caller of this new function is inside of an #ifdef checking for CONFIG_BRIDGE_IGMP_SNOOPING, so we should move the implementation there too, in order to avoid this harmless warning: net/bridge/br_forward.c:177:13: error: 'maybe_deliver_addr' defined but not used [-Werror=unused-function] Fixes: 6db6f0eae605 ("bridge: multicast to unicast") Signed-off-by: Arnd Bergmann Acked-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/bridge/br_forward.c | 50 ++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index a0f9d0037d24..5a1f8ef49899 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -174,31 +174,6 @@ out: return p; } -static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, - const unsigned char *addr, bool local_orig) -{ - struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; - const unsigned char *src = eth_hdr(skb)->h_source; - - if (!should_deliver(p, skb)) - return; - - /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */ - if (skb->dev == p->dev && ether_addr_equal(src, addr)) - return; - - skb = skb_copy(skb, GFP_ATOMIC); - if (!skb) { - dev->stats.tx_dropped++; - return; - } - - if (!is_broadcast_ether_addr(addr)) - memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN); - - __br_forward(p, skb, local_orig); -} - /* called under rcu_read_lock */ void br_flood(struct net_bridge *br, struct sk_buff *skb, enum br_pkt_type pkt_type, bool local_rcv, bool local_orig) @@ -245,6 +220,31 @@ out: } #ifdef CONFIG_BRIDGE_IGMP_SNOOPING +static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, + const unsigned char *addr, bool local_orig) +{ + struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; + const unsigned char *src = eth_hdr(skb)->h_source; + + if (!should_deliver(p, skb)) + return; + + /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */ + if (skb->dev == p->dev && ether_addr_equal(src, addr)) + return; + + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) { + dev->stats.tx_dropped++; + return; + } + + if (!is_broadcast_ether_addr(addr)) + memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN); + + __br_forward(p, skb, local_orig); +} + /* called with rcu_read_lock */ void br_multicast_flood(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, -- cgit v1.2.3 From d3e9768ab97fbbff43d9face2711692206a576a9 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 24 Nov 2016 16:11:01 +0100 Subject: batman-adv: don't add loop detect macs to TT The bridge loop avoidance (BLA) feature of batman-adv sends packets to probe for Mesh/LAN packet loops. Those packets are not sent by real clients and should therefore not be added to the translation table (TT). 
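For illustration only (not part of this patch), the BA:BE prefix test that the new batadv_bla_is_loopdetect_mac() helper performs can be exercised in a tiny standalone userspace program; the helper name below is a local stand-in:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* BLA loop detect probes carry a source MAC starting with BA:BE,
   * so checking the first two bytes is enough to keep them out of
   * the translation table.
   */
  static bool is_loopdetect_mac(const uint8_t *mac)
  {
          return mac[0] == 0xba && mac[1] == 0xbe;
  }

  int main(void)
  {
          const uint8_t probe[6]  = { 0xba, 0xbe, 0x00, 0x11, 0x22, 0x33 };
          const uint8_t client[6] = { 0x02, 0xab, 0xcd, 0x11, 0x22, 0x33 };

          printf("probe: %d client: %d\n",
                 is_loopdetect_mac(probe), is_loopdetect_mac(client));
          return 0;
  }

This prints "probe: 1 client: 0", matching the condition used below to gate batadv_tt_local_add().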
Signed-off-by: Simon Wunderlich --- net/batman-adv/bridge_loop_avoidance.h | 18 ++++++++++++++++++ net/batman-adv/soft-interface.c | 3 ++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 1ae93e46fb98..2827cd3c13d2 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -20,6 +20,8 @@ #include "main.h" +#include +#include #include struct net_device; @@ -27,6 +29,22 @@ struct netlink_callback; struct seq_file; struct sk_buff; +/** + * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect + * frame sent by bridge loop avoidance + * @mac: mac address to check + * + * Return: true if the it looks like a loop detect frame + * (mac starts with BA:BE), false otherwise + */ +static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac) +{ + if (mac[0] == 0xba && mac[1] == 0xbe) + return true; + + return false; +} + #ifdef CONFIG_BATMAN_ADV_BLA bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, bool is_bcast); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 7b3494ae6ad9..1f55b4b9181c 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -258,7 +258,8 @@ static int batadv_interface_tx(struct sk_buff *skb, ethhdr = eth_hdr(skb); /* Register the client MAC in the transtable */ - if (!is_multicast_ether_addr(ethhdr->h_source)) { + if (!is_multicast_ether_addr(ethhdr->h_source) && + !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) { client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source, vid, skb->skb_iif, skb->mark); -- cgit v1.2.3 From e60bf3ea67673fc6dd2645946e6dcf135fd7e30c Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 11 Dec 2016 13:16:07 +0100 Subject: uapi: install batman_adv.h header 09748a22f4ab ("batman-adv: add generic netlink family for batman-adv") introduced the new batman_adv.h which describes the netlink attributes and commands of batman-adv. But the Kbuild entry to install the header was not added. All currently known tools ship their own copy of batman_adv.h but it should be installed anyway to later be able to migrate to the system batman_adv.h. 
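As a usage sketch (an assumption about tooling, not part of the patch): once the header is exported, for example via the kernel's headers_install target or a distribution kernel-headers package, a tool can drop its bundled copy and rely on the system header directly:

  /* Bundled copies of batman_adv.h become unnecessary; include the
   * installed uapi header instead.
   */
  #include <linux/batman_adv.h>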
Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- include/uapi/linux/Kbuild | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index a8b93e685239..7fdceb2ac5b7 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -64,6 +64,7 @@ header-y += auto_fs.h header-y += auxvec.h header-y += ax25.h header-y += b1lli.h +header-y += batman_adv.h header-y += baycom.h header-y += bcm933xx_hcs.h header-y += bfs_fs.h -- cgit v1.2.3 From ac79cbb96b58614ce13c4fccc00a9b4d43c2f79b Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 1 Jan 2017 00:00:00 +0100 Subject: batman-adv: update copyright years for 2017 Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- include/uapi/linux/batman_adv.h | 2 +- net/batman-adv/Makefile | 2 +- net/batman-adv/bat_algo.c | 2 +- net/batman-adv/bat_algo.h | 2 +- net/batman-adv/bat_iv_ogm.c | 2 +- net/batman-adv/bat_iv_ogm.h | 2 +- net/batman-adv/bat_v.c | 2 +- net/batman-adv/bat_v.h | 2 +- net/batman-adv/bat_v_elp.c | 2 +- net/batman-adv/bat_v_elp.h | 2 +- net/batman-adv/bat_v_ogm.c | 2 +- net/batman-adv/bat_v_ogm.h | 2 +- net/batman-adv/bitarray.c | 2 +- net/batman-adv/bitarray.h | 2 +- net/batman-adv/bridge_loop_avoidance.c | 2 +- net/batman-adv/bridge_loop_avoidance.h | 2 +- net/batman-adv/debugfs.c | 2 +- net/batman-adv/debugfs.h | 2 +- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/distributed-arp-table.h | 2 +- net/batman-adv/fragmentation.c | 2 +- net/batman-adv/fragmentation.h | 2 +- net/batman-adv/gateway_client.c | 2 +- net/batman-adv/gateway_client.h | 2 +- net/batman-adv/gateway_common.c | 2 +- net/batman-adv/gateway_common.h | 2 +- net/batman-adv/hard-interface.c | 2 +- net/batman-adv/hard-interface.h | 2 +- net/batman-adv/hash.c | 2 +- net/batman-adv/hash.h | 2 +- net/batman-adv/icmp_socket.c | 2 +- net/batman-adv/icmp_socket.h | 2 +- net/batman-adv/log.c | 2 +- net/batman-adv/log.h | 2 +- net/batman-adv/main.c | 2 +- net/batman-adv/main.h | 2 +- net/batman-adv/multicast.c | 2 +- net/batman-adv/multicast.h | 2 +- net/batman-adv/netlink.c | 2 +- net/batman-adv/netlink.h | 2 +- net/batman-adv/network-coding.c | 2 +- net/batman-adv/network-coding.h | 2 +- net/batman-adv/originator.c | 2 +- net/batman-adv/originator.h | 2 +- net/batman-adv/packet.h | 2 +- net/batman-adv/routing.c | 2 +- net/batman-adv/routing.h | 2 +- net/batman-adv/send.c | 2 +- net/batman-adv/send.h | 2 +- net/batman-adv/soft-interface.c | 2 +- net/batman-adv/soft-interface.h | 2 +- net/batman-adv/sysfs.c | 2 +- net/batman-adv/sysfs.h | 2 +- net/batman-adv/tp_meter.c | 2 +- net/batman-adv/tp_meter.h | 2 +- net/batman-adv/translation-table.c | 2 +- net/batman-adv/translation-table.h | 2 +- net/batman-adv/tvlv.c | 2 +- net/batman-adv/tvlv.h | 2 +- net/batman-adv/types.h | 2 +- 60 files changed, 60 insertions(+), 60 deletions(-) diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 734fe83ab645..a83ddb7b63db 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors: * * Matthias Schiffer * diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index f724d3c98a81..915987bc6d29 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +# Copyright (C) 2007-2017 B.A.T.M.A.N. 
contributors: # # Marek Lindner, Simon Wunderlich # diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c index 623d04302aa2..44fd073b7546 100644 --- a/net/batman-adv/bat_algo.c +++ b/net/batman-adv/bat_algo.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h index 3b5b69cdd12b..29f6312f9bf1 100644 --- a/net/batman-adv/bat_algo.h +++ b/net/batman-adv/bat_algo.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Linus Lüssing * diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index f00f666e2ccd..7c3d994e90d8 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h index b9f3550faaf7..ae2ab526bdb1 100644 --- a/net/batman-adv/bat_iv_ogm.h +++ b/net/batman-adv/bat_iv_ogm.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 2ac612d7bab4..0acd081dd286 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors: * * Linus Lüssing, Marek Lindner * diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h index 83b77639729e..dd7c4b647e6b 100644 --- a/net/batman-adv/bat_v.h +++ b/net/batman-adv/bat_v.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Linus Lüssing * diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index f2fb2f05b6bf..b90c9903e246 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: * * Linus Lüssing, Marek Lindner * diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h index be17c0b1369e..376ead280ab9 100644 --- a/net/batman-adv/bat_v_elp.h +++ b/net/batman-adv/bat_v_elp.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors: * * Linus Lüssing, Marek Lindner * diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 38b9aab83fc0..03a35c9f456d 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h index 4c4d45caa422..2068770b542d 100644 --- a/net/batman-adv/bat_v_ogm.h +++ b/net/batman-adv/bat_v_ogm.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2017 B.A.T.M.A.N. 
contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index 032271421a20..2b070c7e31da 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index 0e6e9d09078c..cc262c9d97e0 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index e7f690b571ea..2d22fd5ba96c 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: * * Simon Wunderlich * diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 2827cd3c13d2..e157986bd01c 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: * * Simon Wunderlich * diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 77925504379d..5406148b9497 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index e49121ee55f6..9c5d4a65b98c 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 49576c5a3fe3..dab466f97ccf 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h index 813ecea96cf9..ec364a3c1c66 100644 --- a/net/batman-adv/distributed-arp-table.h +++ b/net/batman-adv/distributed-arp-table.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 9c561e683f4b..42bfbd801a1b 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors: * * Martin Hundebøll * diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h index b95f619606af..1a2d6c308745 100644 --- a/net/batman-adv/fragmentation.h +++ b/net/batman-adv/fragmentation.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2017 B.A.T.M.A.N. 
contributors: * * Martin Hundebøll * diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 52b8bd6ec431..de9955d5224d 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 859166d03561..3baa3d466e5e 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index 21184810d89f..5db2e43e3775 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h index 8a5e1ddf1175..0a6a97d201f2 100644 --- a/net/batman-adv/gateway_common.h +++ b/net/batman-adv/gateway_common.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 61a431a9772b..e348f76ea8c1 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index d6309a423629..9f9890ff7a22 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c index a0a0fdb85805..b5f7e13918ac 100644 --- a/net/batman-adv/hash.c +++ b/net/batman-adv/hash.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h index 557a7044cfbc..0c905e91c5e2 100644 --- a/net/batman-adv/hash.h +++ b/net/batman-adv/hash.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index b310f381ae02..6308c9f0fd96 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h index e44a7da51431..f3fec40aae86 100644 --- a/net/batman-adv/icmp_socket.h +++ b/net/batman-adv/icmp_socket.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. 
contributors: * * Marek Lindner * diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c index c73c31769aba..4ef4bde2cc2d 100644 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h index 3284a7b0325d..7a2b9f4da078 100644 --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index d46415edd3be..5000c540614d 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 8683542067ba..57a8103dbce7 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 090a69fc342e..952ba81a565b 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors: * * Linus Lüssing * diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h index 2cddaf52a21d..2a78cddab0e9 100644 --- a/net/batman-adv/multicast.h +++ b/net/batman-adv/multicast.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors: * * Linus Lüssing * diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 062738163bdc..ab13b4d58733 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors: * * Matthias Schiffer * diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h index 52eb16281aba..f1cd8c5da966 100644 --- a/net/batman-adv/netlink.h +++ b/net/batman-adv/netlink.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors: * * Matthias Schiffer * diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index ab5a3bf0765f..e1f6fc72fe3e 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors: * * Martin Hundebøll, Jeppe Ledet-Pedersen * diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h index d6d7fb4ec5d5..c66efb81d2f4 100644 --- a/net/batman-adv/network-coding.h +++ b/net/batman-adv/network-coding.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors: * * Martin Hundebøll, Jeppe Ledet-Pedersen * diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 8f3b2969cc4e..8e2a4b205257 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2016 B.A.T.M.A.N. 
contributors: +/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index ebc56183f358..d94220a6d21a 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 7a36bcfa0ba0..8e8a5db197cb 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 6713bdf414cd..5f050fbdfff7 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 05c3ff42e181..5ede16c32f15 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 49021b7124f3..d7308263b8fa 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index a94e1e8639ca..f21166d10323 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 1f55b4b9181c..4a9923a95e8a 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index ec303ddbf647..639c3abb214a 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 17c844196eb2..0ae8b30e4eaa 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h index c76021b4e198..e487412e256b 100644 --- a/net/batman-adv/sysfs.h +++ b/net/batman-adv/sysfs.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2017 B.A.T.M.A.N. 
contributors: * * Marek Lindner * diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index 981e8c5b07e9..07f64b60b528 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors: * * Edo Monticelli, Antonio Quartulli * diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h index ba922c425e56..a8ada5c123bd 100644 --- a/net/batman-adv/tp_meter.h +++ b/net/batman-adv/tp_meter.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors: * * Edo Monticelli, Antonio Quartulli * diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 30ecbfb40adf..941afad92121 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index 783fdba84db2..411d586191da 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index a783420356ae..1d9e267caec9 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h index e4369b547b43..4d01400ada30 100644 --- a/net/batman-adv/tvlv.h +++ b/net/batman-adv/tvlv.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index e913aee28c98..8f64a5c01345 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2017 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * -- cgit v1.2.3 From 269cee621847a09b9cfcdb6469ff9b546efbcac1 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 17 Dec 2016 11:58:29 +0100 Subject: batman-adv: Remove unused variable in batadv_tt_local_set_flags Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/translation-table.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 941afad92121..6077a87d46f0 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -3714,7 +3714,6 @@ static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags, { struct batadv_hashtable *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common_entry; - u16 changed_num = 0; struct hlist_head *head; u32 i; @@ -3736,7 +3735,6 @@ static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags, continue; tt_common_entry->flags &= ~flags; } - changed_num++; if (!count) continue; -- cgit v1.2.3 From 0843f197c46dd3c432a3202de80e8bc1fcb96a6b Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Mon, 21 Nov 2016 23:01:09 +0800 Subject: batman-adv: Remove one condition check in batadv_route_unicast_packet It could decrease one condition check to collect some statements in the first condition block. Signed-off-by: Gao Feng Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/routing.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 5f050fbdfff7..7fd740b6e36d 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -719,20 +719,19 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, len = skb->len; res = batadv_send_skb_to_orig(skb, orig_node, recv_if); - if (res == NET_XMIT_SUCCESS) - ret = NET_RX_SUCCESS; - - /* skb was consumed */ - skb = NULL; /* translate transmit result into receive result */ if (res == NET_XMIT_SUCCESS) { + ret = NET_RX_SUCCESS; /* skb was transmitted and consumed */ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, len + ETH_HLEN); } + /* skb was consumed */ + skb = NULL; + put_orig_node: batadv_orig_node_put(orig_node); free_skb: -- cgit v1.2.3 From 5bdaa0ef77065e07be5443b3cea5224ac72c9ac0 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 20 Oct 2016 10:01:43 +0200 Subject: iwlwifi: allow memory debug TLV to specify the memory type Due to some new features and changes, the firmware file will now specify what type of memory to dump, in upper 8 bits of the type field of the TLV. Parse it (types we don't understand are errors) and teach the code to dump periphery memory. 
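The type decoding described above can be illustrated with a small standalone sketch; the mask and type values are copied from the patch below, while the helper itself is hypothetical:

  #include <stdint.h>
  #include <stdio.h>

  /* The memory segment type lives in bits 31..24 of the TLV type word. */
  #define FW_DBG_MEM_TYPE_MASK    0xff000000u
  #define FW_DBG_MEM_TYPE_REGULAR 0x00000000u
  #define FW_DBG_MEM_TYPE_PRPH    0x01000000u

  static const char *mem_type_str(uint32_t type)
  {
          switch (type & FW_DBG_MEM_TYPE_MASK) {
          case FW_DBG_MEM_TYPE_REGULAR:
                  return "regular";
          case FW_DBG_MEM_TYPE_PRPH:
                  return "periphery";
          default:
                  return "unknown (rejected at TLV parse time)";
          }
  }

  int main(void)
  {
          printf("%s\n", mem_type_str(0x01000000u)); /* -> periphery */
          return 0;
  }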
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 12 +++++ drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 15 +++++- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 65 +++++++++++++++++++----- 3 files changed, 79 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 431581229948..a6719d67ac00 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1020,6 +1020,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", dbg_mem->data_type); + switch (type & FW_DBG_MEM_TYPE_MASK) { + case FW_DBG_MEM_TYPE_REGULAR: + case FW_DBG_MEM_TYPE_PRPH: + /* we know how to handle these */ + break; + default: + IWL_ERR(drv, + "Found debug memory segment with invalid type: 0x%x\n", + type); + return -EINVAL; + } + size = sizeof(*pieces->dbg_mem_tlv) * (pieces->n_dbg_mem_tlv + 1); n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 11245b9117c7..c84207576587 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -488,10 +488,23 @@ enum iwl_fw_dbg_monitor_mode { MIPI_MODE = 3, }; +/** + * enum iwl_fw_mem_seg_type - memory segment type + * @FW_DBG_MEM_TYPE_MASK: mask for the type indication + * @FW_DBG_MEM_TYPE_REGULAR: regular memory + * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading) + */ +enum iwl_fw_mem_seg_type { + FW_DBG_MEM_TYPE_MASK = 0xff000000, + FW_DBG_MEM_TYPE_REGULAR = 0x00000000, + FW_DBG_MEM_TYPE_PRPH = 0x01000000, +}; + /** * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments * - * @data_type: the memory segment type to record + * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type + * for what we care about * @ofs: the memory segment offset * @len: the memory segment length, in bytes * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 7df3c3f4749e..e447cd92deb3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -406,6 +406,30 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a02400, .end = 0x00a02758 }, }; +static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) +{ + u32 i; + + for (i = 0; i < len_bytes; i += 4) + *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); +} + +static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) +{ + unsigned long flags; + bool success = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + success = true; + _iwl_read_prph_block(trans, start, len_bytes, data); + iwl_trans_release_nic_access(trans, &flags); + } + + return success; +} + static void iwl_dump_prph(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, const struct iwl_prph_range *iwl_prph_dump_addr, @@ -422,21 +446,18 @@ static void iwl_dump_prph(struct iwl_trans *trans, /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; - int reg; - __le32 *val; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + 
num_bytes_in_chunk); prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); - val = (void *)prph->data; - for (reg = iwl_prph_dump_addr[i].start; - reg <= iwl_prph_dump_addr[i].end; - reg += 4) - *val++ = cpu_to_le32(iwl_read_prph_no_grab(trans, - reg)); + _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, + /* our range is inclusive, hence + 4 */ + iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4, + (void *)prph->data); *data = iwl_fw_error_next_data(*data); } @@ -716,16 +737,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + bool success; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; dump_mem->type = fw_dbg_mem[i].data_type; dump_mem->offset = cpu_to_le32(ofs); - iwl_trans_read_mem_bytes(mvm->trans, ofs, - dump_mem->data, - len); - dump_data = iwl_fw_error_next_data(dump_data); + + switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { + case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): + iwl_trans_read_mem_bytes(mvm->trans, ofs, + dump_mem->data, + len); + success = true; + break; + case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): + success = iwl_read_prph_block(mvm->trans, ofs, len, + (void *)dump_mem->data); + break; + default: + /* + * shouldn't get here, we ignored this kind + * of TLV earlier during the TLV parsing?! + */ + WARN_ON(1); + success = false; + } + + if (success) + dump_data = iwl_fw_error_next_data(dump_data); } if (smem_len) { -- cgit v1.2.3 From c2e27e16f2411155b906db201b7e478144034ffe Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 20 Oct 2016 15:25:00 +0200 Subject: iwlwifi: mvm: properly check for transport data in dump When copying from vmalloc'ed memory to the SG list, don't crash if the transport didn't provide any data. Fixes: 7e62a699aafb ("iwlwifi: mvm: use dev_coredumpsg()") Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index e447cd92deb3..e7b3b712d778 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -848,11 +848,12 @@ dump_trans_data: sg_nents(sg_dump_data), fw_error_dump->op_mode_ptr, fw_error_dump->op_mode_len, 0); - sg_pcopy_from_buffer(sg_dump_data, - sg_nents(sg_dump_data), - fw_error_dump->trans_ptr->data, - fw_error_dump->trans_ptr->len, - fw_error_dump->op_mode_len); + if (fw_error_dump->trans_ptr) + sg_pcopy_from_buffer(sg_dump_data, + sg_nents(sg_dump_data), + fw_error_dump->trans_ptr->data, + fw_error_dump->trans_ptr->len, + fw_error_dump->op_mode_len); dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } -- cgit v1.2.3 From 5d81fbd5df9654940dfc14b4fed193273a83aab2 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 26 Oct 2016 16:04:43 +0300 Subject: iwlwifi: mvm: bump max API to 28 We skipped one release, so bump twice, to 28. 
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-a000.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index d4b73dedf89b..a72e58623d3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -73,8 +73,8 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 -#define IWL7265D_UCODE_API_MAX 26 -#define IWL3168_UCODE_API_MAX 26 +#define IWL7265D_UCODE_API_MAX 28 +#define IWL3168_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index d02ca1491d16..df0fc24e99e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 26 -#define IWL8265_UCODE_API_MAX 26 +#define IWL8000_UCODE_API_MAX 28 +#define IWL8265_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index ff850410d897..a5f0c0bf85ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 26 +#define IWL9000_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c index ea1618525878..82f18d967c40 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL_A000_UCODE_API_MAX 26 +#define IWL_A000_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL_A000_UCODE_API_MIN 24 -- cgit v1.2.3 From 08d785fd809cdaf34d289d917bcb1733a77d9787 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 27 Oct 2016 17:27:23 +0300 Subject: iwlwifi: mvm: simplify paging allocation code Some of the code there is duplicate while the only change is the block size. Unifying it shortens the code and make the difference clearer. 
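The core of the simplification is that only the block size differs between iteration 0 (the 4K CSS block) and the remaining 32K paging blocks. A minimal userspace sketch of that size selection, with sizes taken from the description above and an example block count:

  #include <stdio.h>

  #define FW_PAGING_SIZE     (4 * 1024)   /* CSS block */
  #define PAGING_BLOCK_SIZE  (32 * 1024)  /* regular paging block */

  int main(void)
  {
          int num_of_paging_blk = 3;      /* example value */
          int blk_idx;

          /* Block 0 is the CSS block, the rest are paging blocks --
           * the only per-iteration difference in the unified loop.
           */
          for (blk_idx = 0; blk_idx < num_of_paging_blk + 1; blk_idx++) {
                  int size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;

                  printf("block %d: %d bytes\n", blk_idx, size);
          }
          return 0;
  }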
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 61 +++++++---------------------- 1 file changed, 15 insertions(+), 46 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 872066317fa5..04fe1051b1e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -259,9 +259,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, { struct page *block; dma_addr_t phys = 0; - int blk_idx = 0; - int order, num_of_pages; - int dma_enabled; + int blk_idx, order, num_of_pages, size, dma_enabled; if (mvm->fw_paging_db[0].fw_paging_block) return 0; @@ -274,7 +272,6 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; mvm->num_of_paging_blk = ((num_of_pages - 1) / NUM_OF_PAGE_PER_GROUP) + 1; - mvm->num_of_pages_in_last_blk = num_of_pages - NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); @@ -284,46 +281,13 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, mvm->num_of_paging_blk, mvm->num_of_pages_in_last_blk); - /* allocate block of 4Kbytes for paging CSS */ - order = get_order(FW_PAGING_SIZE); - block = alloc_pages(GFP_KERNEL, order); - if (!block) { - /* free all the previous pages since we failed */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - - mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE; - - if (dma_enabled) { - phys = dma_map_page(mvm->trans->dev, block, 0, - PAGE_SIZE << order, DMA_BIDIRECTIONAL); - if (dma_mapping_error(mvm->trans->dev, phys)) { - /* - * free the previous pages and the current one since - * we failed to map_page. - */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; - } else { - mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | - blk_idx << BLOCK_2_EXP_SIZE; - } - - IWL_DEBUG_FW(mvm, - "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", - order); - /* - * allocate blocks in dram. - * since that CSS allocated in fw_paging_db[0] loop start from index 1 + * Allocate CSS and paging blocks in dram. */ - for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - /* allocate block of PAGING_BLOCK_SIZE (32K) */ - order = get_order(PAGING_BLOCK_SIZE); + for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ + size = blk_idx ? 
PAGING_BLOCK_SIZE : FW_PAGING_SIZE; + order = get_order(size); block = alloc_pages(GFP_KERNEL, order); if (!block) { /* free all the previous pages since we failed */ @@ -332,7 +296,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, } mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE; + mvm->fw_paging_db[blk_idx].fw_paging_size = size; if (dma_enabled) { phys = dma_map_page(mvm->trans->dev, block, 0, @@ -353,9 +317,14 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, blk_idx << BLOCK_2_EXP_SIZE; } - IWL_DEBUG_FW(mvm, - "Paging: allocated 32K bytes (order %d) for firmware paging.\n", - order); + if (!blk_idx) + IWL_DEBUG_FW(mvm, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + else + IWL_DEBUG_FW(mvm, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); } return 0; -- cgit v1.2.3 From 850fe9af62b2ec28b4c1fc5d02924cd469452283 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 27 Oct 2016 17:58:27 +0300 Subject: iwlwifi: mvm: replace the number of blocks calculation The logic in the paging calculations is a bit hard to follow. Replace it with a kernel define. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 04fe1051b1e5..606b3fc18d46 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -270,8 +270,8 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; - mvm->num_of_paging_blk = ((num_of_pages - 1) / - NUM_OF_PAGE_PER_GROUP) + 1; + mvm->num_of_paging_blk = + DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); mvm->num_of_pages_in_last_blk = num_of_pages - NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); -- cgit v1.2.3 From eef187a7b8a144a1a0c35c1ccccadc8fd5218504 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 25 Oct 2016 11:38:31 +0300 Subject: iwlwifi: enlarge number of ucode sections The maximum number of firmware sections is now 32 instead of 16 for a000 devices. Set the appropriate define. Avoid out of bounds access in case there are more sections than the maximum set by driver. Make the driver extensible to FW size changes by allocating the section memory dynamically. 
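A minimal userspace sketch of the grow-by-one section array this change switches to; the driver itself uses krealloc()/kcalloc() with GFP_KERNEL, and the names and sizes here are illustrative only:

  #include <stdio.h>
  #include <stdlib.h>

  struct fw_sec { unsigned int offset, size; };

  struct fw_img_parsing {
          struct fw_sec *sec;
          int sec_counter;
  };

  /* Grow the array by one entry per parsed section instead of using a
   * fixed IWL_UCODE_SECTION_MAX-sized array.
   */
  static int store_section(struct fw_img_parsing *img,
                           unsigned int off, unsigned int size)
  {
          struct fw_sec *sec;

          sec = realloc(img->sec, sizeof(*sec) * (img->sec_counter + 1));
          if (!sec)
                  return -1;
          img->sec = sec;

          sec = &img->sec[img->sec_counter++];
          sec->offset = off;
          sec->size = size;
          return 0;
  }

  int main(void)
  {
          struct fw_img_parsing img = { 0 };

          store_section(&img, 0x0000, 0x1000);
          store_section(&img, 0x1000, 0x2000);
          printf("%d sections\n", img.sec_counter);
          free(img.sec);
          return 0;
  }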
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/ucode.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 31 ++++++++++++++++++----- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 1 - drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 3 ++- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4 +-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 4 +-- 8 files changed, 33 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 8c0719468d00..2a04d0cd71ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -163,7 +163,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, REGULATORY_DISABLE_BEACON_HINTS; #ifdef CONFIG_PM_SLEEP - if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec && priv->trans->ops->d3_suspend && priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c index c7509c51e9d9..d6013bfe991c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c @@ -407,7 +407,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv) lockdep_assert_held(&priv->mutex); /* No init ucode required? Curious, but maybe ok */ - if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) + if (!priv->fw->img[IWL_UCODE_INIT].num_sec) return 0; iwl_init_notification_wait(&priv->notif_wait, &calib_wait, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index a6719d67ac00..1d1af4bc1530 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -166,8 +166,9 @@ static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img) { int i; - for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) + for (i = 0; i < img->num_sec; i++) iwl_free_fw_desc(drv, &img->sec[i]); + kfree(img->sec); } static void iwl_dealloc_ucode(struct iwl_drv *drv) @@ -240,7 +241,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) } struct fw_img_parsing { - struct fw_sec sec[IWL_UCODE_SECTION_MAX]; + struct fw_sec *sec; int sec_counter; }; @@ -383,6 +384,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, struct fw_img_parsing *img; struct fw_sec *sec; struct fw_sec_parsing *sec_parse; + size_t alloc_size; if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) return -1; @@ -390,6 +392,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, sec_parse = (struct fw_sec_parsing *)data; img = &pieces->img[type]; + + alloc_size = sizeof(*img->sec) * (img->sec_counter + 1); + sec = krealloc(img->sec, alloc_size, GFP_KERNEL); + if (!sec) + return -ENOMEM; + img->sec = sec; + sec = &img->sec[img->sec_counter]; sec->offset = le32_to_cpu(sec_parse->offset); @@ -1089,12 +1098,18 @@ static int iwl_alloc_ucode(struct iwl_drv *drv, enum iwl_ucode_type type) { int i; - for (i = 0; - i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i); - i++) - if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]), - get_sec(pieces, type, i))) + struct fw_desc *sec; + + sec = kcalloc(pieces->img[type].sec_counter, 
sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + drv->fw.img[type].sec = sec; + drv->fw.img[type].num_sec = pieces->img[type].sec_counter; + + for (i = 0; i < pieces->img[type].sec_counter; i++) + if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i))) return -ENOMEM; + return 0; } @@ -1457,6 +1472,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); free: + for (i = 0; i < ARRAY_SIZE(pieces->img); i++) + kfree(pieces->img[i].sec); kfree(pieces->dbg_mem_tlv); kfree(pieces); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index c84207576587..d01701ee4777 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -379,7 +379,6 @@ enum iwl_ucode_tlv_capa { * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address. */ -#define IWL_UCODE_SECTION_MAX 16 #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC #define PAGING_SEPARATOR_SECTION 0xAAAABBBB diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 710ecb490bfc..d323b70b510a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -132,7 +132,8 @@ struct fw_desc { }; struct fw_img { - struct fw_desc sec[IWL_UCODE_SECTION_MAX]; + struct fw_desc *sec; + int num_sec; bool is_dual_cpus; u32 paging_mem_size; }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 606b3fc18d46..b278e44e97ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -190,7 +190,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) * CPU2 paging CSS * CPU2 paging image (including instruction and data) */ - for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) { + for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { sec_idx++; break; @@ -201,7 +201,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) * If paging is enabled there should be at least 2 more sections left * (one for CSS and one for Paging data) */ - if (sec_idx >= ARRAY_SIZE(image->sec) - 1) { + if (sec_idx >= image->num_sec - 1) { IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n"); iwl_free_fw_paging(mvm); return -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 039cb2ad2d3e..71f9aa9f7c7d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -677,7 +677,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->wowlan = &mvm->wowlan; } - if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index b10e3633df1a..bf0ecdcf7402 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -805,7 +805,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, (*first_ucode_section)++; 
} - for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { + for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* @@ -880,7 +880,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, (*first_ucode_section)++; } - for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { + for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* -- cgit v1.2.3 From b86dd74f8c73212db4c4c9349d46ce2350fb8106 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 29 Sep 2016 15:16:03 +0300 Subject: iwlwifi: mvm: change iwl_mvm_tx_csum to return value Currently the function changes the TX cmd itself. Make it more generic by returning a value, as preperation to the new TX cmd. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 66957ac12ca4..73a90d4a43eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -102,14 +102,13 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) -static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_hdr *hdr, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd) +static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info) { + u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) u16 mh_len = ieee80211_hdrlen(hdr->frame_control); - u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist); u8 protocol = 0; /* @@ -117,7 +116,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, * compute it */ if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD) - return; + goto out; /* We do not expect to be requested to csum stuff we do not support */ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || @@ -125,7 +124,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, skb->protocol != htons(ETH_P_IPV6)), "No support for requested checksum\n")) { skb_checksum_help(skb); - return; + goto out; } if (skb->protocol == htons(ETH_P_IP)) { @@ -145,7 +144,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, protocol != NEXTHDR_HOP && protocol != NEXTHDR_DEST) { skb_checksum_help(skb); - return; + goto out; } hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); @@ -159,7 +158,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { WARN_ON_ONCE(1); skb_checksum_help(skb); - return; + goto out; } /* enable L4 csum */ @@ -191,8 +190,9 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; - tx_cmd->offload_assist = cpu_to_le16(offload_assist); +out: #endif + return offload_assist; } /* @@ -295,7 +295,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)))) tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD)); - iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd); + tx_cmd->offload_assist |= + cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info)); } /* -- cgit v1.2.3 From 5ec295dbbd674dfbddfcac091857264804f5dc25 Mon Sep 17 
00:00:00 2001 From: Sara Sharon Date: Tue, 1 Nov 2016 10:52:11 +0200 Subject: iwlwifi: mvm: separate rate calculation to a new function In preparation for the new TX cmd - move the rate calculation to a diffrent function, and make it independent of the TX command. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 84 +++++++++++++++-------------- 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 73a90d4a43eb..2b2db38eee3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -299,6 +299,50 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info)); } +static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta) +{ + int rate_idx; + u8 rate_plcp; + u32 rate_flags; + + /* HT rate doesn't make sense for a non data frame */ + WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, + "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n", + info->control.rates[0].flags, + info->control.rates[0].idx); + + rate_idx = info->control.rates[0].idx; + /* if the rate isn't a well known legacy rate, take the lowest one */ + if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) + rate_idx = rate_lowest_index( + &mvm->nvm_data->bands[info->band], sta); + + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == NL80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + + /* For 2.4 GHZ band, check that there is no need to remap */ + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); + + if (info->band == NL80211_BAND_2GHZ && + !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; + else + rate_flags = + BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + return (u32)rate_plcp | rate_flags; +} + /* * Sets the fields in the Tx cmd that are rate related */ @@ -306,10 +350,6 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { - u32 rate_flags; - int rate_idx; - u8 rate_plcp; - /* Set retry limit on RTS packets */ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; @@ -338,46 +378,12 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } - /* HT rate doesn't make sense for a non data frame */ - WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, - "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n", - info->control.rates[0].flags, - info->control.rates[0].idx, - le16_to_cpu(fc)); - - rate_idx = info->control.rates[0].idx; - /* if the rate isn't a well known legacy rate, take the lowest one */ - if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) - rate_idx = rate_lowest_index( - &mvm->nvm_data->bands[info->band], sta); - - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == NL80211_BAND_5GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - - /* For 2.4 GHZ band, check that there is no need to remap */ - 
BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); - - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); - mvm->mgmt_last_antenna_idx = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), mvm->mgmt_last_antenna_idx); - if (info->band == NL80211_BAND_2GHZ && - !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; - else - rate_flags = - BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; - - /* Set CCK flag as needed */ - if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags); + tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, -- cgit v1.2.3 From 34118c25a9093e6ed99ad1c38ddffa4998d3c13e Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 8 Nov 2016 18:34:42 +0200 Subject: iwlwifi: mvm: support version 2 of stored beacon notification For 9000 devices withg upload, FW cannot send the entire phy flags, and will send only the band - which is what we really care about anyway. Change is backward compatible. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 6 +++--- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index ae12badc0c2a..567597c26115 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -2075,7 +2075,7 @@ struct iwl_mu_group_mgmt_notif { * @system_time: system time on air rise * @tsf: TSF on air rise * @beacon_timestamp: beacon on air rise - * @phy_flags: general phy flags: band, modulation, etc. + * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition * @channel: channel this beacon was received on * @rates: rate in ucode internal format * @byte_count: frame's byte count @@ -2084,12 +2084,12 @@ struct iwl_stored_beacon_notif { __le32 system_time; __le64 tsf; __le32 beacon_timestamp; - __le16 phy_flags; + __le16 band; __le16 channel; __le32 rates; __le32 byte_count; u8 data[MAX_STORED_BEACON_SIZE]; -} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ #define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 4a0874e40731..ebf6c071eb36 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1565,7 +1565,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = - (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? + (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), -- cgit v1.2.3 From d8320d75b59ecdc1b8e60ac793d3a54d84333a18 Mon Sep 17 00:00:00 2001 From: Jürg Billeter Date: Mon, 10 Oct 2016 18:30:00 +0200 Subject: iwlwifi: fix MODULE_FIRMWARE for 6030 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit IWL6000G2B_UCODE_API_MAX is not defined. 
ucode_api_max of IWL_DEVICE_6030 uses IWL6000G2_UCODE_API_MAX. Use this also for MODULE_FIRMWARE. Fixes: 9d9b21d1b616 ("iwlwifi: remove IWL_*_UCODE_API_OK") Signed-off-by: Jürg Billeter Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-6000.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c index 0b9f6a7bc834..39335b7b0c16 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -371,4 +371,4 @@ const struct iwl_cfg iwl6000_3agn_cfg = { MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); -- cgit v1.2.3 From 7b1be5ad26d36a8ed5e930793f269f3d844166f1 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 9 Nov 2016 10:07:14 +0200 Subject: iwlwifi: mvm: remove unused variable in iwl_mvm_handle_statistics() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The temperature variable is set but not used in iwl_mvm_handle_statistics(). Remove it to avoid the following compiler warning when W=1 is used: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/mvm/rx.c: In function ‘iwl_mvm_handle_rx_statistics’: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/mvm/rx.c:759:6: warning: variable ‘temperature’ set but not used [-Wunused-but-set-variable] u32 temperature; ^ Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 0e60e38b2acf..e16687d5afaa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -621,12 +621,10 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, }; int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) : sizeof(struct iwl_notif_statistics_v10); - u32 temperature; if (iwl_rx_packet_payload_len(pkt) != expected_size) goto invalid; - temperature = le32_to_cpu(stats->general.radio_temperature); data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = stats->general.beacon_filter_average_energy; -- cgit v1.2.3 From c94aace0030eed6d26827aaecba207dbc30af112 Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Tue, 8 Nov 2016 21:50:15 -0800 Subject: iwlwifi: mvm: rs: Remove unused 'mvmvif'/'mvmsta' variables mvmvif is defined and set in rs_mimo_allow but not used. Compiling iwlwifi with W=1 gives the following warning, remove it. mvmsta is used only to obtain mvmvif so remove it as well. iwlwifi/mvm/rs.c: In function 'rs_mimo_allow': iwlwifi/mvm/rs.c:165:22: warning: variable 'mvmvif' set but not used.[-Wunused-but-set-variable] This fix removes calls to iwl_mvm_sta_from_mac & iwl_mvm_vif_from_mac. They are both accessors, and do not have any side-effects. Commit e621c2282e31 ("iwlwifi: rs: Remove workaround that disables MIMO on P2P") removed a workaround that disabled MIMO on P2P, 'mvmvif' was used for that workaround, but not removed with it. 
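Several patches in this series silence the same diagnostic; a minimal sketch of what -Wunused-but-set-variable complains about (the names below are made up for illustration and are not from the driver, sketch only -- not part of the patch):

    /* compile with W=1 so -Wunused-but-set-variable is enabled */
    static int get_value(void) { return 42; }   /* stand-in accessor, no side effects */

    static int example(void)
    {
            int val;

            val = get_value();   /* 'val' is written here ...                    */
            return 0;            /* ... but never read afterwards, so gcc warns
                                  * "variable 'val' set but not used"            */
    }

Because the accessors removed in this patch have no side effects, dropping both the locals and the calls changes no behaviour.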
Fixes: e621c2282e31 ("iwlwifi: rs: Remove workaround that disables MIMO on P2P") Signed-off-by: Kirtika Ruchandani Cc: Alexander Bondar Cc: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 227c5ed9cbe6..0b79f4a43eaf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -161,9 +161,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { - struct iwl_mvm_sta *mvmsta; - struct iwl_mvm_vif *mvmvif; - if (!sta->ht_cap.ht_supported) return false; @@ -176,9 +173,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) return false; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - if (mvm->nvm_data->sku_cap_mimo_disabled) return false; -- cgit v1.2.3 From 9d50443575fa379c3c72848162af1375a4d43b50 Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Tue, 8 Nov 2016 21:50:33 -0800 Subject: iwlwifi: mvm: rs: Remove unused 'mcs' variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 5fc0f76c4 introduced Rx stats from debugfs, the function iwl_mvm_reset_frame_stats from that commit defines and sets mcs but does not use it. Compiling iwlwifi with W=1 gives this warning - iwlwifi/mvm/rs.c: In function ‘iwl_mvm_update_frame_stats’: iwlwifi/mvm/rs.c:3074:14: warning: variable ‘mcs’ set but not used [-Wunused-but-set-variable] Fixes: 5fc0f76c43bd (iwlwifi: mvm: add Rx frames statistics via debugfs) Signed-off-by: Kirtika Ruchandani Cc: Eyal Shapira Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 0b79f4a43eaf..80f99c365b6a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3065,7 +3065,7 @@ static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) { - u8 nss = 0, mcs = 0; + u8 nss = 0; spin_lock(&mvm->drv_stats_lock); @@ -3093,11 +3093,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) if (rate & RATE_MCS_HT_MSK) { mvm->drv_rx_stats.ht_frames++; - mcs = rate & RATE_HT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; } else if (rate & RATE_MCS_VHT_MSK) { mvm->drv_rx_stats.vht_frames++; - mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; } else { -- cgit v1.2.3 From 3ce4a03852d6dd3fd28c2fb2ee9f89bb9ccf9a9b Mon Sep 17 00:00:00 2001 From: Kirtika Ruchandani Date: Tue, 8 Nov 2016 21:50:48 -0800 Subject: iwlwifi: pcie: trans: Remove unused 'shift_param' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit shift_param is defined and set in iwl_pcie_load_cpu_sections but not used. Fix this to avoid -Wunused-but-set-variable warning. 
The code using it turned into dead code with commit dcab8ecd5617 ("iwlwifi: mvm: support ucode load for family_8000 B0 only") which added a separate function iwl_pcie_load_given_ucode_8000 (then 8000b) for IWL_DEVICE_FAMILY_8000. Commit 76f8c0e17edc ("iwlwifi: pcie: remove dead code") removed the dead code but left shift_param as is. iwlwifi/pcie/trans.c: In function ‘iwl_pcie_load_cpu_sections’: iwlwifi/pcie/trans.c:871:6: warning: variable ‘shift_param’ set but not used [-Wunused-but-set-variable] Fixes: dcab8ecd5617 ("iwlwifi: mvm: support ucode load for family_8000 B0 only") Fixes: 76f8c0e17edc ("iwlwifi: pcie: remove dead code") Signed-off-by: Kirtika Ruchandani Cc: Sara Sharon Cc: Luca Coelho Cc: Liad Kaufman Cc: Emmanuel Grumbach [removed some unnecessary braces] Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index bf0ecdcf7402..16790db650c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -868,17 +868,13 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, int cpu, int *first_ucode_section) { - int shift_param; int i, ret = 0; u32 last_read_idx = 0; - if (cpu == 1) { - shift_param = 0; + if (cpu == 1) *first_ucode_section = 0; - } else { - shift_param = 16; + else (*first_ucode_section)++; - } for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; -- cgit v1.2.3 From ad04e088d48a37871a5635de4acdfa689a0b2429 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 9 Nov 2016 09:48:49 +0200 Subject: iwlwifi: dvm: make rs_tl_get_load() return void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The return value from this function is never used, so make it return void to avoid getting the following warning when compiling wiht W=1: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/dvm/rs.c: In function ‘rs_tl_turn_on_agg_for_tid’: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/dvm/rs.c:400:6: warning: variable ‘load’ set but not used [-Wunused-but-set-variable] u32 load; ^ Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index b95c2d76db33..710dbbefd551 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -364,7 +364,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv, /* get the traffic load value for tid */ -static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +static void rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) { u32 curr_time = jiffies_to_msecs(jiffies); u32 time_diff; @@ -372,14 +372,14 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) struct iwl_traffic_load *tl = NULL; if (tid >= IWL_MAX_TID_COUNT) - return 0; + return; tl = &(lq_data->load[tid]); curr_time -= curr_time % TID_ROUND_VALUE; if (!(tl->queue_count)) - return 0; + return; time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); index = time_diff / TID_QUEUE_CELL_SPACING; @@ -388,8 +388,6 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) /* TID_MAX_TIME_DIFF */ if (index >= TID_QUEUE_MAX_SIZE) rs_tl_rm_old_stats(tl, curr_time); - - return 
tl->total; } static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, @@ -397,7 +395,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, struct ieee80211_sta *sta) { int ret = -EAGAIN; - u32 load; /* * Don't create TX aggregation sessions when in high @@ -410,7 +407,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, return ret; } - load = rs_tl_get_load(lq_data, tid); + rs_tl_get_load(lq_data, tid); IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); -- cgit v1.2.3 From 727c02dfb8480fd5b233a18786ee5de2c0be741c Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 26 Oct 2016 14:28:23 +0300 Subject: iwlwifi: pcie: cleanup rfkill checks Move repeating code to a separate function. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 39 ++++++++++++------------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 16790db650c1..c1d99d15796d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1062,6 +1062,20 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, &first_ucode_section); } +static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) +{ + bool hw_rfkill = iwl_is_rfkill_set(trans); + + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans->status); + else + clear_bit(STATUS_RFKILL, &trans->status); + + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + + return hw_rfkill; +} + static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1204,12 +1218,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_is_rfkill_set(trans); - if (hw_rfkill) - set_bit(STATUS_RFKILL, &trans->status); - else - clear_bit(STATUS_RFKILL, &trans->status); - iwl_trans_pcie_rf_kill(trans, hw_rfkill); + hw_rfkill = iwl_trans_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; @@ -1257,13 +1266,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, ret = iwl_pcie_load_given_ucode(trans, fw); /* re-check RF-Kill state since we may have missed the interrupt */ - hw_rfkill = iwl_is_rfkill_set(trans); - if (hw_rfkill) - set_bit(STATUS_RFKILL, &trans->status); - else - clear_bit(STATUS_RFKILL, &trans->status); - - iwl_trans_pcie_rf_kill(trans, hw_rfkill); + hw_rfkill = iwl_trans_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; @@ -1655,7 +1658,6 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool hw_rfkill; int err; lockdep_assert_held(&trans_pcie->mutex); @@ -1679,13 +1681,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* Set is_down to false here so that...*/ trans_pcie->is_down = false; - hw_rfkill = iwl_is_rfkill_set(trans); - if (hw_rfkill) - set_bit(STATUS_RFKILL, &trans->status); - else - clear_bit(STATUS_RFKILL, &trans->status); - /* ... 
rfkill can call stop_device and set it false if needed */ - iwl_trans_pcie_rf_kill(trans, hw_rfkill); + /* ...rfkill can call stop_device and set it false if needed */ + iwl_trans_check_hw_rf_kill(trans); /* Make sure we sync here, because we'll need full access later */ if (low_power) -- cgit v1.2.3 From 18c44b271b672dd7961399ce6ad099eac5ae1a6a Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 9 Nov 2016 10:05:18 +0200 Subject: iwlwifi: mvm: remove unused sta_id variable in iwl_mvm_change_queue_owner() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the sta_id variable, to avoid this compile warning when compiling with W=1: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/mvm/sta.c: In function ‘iwl_mvm_change_queue_owner’: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/mvm/sta.c:871:5: warning: variable ‘sta_id’ set but not used [-Wunused-but-set-variable] s8 sta_id; ^ Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 636c8b03e318..cf8222a8b588 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -868,7 +868,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) .scd_queue = queue, .action = SCD_CFG_UPDATE_QUEUE_TID, }; - s8 sta_id; int tid; unsigned long tid_bitmap; int ret; @@ -876,7 +875,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->queue_info_lock); - sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; spin_unlock_bh(&mvm->queue_info_lock); -- cgit v1.2.3 From a43ae1de13f48831a3bd530e419f43390f3ba3c4 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 9 Nov 2016 09:45:58 +0200 Subject: iwlwifi: dvm: remove unused variable compiler warning in debugfs.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When compiling with W=1, we get the following warning in debugfs.c: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c: In function ‘iwl_dbgfs_fw_restart_write’: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c:2313:6: warning: variable ‘ret’ set but not used [-Wunused-but-set-variable] int ret; ^ Fix it by ignoring the return value in this case. 
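This fix and the following mvm one rely on the same idiom; a minimal sketch, with a made-up helper name standing in for the real call (sketch only -- not part of the patch):

    int __maybe_unused ret;          /* __maybe_unused maps to the compiler's
                                      * "unused" attribute, so W=1 stays quiet  */

    ret = send_restart_cmd();        /* an error return is expected here; the
                                      * value is deliberately ignored           */

The annotation documents that ignoring the value is intentional, without having to delete the assignment at the call site.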
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index affe760c8c22..376c79337a0e 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, { struct iwl_priv *priv = file->private_data; bool restart_fw = iwlwifi_mod_params.restart_fw; - int ret; + int __maybe_unused ret; iwlwifi_mod_params.restart_fw = true; -- cgit v1.2.3 From 758d1a8a8af04c70addb26761796019df267f420 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 9 Nov 2016 10:02:46 +0200 Subject: iwlwifi: mvm: mark ret as maybe_unused in iwl_dbgfs_fw_restart_write() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The value returned from iwl_mvm_send_cmd_pdu() in this function is not used and always returns an error (which is the whole point of the call). Tag the ret variable with __maybe_unused to avoid this compile warning when W=1 is used: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c: In function ‘iwl_dbgfs_fw_restart_write’: /home/luca/iwlwifi/stack-dev/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c:875:6: warning: variable ‘ret’ set but not used [-Wunused-but-set-variable] int ret; ^ Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 7b7d2a146e30..a260cd503200 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -798,7 +798,7 @@ static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { - int ret; + int __maybe_unused ret; mutex_lock(&mvm->mutex); -- cgit v1.2.3 From cf90da352a329e755df923fd2c9e810cb75c83a9 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 7 Nov 2016 15:22:28 +0200 Subject: iwlwifi: mvm: use mvm_disable_queue instead of sharing logic When removing inactive queue - use the central disable queue function. This is needed due to a000 changes to come, but is a proper cleanup anyway. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 4 ++-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 29 +++++++------------------- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 10 +++++---- 3 files changed, 16 insertions(+), 27 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 4a9cb76b7611..a672aa71c656 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1657,8 +1657,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, * Disable a TXQ. * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. 
*/ -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags); +int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags); int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); /* Return a bitmask with all the hw supported queues, except for the diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index cf8222a8b588..40e7fc5f1f1f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -454,13 +454,6 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) rcu_read_unlock(); - spin_lock_bh(&mvm->queue_info_lock); - /* Unmap MAC queues and TIDs from this queue */ - mvm->queue_info[queue].hw_queue_to_mac80211 = 0; - mvm->queue_info[queue].hw_queue_refcount = 0; - mvm->queue_info[queue].tid_bitmap = 0; - spin_unlock_bh(&mvm->queue_info_lock); - return disable_agg_tids; } @@ -755,28 +748,22 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * first */ if (using_inactive_queue) { - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_DISABLE_QUEUE, - }; - u8 txq_curr_ac; - - disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); + u8 txq_curr_ac, sta_id; spin_lock_bh(&mvm->queue_info_lock); txq_curr_ac = mvm->queue_info[queue].mac80211_ac; - cmd.sta_id = mvm->queue_info[queue].ra_sta_id; - cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac]; - cmd.tid = mvm->queue_info[queue].txq_tid; + sta_id = mvm->queue_info[queue].ra_sta_id; spin_unlock_bh(&mvm->queue_info_lock); + disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ if (disable_agg_tids) iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), - &cmd); + + ret = iwl_mvm_disable_txq(mvm, queue, + mvmsta->vif->hw_queue[txq_curr_ac], + tid, 0); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", @@ -791,7 +778,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } /* If TXQ is allocated to another STA, update removal in FW */ - if (cmd.sta_id != mvmsta->sta_id) + if (sta_id != mvmsta->sta_id) iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d04babd99b53..2beea3b98e52 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -706,8 +706,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, } } -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags) +int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, @@ -720,7 +720,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { spin_unlock_bh(&mvm->queue_info_lock); - return; + return 0; } mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); @@ -760,7 +760,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) { spin_unlock_bh(&mvm->queue_info_lock); - return; + return 0; } cmd.sta_id = 
mvm->queue_info[queue].ra_sta_id; @@ -791,6 +791,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, if (ret) IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); + + return ret; } /** -- cgit v1.2.3 From 758c98e089389e90732fe45087fdee3177493a81 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 29 Sep 2016 16:07:35 +0300 Subject: iwlwifi: mvm: cleanup redundant assignment This set of station id is redundant - it is being initialised above. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 2beea3b98e52..26b853ef195f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -693,10 +693,6 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .tid = cfg->tid, }; - /* Set sta_id in the command, if it exists */ - if (iwl_mvm_is_dqa_supported(mvm)) - cmd.sta_id = cfg->sta_id; - iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), -- cgit v1.2.3 From c33705188c493b7de3b8dc2956d67de91b444727 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Mon, 21 Nov 2016 23:00:32 +0800 Subject: batman-adv: Treat NET_XMIT_CN as transmit successfully The tc could return NET_XMIT_CN as one congestion notification, but it does not mean the packet is lost. Other modules like ipvlan, macvlan, and others treat NET_XMIT_CN as success too. So batman-adv should handle NET_XMIT_CN also as NET_XMIT_SUCCESS. Signed-off-by: Gao Feng [sven@narfation.org: Moved NET_XMIT_CN handling to batadv_send_skb_packet] Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/send.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index d7308263b8fa..d9b2889064a6 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -115,7 +115,7 @@ int batadv_send_skb_packet(struct sk_buff *skb, * congestion and traffic shaping, it drops and returns NET_XMIT_DROP * (which is > 0). This will not be treated as an error. 
*/ - return dev_queue_xmit(skb); + return net_xmit_eval(dev_queue_xmit(skb)); send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; -- cgit v1.2.3 From 731977e97b3697454a862fec656c2561eabc0b87 Mon Sep 17 00:00:00 2001 From: Amadeusz Sławiński Date: Tue, 24 Jan 2017 16:42:10 +0100 Subject: mac80211: use helper function to access ieee802_1d_to_ac[] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cleanup patch to make use of ieee80211_ac_from_tid() to retrieve ac from ieee802_1d_to_ac[] Signed-off-by: Amadeusz Sławiński Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 2 +- net/mac80211/status.c | 2 +- net/mac80211/tx.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index b791c4190564..50ca3828b124 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1391,7 +1391,7 @@ EXPORT_SYMBOL(ieee80211_sta_pspoll); void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); - u8 ac = ieee802_1d_to_ac[tid & 7]; + int ac = ieee80211_ac_from_tid(tid); /* * If this AC is not trigger-enabled do nothing unless the diff --git a/net/mac80211/status.c b/net/mac80211/status.c index d6a1bfaa7a81..a3af6e1bfd98 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -95,7 +95,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, */ if (*p & IEEE80211_QOS_CTL_EOSP) *p &= ~IEEE80211_QOS_CTL_EOSP; - ac = ieee802_1d_to_ac[tid & 7]; + ac = ieee80211_ac_from_tid(tid); } else { ac = IEEE80211_AC_BE; } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 986de098803d..ba8d7db0a071 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1414,7 +1414,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, txqi->txq.sta = &sta->sta; sta->sta.txq[tid] = &txqi->txq; txqi->txq.tid = tid; - txqi->txq.ac = ieee802_1d_to_ac[tid & 7]; + txqi->txq.ac = ieee80211_ac_from_tid(tid); } else { sdata->vif.txq = &txqi->txq; txqi->txq.tid = 0; @@ -4659,7 +4659,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, enum nl80211_band band) { - int ac = ieee802_1d_to_ac[tid & 7]; + int ac = ieee80211_ac_from_tid(tid); skb_reset_mac_header(skb); skb_set_queue_mapping(skb, ac); -- cgit v1.2.3 From 228c8c6b1f4376788e9d5ab00d50b10228eb40d3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 26 Jan 2017 17:15:44 +0100 Subject: wireless: define cipher/AKM suites using a macro The spec writes cipher/AKM suites as something like 00-0F-AC:9, but the part after the colon isn't hex, it's decimal, so that we've already had a few mistakes (in other code, or unmerged patches) to e.g. write 0x000FAC10 instead of 0x000FAC0A. Use a macro to avoid that problem. 
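A quick worked check of the macro introduced below, using values from the diff: the OUI occupies the high three bytes and the decimal suite number the low byte, so for suite 00-0F-AC:10

    SUITE(0x000FAC, 10) == (0x000FAC << 8) | 10 == 0x000FAC0A   /* CCMP-256 */

whereas the easy mistake 0x000FAC10 would actually be SUITE(0x000FAC, 16), i.e. suite 00-0F-AC:16, which is not what the spec assigns to CCMP-256.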
Reviewed-by: Luca Coelho Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 46 ++++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 87d1937e4671..02768de209d6 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2324,31 +2324,33 @@ enum ieee80211_sa_query_action { }; +#define SUITE(oui, id) (((oui) << 8) | (id)) + /* cipher suite selectors */ -#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 -#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 -#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02 -/* reserved: 0x000FAC03 */ -#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 -#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 -#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 -#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08 -#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09 -#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A -#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B -#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C -#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D - -#define WLAN_CIPHER_SUITE_SMS4 0x00147201 +#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0) +#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1) +#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2) +/* reserved: SUITE(0x000FAC, 3) */ +#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4) +#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5) +#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6) +#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8) +#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9) +#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10) +#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11) +#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12) +#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13) + +#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1) /* AKM suite selectors */ -#define WLAN_AKM_SUITE_8021X 0x000FAC01 -#define WLAN_AKM_SUITE_PSK 0x000FAC02 -#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05 -#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06 -#define WLAN_AKM_SUITE_TDLS 0x000FAC07 -#define WLAN_AKM_SUITE_SAE 0x000FAC08 -#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09 +#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) +#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) +#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) +#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) +#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) +#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) +#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) #define WLAN_MAX_KEY_LEN 32 -- cgit v1.2.3 From 27d807180ae0a9e50d90adf9b22573c21be904c2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 25 Jan 2017 23:15:53 +0100 Subject: ISDN: eicon: reduce stack size of sig_ind function I noticed that this function uses a lot of kernel stack when the "latent entropy" plugin is enabled: drivers/isdn/hardware/eicon/message.c: In function 'sig_ind': drivers/isdn/hardware/eicon/message.c:6113:1: error: the frame size of 1168 bytes is larger than 1152 bytes [-Werror=frame-larger-than=] We currently don't warn about this, as we raise the warning limit to 2048 bytes in mainline, but I'd like to lower that limit again in the future, and this function can easily be changed to be more efficient and avoid that warning, by making some of its local variables 'const'. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/isdn/hardware/eicon/message.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 1a1d99704fe6..5dcfa2913ceb 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *); static void listen_check(DIVA_CAPI_ADAPTER *); static byte AddInfo(byte **, byte **, byte *, byte *); static byte getChannel(API_PARSE *); -static void IndParse(PLCI *, word *, byte **, byte); +static void IndParse(PLCI *, const word *, byte **, byte); static byte ie_compare(byte *, byte *); static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *); static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word); @@ -4858,7 +4858,7 @@ static void sig_ind(PLCI *plci) /* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */ /* SMSG is situated at the end because its 0 (for compatibility reasons */ /* (see Info_Mask Bit 4, first IE. then the message type) */ - word parms_id[] = + static const word parms_id[] = {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA, UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW, RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR, @@ -4866,12 +4866,12 @@ static void sig_ind(PLCI *plci) /* 14 FTY repl by ESC_CHI */ /* 18 PI repl by ESC_LAW */ /* removed OAD changed to 0xff for future use, OAD is multiIE now */ - word multi_fac_id[] = {1, FTY}; - word multi_pi_id[] = {1, PI}; - word multi_CiPN_id[] = {1, OAD}; - word multi_ssext_id[] = {1, ESC_SSEXT}; + static const word multi_fac_id[] = {1, FTY}; + static const word multi_pi_id[] = {1, PI}; + static const word multi_CiPN_id[] = {1, OAD}; + static const word multi_ssext_id[] = {1, ESC_SSEXT}; - word multi_vswitch_id[] = {1, ESC_VSWITCH}; + static const word multi_vswitch_id[] = {1, ESC_VSWITCH}; byte *cau; word ncci; @@ -8924,7 +8924,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a) /* functions for all parameters sent in INDs */ /*------------------------------------------------------------------*/ -static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize) +static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize) { word ploc; /* points to current location within packet */ byte w; -- cgit v1.2.3 From 80c8eae6ee66b52e1304c9ffd02b4472d5976608 Mon Sep 17 00:00:00 2001 From: Satanand Burla Date: Thu, 26 Jan 2017 11:52:35 -0800 Subject: liquidio: Avoid accessing skb after submitting to input queue Accessing skb after submitting to input queue can cause access to stale pointers if the skb ends up being transmitted and freed by that time. Signed-off-by: Satanand Burla Signed-off-by: Derek Chickles Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 6 +++--- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 5ee3f007c613..9261ddc06852 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3316,11 +3316,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index e96cf6cdecfd..a6587d7019ed 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -2433,11 +2433,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; -- cgit v1.2.3 From 55ed0ce0898e15fec30d2ca2a563d7934b082375 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 26 Jan 2017 10:45:51 -0800 Subject: net: dsa: Pass device pointer to dsa_register_switch In preparation for allowing dsa_register_switch() to be supplied with device/platform data, pass down a struct device pointer instead of a struct device_node. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 2 +- drivers/net/dsa/mv88e6xxx/chip.c | 7 +++---- drivers/net/dsa/qca8k.c | 2 +- include/net/dsa.h | 2 +- net/dsa/dsa2.c | 7 ++++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 5cbb14f6a03b..bb210b12ad1b 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1894,7 +1894,7 @@ int b53_switch_register(struct b53_device *dev) pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); - return dsa_register_switch(dev->ds, dev->ds->dev->of_node); + return dsa_register_switch(dev->ds, dev->ds->dev); } EXPORT_SYMBOL(b53_switch_register); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5668e778ed1d..921e53351786 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4356,8 +4356,7 @@ static struct dsa_switch_driver mv88e6xxx_switch_drv = { .ops = &mv88e6xxx_switch_ops, }; -static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, - struct device_node *np) +static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) { struct device *dev = chip->dev; struct dsa_switch *ds; @@ -4372,7 +4371,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, dev_set_drvdata(dev, ds); - return dsa_register_switch(ds, np); + return dsa_register_switch(ds, dev); } static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) @@ -4456,7 +4455,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out_g2_irq; - err = mv88e6xxx_register_switch(chip, np); + err = mv88e6xxx_register_switch(chip); if (err) goto out_mdio; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 54d270d59eb0..c084aa484d2b 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -964,7 +964,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); - return dsa_register_switch(priv->ds, priv->ds->dev->of_node); + return dsa_register_switch(priv->ds, &mdiodev->dev); } static void diff --git a/include/net/dsa.h b/include/net/dsa.h index 08b340403927..92fd795e9573 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -387,7 +387,7 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) } void dsa_unregister_switch(struct dsa_switch *ds); -int dsa_register_switch(struct dsa_switch *ds, struct device_node *np); +int dsa_register_switch(struct dsa_switch *ds, struct device *dev); #ifdef CONFIG_PM_SLEEP int dsa_switch_suspend(struct dsa_switch *ds); int dsa_switch_resume(struct dsa_switch *ds); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 866222a8f9bf..2cf489c5e90f 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -578,8 +578,9 @@ static struct device_node *dsa_get_ports(struct dsa_switch *ds, return ports; } -static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np) +static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev) { + struct device_node *np = dev->of_node; struct device_node *ports = dsa_get_ports(ds, np); struct dsa_switch_tree *dst; u32 tree, index; @@ -659,12 +660,12 @@ out: return err; } -int dsa_register_switch(struct dsa_switch *ds, struct device_node *np) +int dsa_register_switch(struct dsa_switch *ds, struct device *dev) { int err; mutex_lock(&dsa2_mutex); - err = _dsa_register_switch(ds, np); + err = _dsa_register_switch(ds, dev); mutex_unlock(&dsa2_mutex); return err; -- 
cgit v1.2.3 From 293784a8f856e854b4742be4aacf435062d91e9c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 26 Jan 2017 10:45:52 -0800 Subject: net: dsa: Make most functions take a dsa_port argument In preparation for allowing platform data, and therefore no valid device_node pointer, make most DSA functions takes a pointer to a dsa_port structure whenever possible. While at it, introduce a dsa_port_is_valid() helper function which checks whether port->dn is NULL or not at the moment. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/dsa.c | 15 ++++++++------ net/dsa/dsa2.c | 61 +++++++++++++++++++++++++++++------------------------- net/dsa/dsa_priv.h | 4 ++-- 3 files changed, 44 insertions(+), 36 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 1f3afeb673d6..07e863369e04 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -110,8 +110,9 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, /* basic switch operations **************************************************/ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, - struct device_node *port_dn, int port) + struct dsa_port *dport, int port) { + struct device_node *port_dn = dport->dn; struct phy_device *phydev; int ret, mode; @@ -141,15 +142,15 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev) { - struct device_node *port_dn; + struct dsa_port *dport; int ret, port; for (port = 0; port < DSA_MAX_PORTS; port++) { if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) continue; - port_dn = ds->ports[port].dn; - ret = dsa_cpu_dsa_setup(ds, dev, port_dn, port); + dport = &ds->ports[port]; + ret = dsa_cpu_dsa_setup(ds, dev, dport, port); if (ret) return ret; } @@ -364,8 +365,10 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, return ds; } -void dsa_cpu_dsa_destroy(struct device_node *port_dn) +void dsa_cpu_dsa_destroy(struct dsa_port *port) { + struct device_node *port_dn = port->dn; + if (of_phy_is_fixed_link(port_dn)) of_phy_deregister_fixed_link(port_dn); } @@ -389,7 +392,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds) for (port = 0; port < DSA_MAX_PORTS; port++) { if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) continue; - dsa_cpu_dsa_destroy(ds->ports[port].dn); + dsa_cpu_dsa_destroy(&ds->ports[port]); /* Clearing a bit which is not set does no harm */ ds->cpu_port_mask |= ~(1 << port); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 2cf489c5e90f..56c43ca7c049 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -78,14 +78,19 @@ static void dsa_dst_del_ds(struct dsa_switch_tree *dst, kref_put(&dst->refcount, dsa_free_dst); } -static bool dsa_port_is_dsa(struct device_node *port) +static bool dsa_port_is_valid(struct dsa_port *port) { - return !!of_parse_phandle(port, "link", 0); + return !!port->dn; } -static bool dsa_port_is_cpu(struct device_node *port) +static bool dsa_port_is_dsa(struct dsa_port *port) { - return !!of_parse_phandle(port, "ethernet", 0); + return !!of_parse_phandle(port->dn, "link", 0); +} + +static bool dsa_port_is_cpu(struct dsa_port *port) +{ + return !!of_parse_phandle(port->dn, "ethernet", 0); } static bool dsa_ds_find_port(struct dsa_switch *ds, @@ -119,7 +124,7 @@ static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst, static int dsa_port_complete(struct dsa_switch_tree *dst, struct dsa_switch *src_ds, - struct device_node *port, + struct dsa_port *port, u32 src_port) { 
struct device_node *link; @@ -127,7 +132,7 @@ static int dsa_port_complete(struct dsa_switch_tree *dst, struct dsa_switch *dst_ds; for (index = 0;; index++) { - link = of_parse_phandle(port, "link", index); + link = of_parse_phandle(port->dn, "link", index); if (!link) break; @@ -150,13 +155,13 @@ static int dsa_port_complete(struct dsa_switch_tree *dst, */ static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds) { - struct device_node *port; + struct dsa_port *port; u32 index; int err; for (index = 0; index < DSA_MAX_PORTS; index++) { - port = ds->ports[index].dn; - if (!port) + port = &ds->ports[index]; + if (!dsa_port_is_valid(port)) continue; if (!dsa_port_is_dsa(port)) @@ -196,7 +201,7 @@ static int dsa_dst_complete(struct dsa_switch_tree *dst) return 0; } -static int dsa_dsa_port_apply(struct device_node *port, u32 index, +static int dsa_dsa_port_apply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { int err; @@ -211,13 +216,13 @@ static int dsa_dsa_port_apply(struct device_node *port, u32 index, return 0; } -static void dsa_dsa_port_unapply(struct device_node *port, u32 index, +static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { dsa_cpu_dsa_destroy(port); } -static int dsa_cpu_port_apply(struct device_node *port, u32 index, +static int dsa_cpu_port_apply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { int err; @@ -234,7 +239,7 @@ static int dsa_cpu_port_apply(struct device_node *port, u32 index, return 0; } -static void dsa_cpu_port_unapply(struct device_node *port, u32 index, +static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { dsa_cpu_dsa_destroy(port); @@ -242,13 +247,13 @@ static void dsa_cpu_port_unapply(struct device_node *port, u32 index, } -static int dsa_user_port_apply(struct device_node *port, u32 index, +static int dsa_user_port_apply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { const char *name; int err; - name = of_get_property(port, "label", NULL); + name = of_get_property(port->dn, "label", NULL); if (!name) name = "eth%d"; @@ -262,7 +267,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index, return 0; } -static void dsa_user_port_unapply(struct device_node *port, u32 index, +static void dsa_user_port_unapply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { if (ds->ports[index].netdev) { @@ -274,7 +279,7 @@ static void dsa_user_port_unapply(struct device_node *port, u32 index, static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds) { - struct device_node *port; + struct dsa_port *port; u32 index; int err; @@ -308,8 +313,8 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds) } for (index = 0; index < DSA_MAX_PORTS; index++) { - port = ds->ports[index].dn; - if (!port) + port = &ds->ports[index]; + if (!dsa_port_is_valid(port)) continue; if (dsa_port_is_dsa(port)) { @@ -336,12 +341,12 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds) static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds) { - struct device_node *port; + struct dsa_port *port; u32 index; for (index = 0; index < DSA_MAX_PORTS; index++) { - port = ds->ports[index].dn; - if (!port) + port = &ds->ports[index]; + if (!dsa_port_is_valid(port)) continue; if (dsa_port_is_dsa(port)) { @@ -425,7 +430,7 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst) dst->applied = false; } -static int dsa_cpu_parse(struct device_node *port, u32 index, +static 
int dsa_cpu_parse(struct dsa_port *port, u32 index, struct dsa_switch_tree *dst, struct dsa_switch *ds) { @@ -433,7 +438,7 @@ static int dsa_cpu_parse(struct device_node *port, u32 index, struct net_device *ethernet_dev; struct device_node *ethernet; - ethernet = of_parse_phandle(port, "ethernet", 0); + ethernet = of_parse_phandle(port->dn, "ethernet", 0); if (!ethernet) return -EINVAL; @@ -466,13 +471,13 @@ static int dsa_cpu_parse(struct device_node *port, u32 index, static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds) { - struct device_node *port; + struct dsa_port *port; u32 index; int err; for (index = 0; index < DSA_MAX_PORTS; index++) { - port = ds->ports[index].dn; - if (!port) + port = &ds->ports[index]; + if (!dsa_port_is_valid(port)) continue; if (dsa_port_is_cpu(port)) { @@ -533,7 +538,7 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds) * to have access to a correct value, just like what * net/dsa/dsa.c::dsa_switch_setup_one does. */ - if (!dsa_port_is_cpu(port)) + if (!dsa_port_is_cpu(&ds->ports[reg])) ds->enabled_port_mask |= 1 << reg; } diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 63ae1484abae..16194a4bb2fe 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -50,8 +50,8 @@ struct dsa_slave_priv { /* dsa.c */ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, - struct device_node *port_dn, int port); -void dsa_cpu_dsa_destroy(struct device_node *port_dn); + struct dsa_port *dport, int port); +void dsa_cpu_dsa_destroy(struct dsa_port *dport); const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol); int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds); void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds); -- cgit v1.2.3 From 3512a8e95e6acb51d4cd04480689ac484ed538c2 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 26 Jan 2017 10:45:53 -0800 Subject: net: dsa: Suffix function manipulating device_node with _dn Make it clear that these functions take a device_node structure pointer Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa2.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 56c43ca7c049..4c11619a818b 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -93,8 +93,8 @@ static bool dsa_port_is_cpu(struct dsa_port *port) return !!of_parse_phandle(port->dn, "ethernet", 0); } -static bool dsa_ds_find_port(struct dsa_switch *ds, - struct device_node *port) +static bool dsa_ds_find_port_dn(struct dsa_switch *ds, + struct device_node *port) { u32 index; @@ -104,8 +104,8 @@ static bool dsa_ds_find_port(struct dsa_switch *ds, return false; } -static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst, - struct device_node *port) +static struct dsa_switch *dsa_dst_find_port_dn(struct dsa_switch_tree *dst, + struct device_node *port) { struct dsa_switch *ds; u32 index; @@ -115,7 +115,7 @@ static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst, if (!ds) continue; - if (dsa_ds_find_port(ds, port)) + if (dsa_ds_find_port_dn(ds, port)) return ds; } @@ -136,7 +136,7 @@ static int dsa_port_complete(struct dsa_switch_tree *dst, if (!link) break; - dst_ds = dsa_dst_find_port(dst, link); + dst_ds = dsa_dst_find_port_dn(dst, link); of_node_put(link); if (!dst_ds) @@ -545,7 +545,7 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds) return 0; } -static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index) +static int dsa_parse_member_dn(struct device_node *np, u32 *tree, u32 *index) { int err; @@ -591,7 +591,7 @@ static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev) u32 tree, index; int i, err; - err = dsa_parse_member(np, &tree, &index); + err = dsa_parse_member_dn(np, &tree, &index); if (err) return err; -- cgit v1.2.3 From bc1727d242fa595c84e8f42b292e82151ba8cf06 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 26 Jan 2017 10:45:54 -0800 Subject: net: dsa: Move ports assignment closer to error checking Move the assignment of ports in _dsa_register_switch() closer to where it is checked, no functional change. Re-order declarations to be preserve the inverted christmas tree style. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/dsa2.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 4c11619a818b..75f5d1f8554b 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -586,8 +586,8 @@ static struct device_node *dsa_get_ports(struct dsa_switch *ds, static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev) { struct device_node *np = dev->of_node; - struct device_node *ports = dsa_get_ports(ds, np); struct dsa_switch_tree *dst; + struct device_node *ports; u32 tree, index; int i, err; @@ -595,6 +595,7 @@ static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev) if (err) return err; + ports = dsa_get_ports(ds, np); if (IS_ERR(ports)) return PTR_ERR(ports); -- cgit v1.2.3 From 3b7b2b0acdbf9ffe4d3a7915e59e6127988b05db Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 26 Jan 2017 14:08:36 -0800 Subject: net: ipv6: remove skb_reserve in getroute Remove skb_reserve and skb_reset_mac_header from inet6_rtm_getroute. The allocated skb is not passed through the routing engine (like it is for IPv4) and has not since the beginning of git time. Signed-off-by: David Ahern Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4b1f0f98a0e9..74bb1190800e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3416,12 +3416,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) goto errout; } - /* Reserve room for dummy headers, this skb can pass - through good chunk of routing engine. - */ - skb_reset_mac_header(skb); - skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); - skb_dst_set(skb, &rt->dst); err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, -- cgit v1.2.3 From 1f17e2f2c8a8be3430813119fa7b633398f6185b Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 26 Jan 2017 13:54:08 -0800 Subject: net: ipv6: ignore null_entry on route dumps lkp-robot reported a BUG: [ 10.151226] BUG: unable to handle kernel NULL pointer dereference at 00000198 [ 10.152525] IP: rt6_fill_node+0x164/0x4b8 [ 10.153307] *pdpt = 0000000012ee5001 *pde = 0000000000000000 [ 10.153309] [ 10.154492] Oops: 0000 [#1] [ 10.154987] CPU: 0 PID: 909 Comm: netifd Not tainted 4.10.0-rc4-00722-g41e8c70ee162-dirty #10 [ 10.156482] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.7.5-20140531_083030-gandalf 04/01/2014 [ 10.158254] task: d0deb000 task.stack: d0e0c000 [ 10.159059] EIP: rt6_fill_node+0x164/0x4b8 [ 10.159780] EFLAGS: 00010296 CPU: 0 [ 10.160404] EAX: 00000000 EBX: d10c2358 ECX: c1f7c6cc EDX: c1f6ff44 [ 10.161469] ESI: 00000000 EDI: c2059900 EBP: d0e0dc4c ESP: d0e0dbe4 [ 10.162534] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068 [ 10.163482] CR0: 80050033 CR2: 00000198 CR3: 10d94660 CR4: 000006b0 [ 10.164535] Call Trace: [ 10.164993] ? paravirt_sched_clock+0x9/0xd [ 10.165727] ? sched_clock+0x9/0xc [ 10.166329] ? sched_clock_cpu+0x19/0xe9 [ 10.166991] ? lock_release+0x13e/0x36c [ 10.167652] rt6_dump_route+0x4c/0x56 [ 10.168276] fib6_dump_node+0x1d/0x3d [ 10.168913] fib6_walk_continue+0xab/0x167 [ 10.169611] fib6_walk+0x2a/0x40 [ 10.170182] inet6_dump_fib+0xfb/0x1e0 [ 10.170855] netlink_dump+0xcd/0x21f This happens when the loopback device is set down and a ipv6 fib route dump is requested. ip6_null_entry is the root of all ipv6 fib tables making it integrated into the table and hence passed to the ipv6 route dump code. The null_entry route uses the loopback device for dst.dev but may not have rt6i_idev set because of the order in which initializations are done -- ip6_route_net_init is run before addrconf_init has initialized the loopback device. Fixing the initialization order is a much bigger problem with no obvious solution thus far. The BUG is triggered when the loopback is set down and the netif_running check added by a1a22c1206 fails. The fill_node descends to checking rt->rt6i_idev for ignore_routes_with_linkdown and since rt6i_idev is NULL it faults. The null_entry route should not be processed in a dump request. Catch and ignore. This check is done in rt6_dump_route as it is the highest place in the callchain with knowledge of both the route and the network namespace. Fixes: a1a22c1206("net: ipv6: Keep nexthop of multipath route on admin down") Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 74bb1190800e..5046d2b24004 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3320,6 +3320,10 @@ nla_put_failure: int rt6_dump_route(struct rt6_info *rt, void *p_arg) { struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; + struct net *net = arg->net; + + if (rt == net->ipv6.ip6_null_entry) + return 0; if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) { struct rtmsg *rtm = nlmsg_data(arg->cb->nlh); @@ -3332,7 +3336,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) } } - return rt6_fill_node(arg->net, + return rt6_fill_node(net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, NLM_F_MULTI); -- cgit v1.2.3 From 6a4bc2b4c3c9afd643e8da44dd2318a5c5f22cd0 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 26 Jan 2017 14:44:17 -0800 Subject: net: Fix ndo_setup_tc comment Commit 16e5cc647173 ("net: rework setup_tc ndo op to consume general tc operand") changed the ndo_setup_tc() signature, but did not update the comments in netdevice.h, so do that now. Signed-off-by: Florian Fainelli Acked-by: John Fastabend Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/netdevice.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3868c32d98af..d63cacb67ea6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -964,11 +964,12 @@ struct netdev_xdp { * with PF and querying it may introduce a theoretical security risk. * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) - * Called to setup 'tc' number of traffic classes in the net device. This - * is always called from the stack with the rtnl lock held and netif tx - * queues stopped. This allows the netdevice to perform queue management - * safely. + * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, + * __be16 protocol, struct tc_to_netdev *tc); + * Called to setup any 'tc' scheduler, classifier or action on @dev. + * This is always called from the stack with the rtnl lock held and netif + * tx queues stopped. This allows the netdevice to perform queue + * management safely. * * Fiber Channel over Ethernet (FCoE) offload functions. * int (*ndo_fcoe_enable)(struct net_device *dev); -- cgit v1.2.3 From d35a00b8e33dab7385f724e713ae71c8be0a49f4 Mon Sep 17 00:00:00 2001 From: Felix Jia Date: Thu, 26 Jan 2017 16:59:17 +1300 Subject: net/ipv6: allow sysctl to change link-local address generation mode The address generation mode for IPv6 link-local can only be configured by netlink messages. This patch adds the ability to change the address generation mode via sysctl. v1 -> v2 Removed the rtnl lock and switch to use RCU lock to iterate through the netdev list. v2 -> v3 Removed the addrgenmode variable from the idev structure and use the systcl storage for the flag. Simplifed the logic for sysctl handling by removing the supported for all operation. Added support for more types of tunnel interfaces for link-local address generation. Based the patches from net-next. v3 -> v4 Removed unnecessary whitespace changes. Signed-off-by: Felix Jia Signed-off-by: David S. 
Miller --- include/linux/ipv6.h | 1 + include/net/if_inet6.h | 1 - include/uapi/linux/ipv6.h | 1 + net/ipv6/addrconf.c | 104 +++++++++++++++++++++++++++++++++++++--------- 4 files changed, 86 insertions(+), 21 deletions(-) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 671d014e6429..71be5b330d21 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -69,6 +69,7 @@ struct ipv6_devconf { __s32 seg6_require_hmac; #endif __u32 enhanced_dad; + __u32 addr_gen_mode; struct ctl_table_header *sysctl_header; }; diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 0fa4c324b713..f656f9051aca 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -205,7 +205,6 @@ struct inet6_dev { __s32 rs_interval; /* in jiffies */ __u8 rs_probes; - __u8 addr_gen_mode; unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ struct rcu_head rcu; }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index eaf65dc82e22..8ef9e75e004e 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -182,6 +182,7 @@ enum { DEVCONF_SEG6_ENABLED, DEVCONF_SEG6_REQUIRE_HMAC, DEVCONF_ENHANCED_DAD, + DEVCONF_ADDR_GEN_MODE, DEVCONF_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ac9bd5620f81..e35259dd17ba 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -243,6 +243,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .seg6_require_hmac = 0, #endif .enhanced_dad = 1, + .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -294,6 +295,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .seg6_require_hmac = 0, #endif .enhanced_dad = 1, + .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, }; /* Check if a valid qdisc is available */ @@ -386,9 +388,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); if (ndev->cnf.stable_secret.initialized) - ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; + ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; else - ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64; + ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode; ndev->cnf.mtu6 = dev->mtu; ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); @@ -2387,8 +2389,8 @@ static void manage_tempaddrs(struct inet6_dev *idev, static bool is_addr_mode_generate_stable(struct inet6_dev *idev) { - return idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY || - idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM; + return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY || + idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM; } int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, @@ -3152,7 +3154,7 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); - switch (idev->addr_gen_mode) { + switch (idev->cnf.addr_gen_mode) { case IN6_ADDR_GEN_MODE_RANDOM: ipv6_gen_mode_random_init(idev); /* fallthrough */ @@ -3204,8 +3206,8 @@ static void addrconf_dev_config(struct net_device *dev) /* this device type has no EUI support */ if (dev->type == ARPHRD_NONE && - idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) - idev->addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM; + idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) + idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM; addrconf_addr_gen(idev, false); } @@ -4982,6 +4984,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf 
*cnf, array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac; #endif array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad; + array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode; } static inline size_t inet6_ifla6_size(void) @@ -5093,7 +5096,7 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, if (!nla) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode)) + if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode)) goto nla_put_failure; read_lock_bh(&idev->lock); @@ -5211,6 +5214,26 @@ static int inet6_validate_link_af(const struct net_device *dev, return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy); } +static int check_addr_gen_mode(int mode) +{ + if (mode != IN6_ADDR_GEN_MODE_EUI64 && + mode != IN6_ADDR_GEN_MODE_NONE && + mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY && + mode != IN6_ADDR_GEN_MODE_RANDOM) + return -EINVAL; + return 1; +} + +static int check_stable_privacy(struct inet6_dev *idev, struct net *net, + int mode) +{ + if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY && + !idev->cnf.stable_secret.initialized && + !net->ipv6.devconf_dflt->stable_secret.initialized) + return -EINVAL; + return 1; +} + static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla) { int err = -EINVAL; @@ -5232,18 +5255,11 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla) if (tb[IFLA_INET6_ADDR_GEN_MODE]) { u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]); - if (mode != IN6_ADDR_GEN_MODE_EUI64 && - mode != IN6_ADDR_GEN_MODE_NONE && - mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY && - mode != IN6_ADDR_GEN_MODE_RANDOM) - return -EINVAL; - - if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY && - !idev->cnf.stable_secret.initialized && - !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized) + if (check_addr_gen_mode(mode) < 0 || + check_stable_privacy(idev, dev_net(dev), mode) < 0) return -EINVAL; - idev->addr_gen_mode = mode; + idev->cnf.addr_gen_mode = mode; err = 0; } @@ -5652,6 +5668,47 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write, return ret; } +static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret = 0; + int new_val; + struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; + struct net *net = (struct net *)ctl->extra2; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (write) { + new_val = *((int *)ctl->data); + + if (check_addr_gen_mode(new_val) < 0) + return -EINVAL; + + /* request for default */ + if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) { + ipv6_devconf_dflt.addr_gen_mode = new_val; + + /* request for individual net device */ + } else { + if (!idev) + return ret; + + if (check_stable_privacy(idev, net, new_val) < 0) + return -EINVAL; + + if (idev->cnf.addr_gen_mode != new_val) { + idev->cnf.addr_gen_mode = new_val; + rtnl_lock(); + addrconf_dev_config(idev->dev); + rtnl_unlock(); + } + } + } + + return ret; +} + static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -5702,14 +5759,14 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write, struct inet6_dev *idev = __in6_dev_get(dev); if (idev) { - idev->addr_gen_mode = + idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; } } } else { struct inet6_dev *idev = ctl->extra1; - idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; + idev->cnf.addr_gen_mode = 
IN6_ADDR_GEN_MODE_STABLE_PRIVACY; } out: @@ -6096,6 +6153,13 @@ static const struct ctl_table addrconf_sysctl[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "addr_gen_mode", + .data = &ipv6_devconf.addr_gen_mode, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_addr_gen_mode, + }, { /* sentinel */ } -- cgit v1.2.3 From 45ce0fd19da9ca21635c367d504b9e4904621ff4 Mon Sep 17 00:00:00 2001 From: Felix Jia Date: Thu, 26 Jan 2017 16:59:18 +1300 Subject: net/ipv6: support more tunnel interfaces for EUI64 link-local generation Signed-off-by: Felix Jia Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 5 +++++ net/ipv6/ip6_gre.c | 3 +++ net/ipv6/ip6_vti.c | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e35259dd17ba..4c47656b9f09 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2146,12 +2146,14 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) case ARPHRD_SIT: return addrconf_ifid_sit(eui, dev); case ARPHRD_IPGRE: + case ARPHRD_TUNNEL: return addrconf_ifid_gre(eui, dev); case ARPHRD_6LOWPAN: return addrconf_ifid_eui64(eui, dev); case ARPHRD_IEEE1394: return addrconf_ifid_ieee1394(eui, dev); case ARPHRD_TUNNEL6: + case ARPHRD_IP6GRE: return addrconf_ifid_ip6tnl(eui, dev); } return -1; @@ -3195,6 +3197,9 @@ static void addrconf_dev_config(struct net_device *dev) (dev->type != ARPHRD_IEEE1394) && (dev->type != ARPHRD_TUNNEL6) && (dev->type != ARPHRD_6LOWPAN) && + (dev->type != ARPHRD_IP6GRE) && + (dev->type != ARPHRD_IPGRE) && + (dev->type != ARPHRD_TUNNEL) && (dev->type != ARPHRD_NONE)) { /* Alas, we support only Ethernet autoconfiguration. */ return; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 65bdfd1cca80..1ba7567b4d8f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -993,6 +993,9 @@ static void ip6gre_tunnel_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); netif_keep_dst(dev); + /* This perm addr will be used as interface identifier by IPv6 */ + dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->perm_addr); } static int ip6gre_tunnel_init_common(struct net_device *dev) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d82042c8d8fd..c795fee372c4 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -49,6 +49,7 @@ #include #include #include +#include #define IP6_VTI_HASH_SIZE_SHIFT 5 #define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT) @@ -842,6 +843,9 @@ static void vti6_dev_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); netif_keep_dst(dev); + /* This perm addr will be used as interface identifier by IPv6 */ + dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->perm_addr); } /** -- cgit v1.2.3 From 0fc9ae107669760c2a8658cb5b5876dbe525e08d Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Fri, 27 Jan 2017 14:07:01 +0100 Subject: net: phy: broadcom: add support for BCM54210E MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's Broadcom PHY simply described as single-port RGMII 10/100/1000BASE-T PHY. It requires disabling delay skew and GTXCLK bits. Signed-off-by: Rafał Miłecki Signed-off-by: David S. 
Miller --- drivers/net/phy/broadcom.c | 34 +++++++++++++++++++++++++++++++++- include/linux/brcmphy.h | 1 + 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 97d1c057c0a1..794b9ec81ba5 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -30,6 +30,22 @@ MODULE_DESCRIPTION("Broadcom PHY driver"); MODULE_AUTHOR("Maciej W. Rozycki"); MODULE_LICENSE("GPL"); +static int bcm54210e_config_init(struct phy_device *phydev) +{ + int val; + + val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); + val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; + val |= MII_BCM54XX_AUXCTL_MISC_WREN; + bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val); + + val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL); + val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN; + bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val); + + return 0; +} + static int bcm54810_config(struct phy_device *phydev) { int rc, val; @@ -230,7 +246,11 @@ static int bcm54xx_config_init(struct phy_device *phydev) (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) bcm54xx_adjust_rxrefclk(phydev); - if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { + if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) { + err = bcm54210e_config_init(phydev); + if (err) + return err; + } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { err = bcm54810_config(phydev); if (err) return err; @@ -541,6 +561,17 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, +}, { + .phy_id = PHY_ID_BCM54210E, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM54210E", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = bcm54xx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = bcm_phy_ack_intr, + .config_intr = bcm_phy_config_intr, }, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, @@ -680,6 +711,7 @@ module_phy_driver(broadcom_drivers); static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM5411, 0xfffffff0 }, { PHY_ID_BCM5421, 0xfffffff0 }, + { PHY_ID_BCM54210E, 0xfffffff0 }, { PHY_ID_BCM5461, 0xfffffff0 }, { PHY_ID_BCM54612E, 0xfffffff0 }, { PHY_ID_BCM54616S, 0xfffffff0 }, diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index cf93f1399d3e..5881d1fdc1fb 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -17,6 +17,7 @@ #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 +#define PHY_ID_BCM54210E 0x600d84a0 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM54612E 0x03625e60 -- cgit v1.2.3 From 2b368b234ec43abbf46f2983478c438d36e58134 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 27 Jan 2017 15:26:32 +0100 Subject: net: wan: Remove unused stats member from struct frad_local The stats member of struct frad_locl is used neither by the dlci nor the sdla driver, so it might as well be removed. Signed-off-by: Tobias Klauser Signed-off-by: David S. 
Miller --- include/linux/if_frad.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 4316aa173dde..46df7e565d6f 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -66,8 +66,6 @@ struct dlci_local struct frad_local { - struct net_device_stats stats; - /* devices which this FRAD is slaved to */ struct net_device *master[CONFIG_DLCI_MAX]; short dlci[CONFIG_DLCI_MAX]; -- cgit v1.2.3 From 2d3d4ec01641148636f8961ccd64308dbf2072f4 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Fri, 27 Jan 2017 15:02:11 +0000 Subject: sfc: fixes to filter restore handling If the NIC is switched from full-featured to low-latency, encapsulated filters are no longer available, and this causes errors. This patch removes those filters from the filter table on restore. Also, if filters which are removed by the above, or which we fail to insert when restoring filters, were UC, MC or broadcast default filters, invalidate the corresponding vlan->default_filters entry. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 42 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 7c53da28ad64..b64b47b4a7f1 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4557,9 +4557,12 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int invalid_filters = 0, failed = 0; + struct efx_ef10_filter_vlan *vlan; struct efx_filter_spec *spec; unsigned int filter_idx; - bool failed = false; + u32 mcdi_flags; + int match_pri; int rc; WARN_ON(!rwsem_is_locked(&efx->filter_sem)); @@ -4577,6 +4580,20 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) if (!spec) continue; + mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); + match_pri = 0; + while (match_pri < table->rx_match_count && + table->rx_match_mcdi_flags[match_pri] != mcdi_flags) + ++match_pri; + if (match_pri >= table->rx_match_count) { + invalid_filters++; + goto not_restored; + } + if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && + spec->rss_context != nic_data->rx_rss_context) + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with specific RSS context.\n"); + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; spin_unlock_bh(&efx->filter_lock); @@ -4584,10 +4601,19 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) &table->entry[filter_idx].handle, false); if (rc) - failed = true; - + failed++; spin_lock_bh(&efx->filter_lock); + if (rc) { +not_restored: + list_for_each_entry(vlan, &table->vlan_list, list) { + if (vlan->ucdef == filter_idx) + vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; + if (vlan->mcdef == filter_idx) + vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + if (vlan->bcast == filter_idx) + vlan->bcast = EFX_EF10_FILTER_ID_INVALID; + } kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); } else { @@ -4598,9 +4624,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) spin_unlock_bh(&efx->filter_lock); + /* This can happen validly if the MC's capabilities have changed, so + * is not an error. 
+ */ + if (invalid_filters) + netif_dbg(efx, drv, efx->net_dev, + "Did not restore %u filters that are now unsupported.\n", + invalid_filters); + if (failed) netif_err(efx, hw, efx->net_dev, - "unable to restore all filters\n"); + "unable to restore %u filters\n", failed); else nic_data->must_restore_filters = false; } -- cgit v1.2.3 From f617f27653c4d9f5b2aa43d567ac0405df889944 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Fri, 27 Jan 2017 15:02:26 +0000 Subject: net: implement netif_cond_dbg macro For reporting things that may or may not be serious, depending on some condition, netif_cond_dbg will check the condition and print the report at either dbg (if the condition is true) or the specified level. Suggested-by: Jon Cooper Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- include/linux/netdevice.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d63cacb67ea6..9511e5a1cfc6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4347,6 +4347,15 @@ do { \ }) #endif +/* if @cond then downgrade to debug, else print at @level */ +#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ + do { \ + if (cond) \ + netif_dbg(priv, type, netdev, fmt, ##args); \ + else \ + netif_ ## level(priv, type, netdev, fmt, ##args); \ + } while (0) + #if defined(VERBOSE_DEBUG) #define netif_vdbg netif_dbg #else -- cgit v1.2.3 From 34e7aefb2a15f3180635e5730f01f5ce4a0734ec Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Fri, 27 Jan 2017 15:02:39 +0000 Subject: sfc: refactor debug-or-warnings printks Rationalise several debug-or-warnings printks using netif_cond_dbg to make output more consistent. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 8 ++++---- drivers/net/ethernet/sfc/mcdi.c | 19 ++++++++----------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b64b47b4a7f1..d0c1ed0e0354 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4881,10 +4881,10 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, - efx->net_dev, - "%scast mismatch filter insert failed rc=%d\n", - multicast ? "Multi" : "Uni", rc); + netif_cond_dbg(efx, drv, efx->net_dev, + rc == -EPERM, warn, + "%scast mismatch filter insert failed rc=%d\n", + multicast ? "Multi" : "Uni", rc); } else if (multicast) { EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID); vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 995651341b94..24b271b9c260 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -837,11 +837,9 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, outbuf, outlen, outlen_actual, quiet, NULL, raw_rc); } else { - netif_printk(efx, hw, - rc == -EPERM ? 
KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x failed after proxy auth rc=%d\n", - cmd, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); if (rc == -EINTR || rc == -EIO) efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); @@ -1084,10 +1082,9 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, code = MCDI_DWORD(outbuf, ERR_CODE); if (outlen >= MC_CMD_ERR_ARG_OFST + 4) err_arg = MCDI_DWORD(outbuf, ERR_ARG); - netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", - cmd, inlen, rc, code, err_arg); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); } /* Switch to polled MCDI completions. This can be called in various @@ -2057,8 +2054,8 @@ fail: /* Older firmware lacks GET_WORKAROUNDS and this isn't especially * terrifying. The call site will have to deal with it though. */ - netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR, - efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err, + "%s: failed rc=%d\n", __func__, rc); return rc; } -- cgit v1.2.3 From 9b41080125176841ece49b661aa133afffe99381 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Fri, 27 Jan 2017 15:02:52 +0000 Subject: sfc: insert catch-all filters for encapsulated traffic 8000 series adapters support filtering VXLAN, NVGRE and GENEVE traffic based on inner fields, and when the NIC recognises such traffic, it does not match unencapsulated traffic filters any more. So add catch- all filters for encapsulated traffic on supporting platforms. Although recognition of VXLAN and GENEVE is based on UDP ports, and thus will not occur until the driver (on the primary PF) notifies the firmware of UDP ports to use, NVGRE will always be recognised, hence without this patch 8000 series adapters will drop all NVGRE traffic. Partly based on patches by Jon Cooper . Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 554 ++++++++++++++++++++++++++++---------- drivers/net/ethernet/sfc/filter.h | 41 ++- 2 files changed, 449 insertions(+), 146 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d0c1ed0e0354..8bec9383d754 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -60,15 +60,33 @@ struct efx_ef10_vlan { u16 vid; }; +enum efx_ef10_default_filters { + EFX_EF10_BCAST, + EFX_EF10_UCDEF, + EFX_EF10_MCDEF, + EFX_EF10_VXLAN4_UCDEF, + EFX_EF10_VXLAN4_MCDEF, + EFX_EF10_VXLAN6_UCDEF, + EFX_EF10_VXLAN6_MCDEF, + EFX_EF10_NVGRE4_UCDEF, + EFX_EF10_NVGRE4_MCDEF, + EFX_EF10_NVGRE6_UCDEF, + EFX_EF10_NVGRE6_MCDEF, + EFX_EF10_GENEVE4_UCDEF, + EFX_EF10_GENEVE4_MCDEF, + EFX_EF10_GENEVE6_UCDEF, + EFX_EF10_GENEVE6_MCDEF, + + EFX_EF10_NUM_DEFAULT_FILTERS +}; + /* Per-VLAN filters information */ struct efx_ef10_filter_vlan { struct list_head list; u16 vid; u16 uc[EFX_EF10_FILTER_DEV_UC_MAX]; u16 mc[EFX_EF10_FILTER_DEV_MC_MAX]; - u16 ucdef; - u16 bcast; - u16 mcdef; + u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS]; }; struct efx_ef10_dev_addr { @@ -78,7 +96,7 @@ struct efx_ef10_dev_addr { struct efx_ef10_filter_table { /* The MCDI match masks supported by this fw & hw, in order of priority */ u32 rx_match_mcdi_flags[ - MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; unsigned int rx_match_count; struct { @@ -3592,6 +3610,104 @@ efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, table->entry[filter_idx].spec = (unsigned long)spec | flags; } +static void +efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf) +{ + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); + u32 match_fields = 0, uc_match, mc_match; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_INSERT : + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); + + /* Convert match flags and values. Unlike almost + * everything else in MCDI, these fields are in + * network byte order. + */ +#define COPY_VALUE(value, mcdi_field) \ + do { \ + match_fields |= \ + 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN; \ + BUILD_BUG_ON( \ + MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ + sizeof(value)); \ + memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ + &value, sizeof(value)); \ + } while (0) +#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ + if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ + COPY_VALUE(spec->gen_field, mcdi_field); \ + } + /* Handle encap filters first. They will always be mismatch + * (unknown UC or MC) filters + */ + if (encap_type) { + /* ether_type and outer_ip_proto need to be variables + * because COPY_VALUE wants to memcpy them + */ + __be16 ether_type = + htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? + ETH_P_IPV6 : ETH_P_IP); + u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; + u8 outer_ip_proto; + + switch (encap_type & EFX_ENCAP_TYPES_MASK) { + case EFX_ENCAP_TYPE_VXLAN: + vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; + /* fallthrough */ + case EFX_ENCAP_TYPE_GENEVE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_UDP; + COPY_VALUE(outer_ip_proto, IP_PROTO); + /* We always need to set the type field, even + * though we're not matching on the TNI. 
+ */ + MCDI_POPULATE_DWORD_1(inbuf, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VNI_TYPE, + vni_type); + break; + case EFX_ENCAP_TYPE_NVGRE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_GRE; + COPY_VALUE(outer_ip_proto, IP_PROTO); + break; + default: + WARN_ON(1); + } + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + match_fields |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << mc_match : + 1 << uc_match; + COPY_FIELD(REM_HOST, rem_host, SRC_IP); + COPY_FIELD(LOC_HOST, loc_host, DST_IP); + COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); + COPY_FIELD(REM_PORT, rem_port, SRC_PORT); + COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); + COPY_FIELD(LOC_PORT, loc_port, DST_PORT); + COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); + COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); + COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); + COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); +#undef COPY_FIELD +#undef COPY_VALUE + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, + match_fields); +} + static void efx_ef10_filter_push_prep(struct efx_nic *efx, const struct efx_filter_spec *spec, efx_dword_t *inbuf, u64 handle, @@ -3600,7 +3716,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, struct efx_ef10_nic_data *nic_data = efx->nic_data; u32 flags = spec->flags; - memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); + memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); /* Remove RSS flag if we don't have an RSS context. */ if (flags & EFX_FILTER_FLAG_RX_RSS && @@ -3613,46 +3729,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_OP_REPLACE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); } else { - u32 match_fields = 0; - - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, - efx_ef10_filter_is_exclusive(spec) ? - MC_CMD_FILTER_OP_IN_OP_INSERT : - MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); - - /* Convert match flags and values. Unlike almost - * everything else in MCDI, these fields are in - * network byte order. - */ - if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) - match_fields |= - is_multicast_ether_addr(spec->loc_mac) ? 
- 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : - 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; -#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ - if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ - match_fields |= \ - 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN; \ - BUILD_BUG_ON( \ - MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ - sizeof(spec->gen_field)); \ - memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ - &spec->gen_field, sizeof(spec->gen_field)); \ - } - COPY_FIELD(REM_HOST, rem_host, SRC_IP); - COPY_FIELD(LOC_HOST, loc_host, DST_IP); - COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); - COPY_FIELD(REM_PORT, rem_port, SRC_PORT); - COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); - COPY_FIELD(LOC_PORT, loc_port, DST_PORT); - COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); - COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); - COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); - COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); -#undef COPY_FIELD - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, - match_fields); + efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); } MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); @@ -3681,8 +3758,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx, const struct efx_filter_spec *spec, u64 *handle, bool replacing) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); int rc; efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); @@ -3697,37 +3774,58 @@ static int efx_ef10_filter_push(struct efx_nic *efx, static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) { + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); unsigned int match_flags = spec->match_flags; + unsigned int uc_match, mc_match; u32 mcdi_flags = 0; - if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { - match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; - mcdi_flags |= - is_multicast_ether_addr(spec->loc_mac) ? - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) : - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); - } - -#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \ - unsigned int old_match_flags = match_flags; \ +#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ + unsigned int old_match_flags = match_flags; \ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ if (match_flags != old_match_flags) \ mcdi_flags |= \ - (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ + (1 << ((encap) ? 
\ + MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ + mcdi_field ## _LBN : \ + MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ + mcdi_field ## _LBN)); \ } - MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP); - MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP); - MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC); - MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT); - MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC); - MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT); - MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO); + /* inner or outer based on encap type */ + MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); + MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); + /* always outer */ + MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); + MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); #undef MAP_FILTER_TO_MCDI_FLAG + /* special handling for encap type, and mismatch */ + if (encap_type) { + match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; + mcdi_flags |= + (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { + match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; + mcdi_flags |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << mc_match : + 1 << uc_match; + } + /* Did we map them all? 
*/ WARN_ON_ONCE(match_flags); @@ -4387,29 +4485,54 @@ efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, #endif /* CONFIG_RFS_ACCEL */ -static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) +static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) { int match_flags = 0; -#define MAP_FLAG(gen_flag, mcdi_field) { \ +#define MAP_FLAG(gen_flag, mcdi_field) do { \ u32 old_mcdi_flags = mcdi_flags; \ - mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ + mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ if (mcdi_flags != old_mcdi_flags) \ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ + } while (0) + + if (encap) { + /* encap filters must specify encap type */ + match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + /* and imply ethertype and ip proto */ + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + /* VLAN tags refer to the outer packet */ + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + /* everything else refers to the inner packet */ + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, IFRM_SRC_IP); + MAP_FLAG(LOC_HOST, IFRM_DST_IP); + MAP_FLAG(REM_MAC, IFRM_SRC_MAC); + MAP_FLAG(REM_PORT, IFRM_SRC_PORT); + MAP_FLAG(LOC_MAC, IFRM_DST_MAC); + MAP_FLAG(LOC_PORT, IFRM_DST_PORT); + MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); + MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); + } else { + MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, SRC_IP); + MAP_FLAG(LOC_HOST, DST_IP); + MAP_FLAG(REM_MAC, SRC_MAC); + MAP_FLAG(REM_PORT, SRC_PORT); + MAP_FLAG(LOC_MAC, DST_MAC); + MAP_FLAG(LOC_PORT, DST_PORT); + MAP_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(IP_PROTO, IP_PROTO); } - MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); - MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); - MAP_FLAG(REM_HOST, SRC_IP); - MAP_FLAG(LOC_HOST, DST_IP); - MAP_FLAG(REM_MAC, SRC_MAC); - MAP_FLAG(REM_PORT, SRC_PORT); - MAP_FLAG(LOC_MAC, DST_MAC); - MAP_FLAG(LOC_PORT, DST_PORT); - MAP_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FLAG(INNER_VID, INNER_VLAN); - MAP_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FLAG(IP_PROTO, IP_PROTO); #undef MAP_FLAG /* Did we map them all? 
*/ @@ -4436,6 +4559,7 @@ static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) } static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, + bool encap, enum efx_filter_match_flags match_flags) { unsigned int match_pri; @@ -4444,7 +4568,7 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, for (match_pri = 0; match_pri < table->rx_match_count; match_pri++) { - mf = efx_ef10_filter_match_flags_from_mcdi( + mf = efx_ef10_filter_match_flags_from_mcdi(encap, table->rx_match_mcdi_flags[match_pri]); if (mf == match_flags) return true; @@ -4453,39 +4577,30 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, return false; } -static int efx_ef10_filter_table_probe(struct efx_nic *efx) +static int +efx_ef10_filter_table_probe_matches(struct efx_nic *efx, + struct efx_ef10_filter_table *table, + bool encap) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct net_device *net_dev = efx->net_dev; unsigned int pd_match_pri, pd_match_count; - struct efx_ef10_filter_table *table; - struct efx_ef10_vlan *vlan; size_t outlen; int rc; - if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) - return -EINVAL; - - if (efx->filter_state) /* already probed */ - return 0; - - table = kzalloc(sizeof(*table), GFP_KERNEL); - if (!table) - return -ENOMEM; - /* Find out which RX filter types are supported, and their priorities */ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, + encap ? + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; + return rc; + pd_match_count = MCDI_VAR_ARRAY_LEN( outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); - table->rx_match_count = 0; for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { u32 mcdi_flags = @@ -4493,7 +4608,7 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) outbuf, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, pd_match_pri); - rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); + rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); if (rc < 0) { netif_dbg(efx, probe, efx->net_dev, "%s: fw flags %#x pri %u not supported in driver\n", @@ -4508,10 +4623,40 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) } } + return 0; +} + +static int efx_ef10_filter_table_probe(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + struct efx_ef10_filter_table *table; + struct efx_ef10_vlan *vlan; + int rc; + + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + if (efx->filter_state) /* already probed */ + return 0; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + table->rx_match_count = 0; + rc = efx_ef10_filter_table_probe_matches(efx, table, false); + if (rc) + goto fail; + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + rc = efx_ef10_filter_table_probe_matches(efx, table, true); + if (rc) + goto fail; if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && - !(efx_ef10_filter_match_supported(table, + !(efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && - 
efx_ef10_filter_match_supported(table, + efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { netif_info(efx, probe, net_dev, "VLAN filters are not supported in this firmware variant\n"); @@ -4563,7 +4708,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) unsigned int filter_idx; u32 mcdi_flags; int match_pri; - int rc; + int rc, i; WARN_ON(!rwsem_is_locked(&efx->filter_sem)); @@ -4606,14 +4751,12 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) if (rc) { not_restored: - list_for_each_entry(vlan, &table->vlan_list, list) { - if (vlan->ucdef == filter_idx) - vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; - if (vlan->mcdef == filter_idx) - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; - if (vlan->bcast == filter_idx) - vlan->bcast = EFX_EF10_FILTER_ID_INVALID; - } + list_for_each_entry(vlan, &table->vlan_list, list) + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i) + if (vlan->default_filters[i] == filter_idx) + vlan->default_filters[i] = + EFX_EF10_FILTER_ID_INVALID; + kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); } else { @@ -4712,9 +4855,8 @@ static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); for (i = 0; i < table->dev_mc_count; i++) efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); - efx_ef10_filter_mark_one_old(efx, &vlan->ucdef); - efx_ef10_filter_mark_one_old(efx, &vlan->bcast); - efx_ef10_filter_mark_one_old(efx, &vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); } /* Mark old filters that may need to be removed. @@ -4832,6 +4974,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ + EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, vlan->vid, baddr); @@ -4848,9 +4992,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, } return rc; } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(efx, rc); } } @@ -4859,6 +5002,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_ef10_filter_vlan *vlan, + enum efx_encap_type encap_type, bool multicast, bool rollback) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -4866,6 +5010,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; int rc; + u16 *id; filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; @@ -4876,19 +5021,75 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, else efx_filter_set_uc_def(&spec); + if (encap_type) { + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + efx_filter_set_encap_type(&spec, encap_type); + else + /* don't insert encap filters on non-supporting + * platforms. ID will be left as INVALID. + */ + return 0; + } + if (vlan->vid != EFX_FILTER_VID_UNSPEC) efx_filter_set_eth_local(&spec, vlan->vid, NULL); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { + const char *um = multicast ? 
"Multicast" : "Unicast"; + const char *encap_name = ""; + const char *encap_ipv = ""; + + if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_VXLAN) + encap_name = "VXLAN "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_NVGRE) + encap_name = "NVGRE "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_GENEVE) + encap_name = "GENEVE "; + if (encap_type & EFX_ENCAP_FLAG_IPV6) + encap_ipv = "IPv6 "; + else if (encap_type) + encap_ipv = "IPv4 "; + + /* unprivileged functions can't insert mismatch filters + * for encapsulated or unicast traffic, so downgrade + * those warnings to debug. + */ netif_cond_dbg(efx, drv, efx->net_dev, - rc == -EPERM, warn, - "%scast mismatch filter insert failed rc=%d\n", - multicast ? "Multi" : "Uni", rc); + rc == -EPERM && (encap_type || !multicast), warn, + "%s%s%s mismatch filter insert failed rc=%d\n", + encap_name, encap_ipv, um, rc); } else if (multicast) { - EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID); - vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc); - if (!nic_data->workaround_26807) { + /* mapping from encap types to default filter IDs (multicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_MCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_MCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_MCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type > ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = efx_ef10_filter_get_unsafe_id(efx, rc); + if (!nic_data->workaround_26807 && !encap_type) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); @@ -4903,20 +5104,44 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, /* Roll back the mc_def filter */ efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - vlan->mcdef); - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + *id); + *id = EFX_EF10_FILTER_ID_INVALID; return rc; } } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID( + vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(efx, rc); } } rc = 0; } else { - EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID); - vlan->ucdef = rc; + /* mapping from encap types to default filter IDs (unicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_UCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_UCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_UCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type > 
ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = rc; rc = 0; } return rc; @@ -5039,7 +5264,8 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Insert/renew unicast filters */ if (table->uc_promisc) { - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, + false, false); efx_ef10_filter_insert_addr_list(efx, vlan, false, false); } else { /* If any of the filters failed to insert, fall back to @@ -5047,8 +5273,25 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, * our individual unicast filters. */ if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + false, false); } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + false, false); /* Insert/renew multicast filters */ /* If changing promiscuous state with cascaded multicast filters, remove @@ -5062,7 +5305,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, rollback * and fall back to individual multicast filters */ - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) { + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) { /* Changing promisc state, so remove old filters */ efx_ef10_filter_remove_old(efx); efx_ef10_filter_insert_addr_list(efx, vlan, @@ -5072,7 +5317,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, don't * rollback. 
Regardless, also insert the mc_list */ - efx_ef10_filter_insert_def(efx, vlan, true, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, false); efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } else { @@ -5085,11 +5332,28 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Changing promisc state, so remove old filters */ if (nic_data->workaround_26807) efx_ef10_filter_remove_old(efx); - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + true, false); } /* Caller must hold efx->filter_sem for read if race against @@ -5176,9 +5440,8 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; - vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; - vlan->bcast = EFX_EF10_FILTER_ID_INVALID; - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; list_add_tail(&vlan->list, &table->vlan_list); @@ -5205,9 +5468,10 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mc[i]); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->default_filters[i]); kfree(vlan); } diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index d0ed7f71ea7e..8189a1cd973f 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -27,6 +27,7 @@ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type. * Used for RX default unicast and multicast/broadcast filters. 
* * Only some combinations are supported, depending on NIC type: @@ -54,6 +55,7 @@ enum efx_filter_match_flags { EFX_FILTER_MATCH_OUTER_VID = 0x0100, EFX_FILTER_MATCH_IP_PROTO = 0x0200, EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, + EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800, }; /** @@ -98,6 +100,26 @@ enum efx_filter_flags { EFX_FILTER_FLAG_TX = 0x10, }; +/** enum efx_encap_type - types of encapsulation + * @EFX_ENCAP_TYPE_NONE: no encapsulation + * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation + * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation + * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation + * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame + * + * Contains both enumerated types and flags. + * To get just the type, OR with @EFX_ENCAP_TYPES_MASK. + */ +enum efx_encap_type { + EFX_ENCAP_TYPE_NONE = 0, + EFX_ENCAP_TYPE_VXLAN = 1, + EFX_ENCAP_TYPE_NVGRE = 2, + EFX_ENCAP_TYPE_GENEVE = 3, + + EFX_ENCAP_TYPES_MASK = 7, + EFX_ENCAP_FLAG_IPV6 = 8, +}; + /** * struct efx_filter_spec - specification for a hardware filter * @match_flags: Match type flags, from &enum efx_filter_match_flags @@ -118,6 +140,8 @@ enum efx_filter_flags { * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set + * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if + * %EFX_FILTER_MATCH_ENCAP_TYPE is set * * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be * used to initialise the structure. The efx_filter_set_*() functions @@ -144,7 +168,8 @@ struct efx_filter_spec { __be32 rem_host[4]; __be16 loc_port; __be16 rem_port; - /* total 64 bytes */ + u32 encap_type:4; + /* total 65 bytes */ }; enum { @@ -269,4 +294,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) return 0; } +static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec, + enum efx_encap_type encap_type) +{ + spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->encap_type = encap_type; +} + +static inline enum efx_encap_type efx_filter_get_encap_type( + const struct efx_filter_spec *spec) +{ + if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE) + return spec->encap_type; + return EFX_ENCAP_TYPE_NONE; +} #endif /* EFX_FILTER_H */ -- cgit v1.2.3 From 158f323b9868b59967ad96957c4ca388161be321 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 27 Jan 2017 07:11:27 -0800 Subject: net: adjust skb->truesize in pskb_expand_head() Slava Shwartsman reported a warning in skb_try_coalesce(), when we detect skb->truesize is completely wrong. In his case, issue came from IPv6 reassembly coping with malicious datagrams, that forced various pskb_may_pull() to reallocate a bigger skb->head than the one allocated by NIC driver before entering GRO layer. Current code does not change skb->truesize, leaving this burden to callers if they care enough. Blindly changing skb->truesize in pskb_expand_head() is not easy, as some producers might track skb->truesize, for example in xmit path for back pressure feedback (sk->sk_wmem_alloc) We can detect the cases where it should be safe to change skb->truesize : 1) skb is not attached to a socket. 2) If it is attached to a socket, destructor is sock_edemux() My audit gave only two callers doing their own skb->truesize manipulation. I had to remove skb parameter in sock_edemux macro when CONFIG_INET is not set to avoid a compile error. 
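For reference, the safety check this adds at the end of pskb_expand_head()
boils down to the following (condensed from the hunk below; osize and size
are the head sizes before and after reallocation):

	/* skb not attached to a socket, or owned via sock_edemux (rx
	 * early demux): no producer is tracking truesize, safe to fix up
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;
	/* otherwise leave truesize alone: the owning socket may account
	 * for it, e.g. sk->sk_wmem_alloc on the xmit path
	 */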
Signed-off-by: Eric Dumazet Reported-by: Slava Shwartsman Signed-off-by: David S. Miller --- include/net/sock.h | 2 +- net/core/skbuff.c | 14 +++++++++++--- net/netlink/af_netlink.c | 8 +++----- net/wireless/util.c | 2 -- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 7144750d14e5..94e65fd70354 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1534,7 +1534,7 @@ void sock_efree(struct sk_buff *skb); #ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); #else -#define sock_edemux(skb) sock_efree(skb) +#define sock_edemux sock_efree #endif int sock_setsockopt(struct socket *sock, int level, int op, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f8dbe4a7ab46..26c1344cc23e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1192,10 +1192,10 @@ EXPORT_SYMBOL(__pskb_copy_fclone); int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask) { - int i; - u8 *data; - int size = nhead + skb_end_offset(skb) + ntail; + int i, osize = skb_end_offset(skb); + int size = osize + nhead + ntail; long off; + u8 *data; BUG_ON(nhead < 0); @@ -1257,6 +1257,14 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->hdr_len = 0; skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); + + /* It is not generally safe to change skb->truesize. + * For the moment, we really care of rx path, or + * when skb is orphaned (not attached to a socket). + */ + if (!skb->sk || skb->destructor == sock_edemux) + skb->truesize += size - osize; + return 0; nofrags: diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index edcc1e19ad53..7b73c7c161a9 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1210,11 +1210,9 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) skb = nskb; } - if (!pskb_expand_head(skb, 0, -delta, - (allocation & ~__GFP_DIRECT_RECLAIM) | - __GFP_NOWARN | __GFP_NORETRY)) - skb->truesize -= delta; - + pskb_expand_head(skb, 0, -delta, + (allocation & ~__GFP_DIRECT_RECLAIM) | + __GFP_NOWARN | __GFP_NORETRY); return skb; } diff --git a/net/wireless/util.c b/net/wireless/util.c index 1b9296882dcd..68e5f2ecee1a 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -618,8 +618,6 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) return -ENOMEM; - - skb->truesize += head_need; } if (encaps_data) { -- cgit v1.2.3 From 849a564b7e28db7afed18d4b921303b7bd883112 Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Fri, 20 Jan 2017 13:49:44 +0200 Subject: wil6210: add disable_ap_sme module parameter By default, AP SME is handled by driver/FW. In case disable_ap_sme is true, driver doesn't turn-on WIPHY_FLAG_HAVE_AP_SME and the responsibility for AP SME is passed to user space. With AP SME disabled, driver reports assoc request frame to user space which is then responsible for sending assoc response frame and for sending NL80211_CMD_NEW_STATION. Driver also reports disassoc frame to user space which should then send NL80211_CMD_DEL_STATION. NL80211_CMD_SET_STATION with NL80211_STA_FLAG_AUTHORIZED is used by user space to allow/disallow data transmit. 
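The driver-side hook for the authorization step is small; condensed from
the new wil_cfg80211_change_station() below (CID/vring lookup and error
handling omitted):

	/* NL80211_STA_FLAG_AUTHORIZED from user space opens/closes the
	 * 802.1x gate on the station's TX vring
	 */
	authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
	txdata->dot1x_open = authorize ? 1 : 0;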
Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 92 +++++++++++++++++++++++++++-- drivers/net/wireless/ath/wil6210/main.c | 10 +++- drivers/net/wireless/ath/wil6210/wil6210.h | 9 ++- drivers/net/wireless/ath/wil6210/wmi.c | 68 +++++++++++++++++---- drivers/net/wireless/ath/wil6210/wmi.h | 23 +++++++- 5 files changed, 178 insertions(+), 24 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 54dd11670fdb..e6001bba85ce 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,6 +20,10 @@ #define WIL_MAX_ROC_DURATION_MS 5000 +bool disable_ap_sme; +module_param(disable_ap_sme, bool, S_IRUGO); +MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME"); + #define CHAN60G(_channel, _flags) { \ .band = NL80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ @@ -62,9 +66,16 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { }, [NL80211_IFTYPE_AP] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | - BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + BIT(IEEE80211_STYPE_PROBE_RESP >> 4) | + BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | - BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | @@ -1322,6 +1333,28 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_add_station(struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "add station %pM aid %d\n", mac, params->aid); + + if (!disable_ap_sme) { + wil_err(wil, "not supported with AP SME enabled\n"); + return -EOPNOTSUPP; + } + + if (params->aid > WIL_MAX_DMG_AID) { + wil_err(wil, "invalid aid\n"); + return -EINVAL; + } + + return wmi_new_sta(wil, mac, params->aid); +} + static int wil_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) @@ -1338,6 +1371,52 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_change_station(struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int authorize; + int cid, i; + struct vring_tx_data *txdata = NULL; + + wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x\n", mac, + params->sta_flags_mask, params->sta_flags_set); + + if (!disable_ap_sme) { + wil_dbg_misc(wil, "not supported with AP SME enabled\n"); + return -EOPNOTSUPP; + } + + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) + return 0; + + cid = wil_find_cid(wil, mac); + if (cid < 0) { + wil_err(wil, "station not found\n"); + return -ENOLINK; + } + + for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) + if (wil->vring2cid_tid[i][0] == cid) { + txdata = 
&wil->vring_tx_data[i]; + break; + } + + if (!txdata) { + wil_err(wil, "vring data not found\n"); + return -ENOLINK; + } + + authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED); + txdata->dot1x_open = authorize ? 1 : 0; + wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i, + txdata->dot1x_open); + + return 0; +} + /* probe_client handling */ static void wil_probe_client_handle(struct wil6210_priv *wil, struct wil_probe_client_req *req) @@ -1521,7 +1600,9 @@ static const struct cfg80211_ops wil_cfg80211_ops = { .change_beacon = wil_cfg80211_change_beacon, .start_ap = wil_cfg80211_start_ap, .stop_ap = wil_cfg80211_stop_ap, + .add_station = wil_cfg80211_add_station, .del_station = wil_cfg80211_del_station, + .change_station = wil_cfg80211_change_station, .probe_client = wil_cfg80211_probe_client, .change_bss = wil_cfg80211_change_bss, /* P2P device */ @@ -1542,10 +1623,11 @@ static void wil_wiphy_init(struct wiphy *wiphy) BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_MONITOR); - wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_PS_ON_BY_DEFAULT; + if (!disable_ap_sme) + wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME; dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", __func__, wiphy->flags); wiphy->probe_resp_offload = diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index e2e021bcaa03..57c19d0cb354 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -176,8 +176,12 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) sta->status); /* inform upper/lower layers */ if (sta->status != wil_sta_unused) { - if (!from_event) - wmi_disconnect_sta(wil, sta->addr, reason_code, true); + if (!from_event) { + bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ? 
+ disable_ap_sme : false; + wmi_disconnect_sta(wil, sta->addr, reason_code, + true, del_sta); + } switch (wdev->iftype) { case NL80211_IFTYPE_AP: diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 237e1666df2d..34c4a00829ef 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -33,6 +33,7 @@ extern int agg_wsize; extern u32 vring_idle_trsh; extern bool rx_align_2; extern bool debug_fw; +extern bool disable_ap_sme; #define WIL_NAME "wil6210" #define WIL_FW_NAME "wil6210.fw" /* code */ @@ -98,6 +99,9 @@ static inline u32 wil_mtu2macbuf(u32 mtu) #define WIL6210_RX_HIGH_TRSH_INIT (0) #define WIL6210_RX_HIGH_TRSH_DEFAULT \ (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3)) +#define WIL_MAX_DMG_AID 254 /* for DMG only 1-254 allowed (see + * 802.11REVmc/D5.0, section 9.4.1.8) + */ /* Hardware definitions begin */ /* @@ -816,8 +820,8 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, - bool full_disconnect); +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, + u16 reason, bool full_disconnect, bool del_sta); int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout); int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason); int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason); @@ -827,6 +831,7 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile); int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short); int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short); +int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid); int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 7585003bef67..556acd3518c5 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -556,7 +556,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n", __func__, evt->cid, rc); wmi_disconnect_sta(wil, wil->sta[evt->cid].addr, - WLAN_REASON_UNSPECIFIED, false); + WLAN_REASON_UNSPECIFIED, false, false); } else { wil_info(wil, "%s: successful connection to CID %d\n", __func__, evt->cid); @@ -583,8 +583,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { - if (rc) + if (rc) { + if (disable_ap_sme) + /* notify new_sta has failed */ + cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL); goto out; + } memset(&sinfo, 0, sizeof(sinfo)); @@ -687,6 +691,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) { struct wmi_vring_en_event *evt = d; u8 vri = evt->vring_index; + struct wireless_dev *wdev = wil_to_wdev(wil); wil_dbg_wmi(wil, "Enable vring %d\n", vri); @@ -694,7 +699,12 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) wil_err(wil, "Enable for invalid vring %d\n", vri); return; } - wil->vring_tx_data[vri].dot1x_open = true; + + if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme) + /* in AP mode with disable_ap_sme, this is done by + * wil_cfg80211_change_station() + */ + wil->vring_tx_data[vri].dot1x_open = true; if (vri == wil->bcast_vring) /* no BA for bcast */ return; if (agg_wsize >= 0) @@ -1069,6 +1079,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, .pcp_max_assoc_sta = max_assoc_sta, .hidden_ssid = hidden_ssid, .is_go = is_go, + .disable_ap_sme = disable_ap_sme, }; struct { struct wmi_cmd_hdr wmi; @@ -1086,6 +1097,13 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; } + if (disable_ap_sme && + !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME, + wil->fw_capabilities)) { + wil_err(wil, "disable_ap_sme not supported by FW\n"); + return -EOPNOTSUPP; + } + /* * Processing time may be huge, in case of secure AP it takes about * 3500ms for FW to start AP @@ -1456,12 +1474,15 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) return 0; } -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, - bool full_disconnect) +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, + u16 reason, bool full_disconnect, bool del_sta) { int rc; u16 reason_code; - struct wmi_disconnect_sta_cmd cmd = { + struct wmi_disconnect_sta_cmd disc_sta_cmd = { + .disconnect_reason = cpu_to_le16(reason), + }; + struct wmi_del_sta_cmd del_sta_cmd = { .disconnect_reason = cpu_to_le16(reason), }; struct { @@ -1469,12 +1490,19 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, struct wmi_disconnect_event evt; } __packed reply; - ether_addr_copy(cmd.dst_mac, mac); - wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); - rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd), - WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); + if (del_sta) { + ether_addr_copy(del_sta_cmd.dst_mac, mac); + rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd, + sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID, + &reply, sizeof(reply), 1000); + } else { + ether_addr_copy(disc_sta_cmd.dst_mac, mac); + rc = 
wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &disc_sta_cmd, + sizeof(disc_sta_cmd), WMI_DISCONNECT_EVENTID, + &reply, sizeof(reply), 1000); + } /* failure to disconnect in reasonable time treated as FW error */ if (rc) { wil_fw_error_recovery(wil); @@ -1686,6 +1714,24 @@ int wmi_abort_scan(struct wil6210_priv *wil) return rc; } +int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid) +{ + int rc; + struct wmi_new_sta_cmd cmd = { + .aid = aid, + }; + + wil_dbg_wmi(wil, "new sta %pM, aid %d\n", mac, aid); + + ether_addr_copy(cmd.dst_mac, mac); + + rc = wmi_send(wil, WMI_NEW_STA_CMDID, &cmd, sizeof(cmd)); + if (rc) + wil_err(wil, "Failed to send new sta (%d)\n", rc); + + return rc; +} + void wmi_event_flush(struct wil6210_priv *wil) { struct pending_wmi_event *evt, *t; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index d93a4d490d24..9c4a0bdc51f3 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity * * Permission to use, copy, modify, and/or distribute this software for any @@ -56,6 +56,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_PS_CONFIG = 1, WMI_FW_CAPABILITY_RF_SECTORS = 2, WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, + WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, WMI_FW_CAPABILITY_MAX, }; @@ -187,6 +188,8 @@ enum wmi_command_id { WMI_AOA_MEAS_CMDID = 0x923, WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930, WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931, + WMI_NEW_STA_CMDID = 0x935, + WMI_DEL_STA_CMDID = 0x936, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, @@ -543,7 +546,8 @@ struct wmi_pcp_start_cmd { u8 pcp_max_assoc_sta; u8 hidden_ssid; u8 is_go; - u8 reserved0[7]; + u8 reserved0[6]; + u8 disable_ap_sme; u8 network_type; u8 channel; u8 disable_sec_offload; @@ -902,6 +906,18 @@ struct wmi_set_mgmt_retry_limit_cmd { u8 reserved[3]; } __packed; +/* WMI_NEW_STA_CMDID */ +struct wmi_new_sta_cmd { + u8 dst_mac[WMI_MAC_LEN]; + u8 aid; +} __packed; + +/* WMI_DEL_STA_CMDID */ +struct wmi_del_sta_cmd { + u8 dst_mac[WMI_MAC_LEN]; + __le16 disconnect_reason; +} __packed; + enum wmi_tof_burst_duration { WMI_TOF_BURST_DURATION_250_USEC = 2, WMI_TOF_BURST_DURATION_500_USEC = 3, @@ -1292,7 +1308,7 @@ struct wmi_connect_event { u8 assoc_info[0]; } __packed; -/* WMI_DISCONNECT_EVENTID */ +/* disconnect_reason */ enum wmi_disconnect_reason { WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01, /* bmiss */ @@ -1310,6 +1326,7 @@ enum wmi_disconnect_reason { WMI_DIS_REASON_IBSS_MERGE = 0x0E, }; +/* WMI_DISCONNECT_EVENTID */ struct wmi_disconnect_event { /* reason code, see 802.11 spec. */ __le16 protocol_reason_status; -- cgit v1.2.3 From a351f2f537b711bed3d0f6a75d7e9fddcbe51ffe Mon Sep 17 00:00:00 2001 From: Lazar Alexei Date: Fri, 20 Jan 2017 13:49:45 +0200 Subject: wil6210: support loading dedicated image for sparrow-plus devices Driver may be used in platforms where some use sparrow cards while other use sparrow-plus cards, where different FW image is needed. Add the capability to load dedicated FW image in case sparrow-plus card is detected and fallback to default image if such does not exist. 
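Condensed, the selection logic added to wil_set_capabilities() amounts to
the following (Sparrow JTAG ID check and unknown-revision handling omitted):

	wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
	if (chip_revision == REVISION_ID_SPARROW_D0 &&
	    wil_fw_verify_file_exists(wil, WIL_FW_NAME_SPARROW_PLUS))
		wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
	/* wil->wil_fw_name is what wil_reset() later passes to
	 * wil_request_firmware()
	 */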
Signed-off-by: Lazar Alexei Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 3 ++- drivers/net/wireless/ath/wil6210/fw.c | 7 +++--- drivers/net/wireless/ath/wil6210/fw_inc.c | 21 ++++++++++++++++- drivers/net/wireless/ath/wil6210/main.c | 8 +++---- drivers/net/wireless/ath/wil6210/pcie_bus.c | 36 ++++++++++++++++++++++------- drivers/net/wireless/ath/wil6210/wil6210.h | 18 +++++++++++---- 6 files changed, 72 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 5e4058a4037b..e5a838231ee7 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -1699,6 +1699,7 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(recovery_count, S_IRUGO, doff_u32), WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8), + WIL_FIELD(chip_revision, S_IRUGO, doff_u8), {}, }; diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 82aae2d705b4..540fc20984d8 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -19,8 +19,9 @@ #include "wil6210.h" #include "fw.h" -MODULE_FIRMWARE(WIL_FW_NAME); -MODULE_FIRMWARE(WIL_FW2_NAME); +MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT); +MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS); +MODULE_FIRMWARE(WIL_BOARD_FILE_NAME); static void wil_memset_toio_32(volatile void __iomem *dst, u32 val, diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index 8f40eb301924..f4901587c005 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -537,3 +537,22 @@ out: release_firmware(fw); return rc; } + +/** + * wil_fw_verify_file_exists - checks if firmware file exist + * + * @wil: driver context + * @name: firmware file name + * + * return value - boolean, true for success, false for failure + */ +bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, name, wil_to_dev(wil)); + if (!rc) + release_firmware(fw); + return rc != -ENOENT; +} diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 57c19d0cb354..3580c6d3015a 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -934,16 +934,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_set_oob_mode(wil, oob_mode); if (load_fw) { - wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME, - WIL_FW2_NAME); + wil_info(wil, "Use firmware <%s> + board <%s>\n", + wil->wil_fw_name, WIL_BOARD_FILE_NAME); wil_halt_cpu(wil); memset(wil->fw_version, 0, sizeof(wil->fw_version)); /* Loading f/w from the file */ - rc = wil_request_firmware(wil, WIL_FW_NAME, true); + rc = wil_request_firmware(wil, wil->wil_fw_name, true); if (rc) return rc; - rc = wil_request_firmware(wil, WIL_FW2_NAME, true); + rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 44746ca0d2e6..bdb3d37400a6 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -36,18 +36,38 @@ static int wil6210_pm_notify(struct notifier_block *notify_block, static void wil_set_capabilities(struct wil6210_priv *wil) { - u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); + u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); + u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) & + RGF_USER_REVISION_ID_MASK); bitmap_zero(wil->hw_capabilities, hw_capability_last); bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX); - - switch (rev_id) { - case JTAG_DEV_ID_SPARROW_B0: - wil->hw_name = "Sparrow B0"; - wil->hw_version = HW_VER_SPARROW_B0; + wil->wil_fw_name = WIL_FW_NAME_DEFAULT; + wil->chip_revision = chip_revision; + + switch (jtag_id) { + case JTAG_DEV_ID_SPARROW: + switch (chip_revision) { + case REVISION_ID_SPARROW_D0: + wil->hw_name = "Sparrow D0"; + wil->hw_version = HW_VER_SPARROW_D0; + if (wil_fw_verify_file_exists(wil, + WIL_FW_NAME_SPARROW_PLUS)) + wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS; + break; + case REVISION_ID_SPARROW_B0: + wil->hw_name = "Sparrow B0"; + wil->hw_version = HW_VER_SPARROW_B0; + break; + default: + wil->hw_name = "Unknown"; + wil->hw_version = HW_VER_UNKNOWN; + break; + } break; default: - wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id); + wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", + jtag_id, chip_revision); wil->hw_name = "Unknown"; wil->hw_version = HW_VER_UNKNOWN; } @@ -55,7 +75,7 @@ void wil_set_capabilities(struct wil6210_priv *wil) wil_info(wil, "Board hardware is %s\n", wil->hw_name); /* extract FW capabilities from file without loading the FW */ - wil_request_firmware(wil, WIL_FW_NAME, false); + wil_request_firmware(wil, wil->wil_fw_name, false); } void wil_disable_irq(struct wil6210_priv *wil) diff --git 
a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 34c4a00829ef..65f201631fe9 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -36,8 +36,9 @@ extern bool debug_fw; extern bool disable_ap_sme; #define WIL_NAME "wil6210" -#define WIL_FW_NAME "wil6210.fw" /* code */ -#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */ +#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */ +#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */ +#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ @@ -253,7 +254,12 @@ struct RGF_ICR { #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ - #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f) + #define JTAG_DEV_ID_SPARROW (0x2632072f) + +#define RGF_USER_REVISION_ID (0x88afe4) +#define RGF_USER_REVISION_ID_MASK (3) + #define REVISION_ID_SPARROW_B0 (0x0) + #define REVISION_ID_SPARROW_D0 (0x3) /* crash codes for FW/Ucode stored here */ #define RGF_FW_ASSERT_CODE (0x91f020) @@ -261,7 +267,8 @@ struct RGF_ICR { enum { HW_VER_UNKNOWN, - HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */ + HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */ + HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */ }; /* popular locations */ @@ -587,7 +594,9 @@ struct wil6210_priv { DECLARE_BITMAP(status, wil_status_last); u8 fw_version[ETHTOOL_FWVERS_LEN]; u32 hw_version; + u8 chip_revision; const char *hw_name; + const char *wil_fw_name; DECLARE_BITMAP(hw_capabilities, hw_capability_last); DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX); u8 n_mids; /* number of additional MIDs as reported by FW */ @@ -923,6 +932,7 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type); int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); int wil_request_firmware(struct wil6210_priv *wil, const char *name, bool load); +bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name); int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_suspend(struct wil6210_priv *wil, bool is_runtime); -- cgit v1.2.3 From af3db60a30331d3a37b534570bc52dd64a7c0e5d Mon Sep 17 00:00:00 2001 From: Lazar Alexei Date: Fri, 20 Jan 2017 13:49:46 +0200 Subject: wil6210: remove __func__ from debug printouts __func__ is automatically added to printouts by dynamic debug mechanism and by wil_info/wil_err macros. Remove __func__ from debug printouts to avoid duplication. 
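A typical conversion, taken from wil_reset() in main.c (the same pattern
repeats throughout the patch):

	/* before: function name ends up printed twice, since the dynamic
	 * debug mechanism and the wil_err/wil_info macros already add it
	 */
	wil_dbg_misc(wil, "%s()\n", __func__);

	/* after */
	wil_dbg_misc(wil, "reset\n");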
Signed-off-by: Lazar Alexei Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 79 +++++++++++------------ drivers/net/wireless/ath/wil6210/debugfs.c | 4 +- drivers/net/wireless/ath/wil6210/ethtool.c | 10 +-- drivers/net/wireless/ath/wil6210/interrupt.c | 30 ++++----- drivers/net/wireless/ath/wil6210/main.c | 50 +++++++------- drivers/net/wireless/ath/wil6210/netdev.c | 14 ++-- drivers/net/wireless/ath/wil6210/p2p.c | 36 +++++------ drivers/net/wireless/ath/wil6210/pcie_bus.c | 14 ++-- drivers/net/wireless/ath/wil6210/pm.c | 17 ++--- drivers/net/wireless/ath/wil6210/pmc.c | 79 +++++++++++------------ drivers/net/wireless/ath/wil6210/rx_reorder.c | 8 +-- drivers/net/wireless/ath/wil6210/txrx.c | 36 ++++++----- drivers/net/wireless/ath/wil6210/wil_crash_dump.c | 18 +++--- drivers/net/wireless/ath/wil6210/wmi.c | 50 +++++++------- 14 files changed, 211 insertions(+), 234 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index e6001bba85ce..f8499a823662 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -205,7 +205,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy, int cid = wil_find_cid(wil, mac); - wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); + wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid); if (cid < 0) return cid; @@ -244,7 +244,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy, return -ENOENT; ether_addr_copy(mac, wil->sta[cid].addr); - wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); + wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid); rc = wil_cid_fill_sinfo(wil, cid, sinfo); @@ -261,16 +261,15 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *p2p_wdev; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "add_iface\n"); if (type != NL80211_IFTYPE_P2P_DEVICE) { - wil_err(wil, "%s: unsupported iftype %d\n", __func__, type); + wil_err(wil, "unsupported iftype %d\n", type); return ERR_PTR(-EINVAL); } if (wil->p2p_wdev) { - wil_err(wil, "%s: P2P_DEVICE interface already created\n", - __func__); + wil_err(wil, "P2P_DEVICE interface already created\n"); return ERR_PTR(-EINVAL); } @@ -293,11 +292,10 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "del_iface\n"); if (wdev != wil->p2p_wdev) { - wil_err(wil, "%s: delete of incorrect interface 0x%p\n", - __func__, wdev); + wil_err(wil, "delete of incorrect interface 0x%p\n", wdev); return -EINVAL; } @@ -315,7 +313,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct wireless_dev *wdev = wil_to_wdev(wil); int rc; - wil_dbg_misc(wil, "%s() type=%d\n", __func__, type); + wil_dbg_misc(wil, "change_iface: type=%d\n", type); if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) { wil_dbg_misc(wil, "interface is up. 
resetting...\n"); @@ -362,8 +360,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, uint i, n; int rc; - wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n", - __func__, wdev, wdev->iftype); + wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype); /* check we are client side */ switch (wdev->iftype) { @@ -568,7 +565,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, int rc = 0; enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "connect\n"); wil_print_connect_params(wil, sme); if (test_bit(wil_status_fwconnecting, wil->status) || @@ -700,12 +697,11 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); + wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code); if (!(test_bit(wil_status_fwconnecting, wil->status) || test_bit(wil_status_fwconnected, wil->status))) { - wil_err(wil, "%s: Disconnect was called while disconnected\n", - __func__); + wil_err(wil, "Disconnect was called while disconnected\n"); return 0; } @@ -713,7 +709,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, WMI_DISCONNECT_EVENTID, NULL, 0, WIL6210_DISCONNECT_TO_MS); if (rc) - wil_err(wil, "%s: disconnect error %d\n", __func__, rc); + wil_err(wil, "disconnect error %d\n", rc); return rc; } @@ -761,7 +757,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * different from currently "listened" channel and fail if it is. */ - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "mgmt_tx\n"); print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len); cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); @@ -822,7 +818,7 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, break; } } - wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]); + wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]); return rc; } @@ -927,13 +923,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, return -EINVAL; } - wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__, + wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n", mac_addr, key_usage_str[key_usage], key_index, params->seq_len, params->seq); if (IS_ERR(cs)) { - wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n", - __func__, mac_addr, key_usage_str[key_usage], key_index, + wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n", + mac_addr, key_usage_str[key_usage], key_index, params->seq_len, params->seq); return -EINVAL; } @@ -942,8 +938,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) { wil_err(wil, - "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n", - params->seq_len, __func__, mac_addr, + "Wrong PN len %d, %pM %s[%d] PN %*phN\n", + params->seq_len, mac_addr, key_usage_str[key_usage], key_index, params->seq_len, params->seq); return -EINVAL; @@ -967,11 +963,11 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage, mac_addr); - wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr, + wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr, key_usage_str[key_usage], key_index); if (IS_ERR(cs)) - wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__, + wil_info(wil, "Not connected, %pM %s[%d]\n", mac_addr, key_usage_str[key_usage], key_index); if (!IS_ERR_OR_NULL(cs)) @@ -988,7 +984,7 @@ static int wil_cfg80211_set_default_key(struct wiphy 
*wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s: entered\n", __func__); + wil_dbg_misc(wil, "set_default_key: entered\n"); return 0; } @@ -1001,8 +997,9 @@ static int wil_remain_on_channel(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; - wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n", - __func__, chan->center_freq, duration, wdev->iftype); + wil_dbg_misc(wil, + "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n", + chan->center_freq, duration, wdev->iftype); rc = wil_p2p_listen(wil, wdev, duration, chan, cookie); return rc; @@ -1014,7 +1011,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "cancel_remain_on_channel\n"); return wil_p2p_cancel_listen(wil, cookie); } @@ -1170,9 +1167,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, if (pbss) wmi_nettype = WMI_NETTYPE_P2P; - wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go); + wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go); if (is_go && !pbss) { - wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__); + wil_err(wil, "P2P GO must be in PBSS\n"); return -ENOTSUPP; } @@ -1227,7 +1224,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, int rc; u32 privacy = 0; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "change_beacon\n"); wil_print_bcon_data(bcon); if (bcon->tail && @@ -1266,7 +1263,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct cfg80211_crypto_settings *crypto = &info->crypto; u8 hidden_ssid; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "start_ap\n"); if (!channel) { wil_err(wil, "AP: No channel???\n"); @@ -1317,7 +1314,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "stop_ap\n"); netif_carrier_off(ndev); wil_set_recovery_state(wil, fw_recovery_idle); @@ -1361,7 +1358,7 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac, + wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac, params->reason_code); mutex_lock(&wil->mutex); @@ -1466,7 +1463,7 @@ void wil_probe_client_flush(struct wil6210_priv *wil) { struct wil_probe_client_req *req, *t; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "probe_client_flush\n"); mutex_lock(&wil->probe_client_mutex); @@ -1486,7 +1483,7 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy, struct wil_probe_client_req *req; int cid = wil_find_cid(wil, peer); - wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid); + wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid); if (cid < 0) return -ENOLINK; @@ -1514,7 +1511,7 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); if (params->ap_isolate >= 0) { - wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__, + wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n", wil->ap_isolate, params->ap_isolate); wil->ap_isolate = params->ap_isolate; } @@ -1527,7 +1524,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s: entered\n", __func__); + wil_dbg_misc(wil, "start_p2p_device: entered\n"); wil->p2p.p2p_dev_started = 1; return 0; } @@ -1541,7 +1538,7 @@ 
static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, if (!p2p->p2p_dev_started) return; - wil_dbg_misc(wil, "%s: entered\n", __func__); + wil_dbg_misc(wil, "stop_p2p_device: entered\n"); mutex_lock(&wil->mutex); mutex_lock(&wil->p2p_wdev_mutex); wil_p2p_stop_radio_operations(wil); diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index e5a838231ee7..db6527ada946 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -813,7 +813,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, rc = wil_cfg80211_mgmt_tx(wiphy, wdev, ¶ms, NULL); kfree(frame); - wil_info(wil, "%s() -> %d\n", __func__, rc); + wil_info(wil, "-> %d\n", rc); return len; } @@ -855,7 +855,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, rc1 = wmi_send(wil, cmdid, cmd, cmdlen); kfree(wmi); - wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1); + wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index 7053b62ca8d3..adcfef4dabf7 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -27,7 +27,7 @@ static int wil_ethtoolops_begin(struct net_device *ndev) mutex_lock(&wil->mutex); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_begin\n"); return 0; } @@ -36,7 +36,7 @@ static void wil_ethtoolops_complete(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_complete\n"); mutex_unlock(&wil->mutex); } @@ -48,7 +48,7 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, u32 tx_itr_en, tx_itr_val = 0; u32 rx_itr_en, rx_itr_val = 0; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_get_coalesce\n"); tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL); if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) @@ -68,7 +68,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev, { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__, + wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n", cp->rx_coalesce_usecs, cp->tx_coalesce_usecs); if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 64046e0bd0a2..cab1e5c0e374 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -94,7 +94,7 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil) static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp) { - wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__, + wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n", mask_halp ? 
"true" : "false"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), @@ -103,7 +103,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp) void wil6210_mask_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), BIT_DMA_EP_MISC_ICR_HALP); @@ -111,7 +111,7 @@ void wil6210_mask_halp(struct wil6210_priv *wil) static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_irq_pseudo\n"); wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE); @@ -134,7 +134,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil) static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) { - wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__, + wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n", unmask_halp ? "true" : "false"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), @@ -143,7 +143,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) static void wil6210_unmask_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), BIT_DMA_EP_MISC_ICR_HALP); @@ -151,7 +151,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil) static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_irq_pseudo\n"); set_bit(wil_status_irqen, wil->status); @@ -160,7 +160,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) void wil_mask_irq(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_irq\n"); wil6210_mask_irq_tx(wil); wil6210_mask_irq_rx(wil); @@ -170,7 +170,7 @@ void wil_mask_irq(struct wil6210_priv *wil) void wil_unmask_irq(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_irq\n"); wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC), WIL_ICR_ICC_VALUE); @@ -187,7 +187,7 @@ void wil_unmask_irq(struct wil6210_priv *wil) void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "configure_interrupt_moderation\n"); /* disable interrupt moderation for monitor * to get better timestamp precision @@ -400,7 +400,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) } if (isr & BIT_DMA_EP_MISC_ICR_HALP) { - wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__); + wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); wil6210_mask_halp(wil); isr &= ~BIT_DMA_EP_MISC_ICR_HALP; complete(&wil->halp.comp); @@ -599,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil) void wil6210_set_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "set_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS), BIT_DMA_EP_MISC_ICR_HALP); @@ -607,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil) void wil6210_clear_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "clear_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR), BIT_DMA_EP_MISC_ICR_HALP); @@ -618,7 +618,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) { int rc; - wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? 
"MSI" : "INTx"); + wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); rc = request_threaded_irq(irq, wil6210_hardirq, wil6210_thread_irq, @@ -629,7 +629,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) void wil6210_fini_irq(struct wil6210_priv *wil, int irq) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "fini_irq:\n"); wil_mask_irq(wil); free_irq(irq, wil); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 3580c6d3015a..9b8fa6a182ec 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -172,8 +172,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) struct wil_sta_info *sta = &wil->sta[cid]; might_sleep(); - wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, - sta->status); + wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n", + cid, sta->status); /* inform upper/lower layers */ if (sta->status != wil_sta_unused) { if (!from_event) { @@ -241,7 +241,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, return; might_sleep(); - wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid, + wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid, reason_code, from_event ? "+" : "-"); /* Cases are: @@ -351,7 +351,7 @@ static int wil_wait_for_recovery(struct wil6210_priv *wil) void wil_set_recovery_state(struct wil6210_priv *wil, int state) { - wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__, + wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n", wil->recovery_state, state); wil->recovery_state = state; @@ -493,7 +493,7 @@ int wil_priv_init(struct wil6210_priv *wil) { uint i; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "priv_init\n"); memset(wil->sta, 0, sizeof(wil->sta)); for (i = 0; i < WIL6210_MAX_CID; i++) @@ -568,7 +568,7 @@ out_wmi_wq: void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "disconnect\n"); del_timer_sync(&wil->connect_timer); _wil6210_disconnect(wil, bssid, reason_code, from_event); @@ -576,7 +576,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, void wil_priv_deinit(struct wil6210_priv *wil) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "priv_deinit\n"); wil_set_recovery_state(wil, fw_recovery_idle); del_timer_sync(&wil->scan_timer); @@ -609,7 +609,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable) { - wil_info(wil, "%s: enable=%d\n", __func__, enable); + wil_info(wil, "enable=%d\n", enable); if (enable) wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); else @@ -865,7 +865,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "reset\n"); WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(test_bit(wil_status_napi_en, wil->status)); @@ -888,9 +888,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) rc = wil->platform_ops.notify(wil->platform_handle, WIL_PLATFORM_EVT_PRE_RESET); if (rc) - wil_err(wil, - "%s: PRE_RESET platform notify failed, rc %d\n", - __func__, rc); + wil_err(wil, "PRE_RESET platform notify failed, rc %d\n", + rc); } set_bit(wil_status_resetting, wil->status); @@ -980,8 +979,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) /* check FW is responsive */ rc = wmi_echo(wil); if (rc) { - wil_err(wil, "%s: wmi_echo failed, rc 
%d\n", - __func__, rc); + wil_err(wil, "wmi_echo failed, rc %d\n", rc); return rc; } @@ -991,9 +989,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) rc = wil->platform_ops.notify(wil->platform_handle, WIL_PLATFORM_EVT_FW_RDY); if (rc) { - wil_err(wil, - "%s: FW_RDY notify failed, rc %d\n", - __func__, rc); + wil_err(wil, "FW_RDY notify failed, rc %d\n", + rc); rc = 0; } } @@ -1077,7 +1074,7 @@ int wil_up(struct wil6210_priv *wil) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "up\n"); mutex_lock(&wil->mutex); rc = __wil_up(wil); @@ -1117,7 +1114,7 @@ int wil_down(struct wil6210_priv *wil) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "down\n"); wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); @@ -1150,25 +1147,24 @@ void wil_halp_vote(struct wil6210_priv *wil) mutex_lock(&wil->halp.lock); - wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); if (++wil->halp.ref_cnt == 1) { wil6210_set_halp(wil); rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); if (!rc) { - wil_err(wil, "%s: HALP vote timed out\n", __func__); + wil_err(wil, "HALP vote timed out\n"); /* Mask HALP as done in case the interrupt is raised */ wil6210_mask_halp(wil); } else { wil_dbg_irq(wil, - "%s: HALP vote completed after %d ms\n", - __func__, + "halp_vote: HALP vote completed after %d ms\n", jiffies_to_msecs(to_jiffies - rc)); } } - wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); mutex_unlock(&wil->halp.lock); @@ -1180,15 +1176,15 @@ void wil_halp_unvote(struct wil6210_priv *wil) mutex_lock(&wil->halp.lock); - wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); if (--wil->halp.ref_cnt == 0) { wil6210_clear_halp(wil); - wil_dbg_irq(wil, "%s: HALP unvote\n", __func__); + wil_dbg_irq(wil, "HALP unvote\n"); } - wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); mutex_unlock(&wil->halp.lock); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 6676001dcbca..d5df744a12b3 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,10 +22,10 @@ static int wil_open(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "open\n"); if (debug_fw) { - wil_err(wil, "%s() while in debug_fw mode\n", __func__); + wil_err(wil, "while in debug_fw mode\n"); return -EINVAL; } @@ -36,7 +36,7 @@ static int wil_stop(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "stop\n"); return wil_down(wil); } @@ -132,7 +132,7 @@ void *wil_if_alloc(struct device *dev) wil->wdev = wdev; wil->radio_wdev = wdev; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_alloc\n"); rc = wil_priv_init(wil); if (rc) { @@ -179,7 +179,7 @@ void wil_if_free(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_free\n"); if (!ndev) return; @@ -234,7 +234,7 @@ void wil_if_remove(struct wil6210_priv *wil) struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil_to_wdev(wil); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_remove\n"); unregister_netdev(ndev); wiphy_unregister(wdev->wiphy); diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c index fbae99525e01..792484756654 100644 --- a/drivers/net/wireless/ath/wil6210/p2p.c +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -69,7 +69,7 @@ void wil_p2p_discovery_timer_fn(ulong x) { struct wil6210_priv *wil = (void *)x; - wil_dbg_misc(wil, "%s\n", __func__); + wil_dbg_misc(wil, "p2p_discovery_timer_fn\n"); schedule_work(&wil->p2p.discovery_expired_work); } @@ -80,27 +80,25 @@ int wil_p2p_search(struct wil6210_priv *wil, int rc; struct wil_p2p_info *p2p = &wil->p2p; - wil_dbg_misc(wil, "%s: channel %d\n", - __func__, P2P_DMG_SOCIAL_CHANNEL); + wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL); lockdep_assert_held(&wil->mutex); if (p2p->discovery_started) { - wil_err(wil, "%s: search failed. discovery already ongoing\n", - __func__); + wil_err(wil, "search failed. 
discovery already ongoing\n"); rc = -EBUSY; goto out; } rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI); if (rc) { - wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__); + wil_err(wil, "wmi_p2p_cfg failed\n"); goto out; } rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); if (rc) { - wil_err(wil, "%s: wmi_set_ssid failed\n", __func__); + wil_err(wil, "wmi_set_ssid failed\n"); goto out_stop; } @@ -108,8 +106,7 @@ int wil_p2p_search(struct wil6210_priv *wil, rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); if (rc) { - wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n", - __func__); + wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n"); goto out_stop; } @@ -119,14 +116,13 @@ int wil_p2p_search(struct wil6210_priv *wil, rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, request->ie_len, request->ie); if (rc) { - wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n", - __func__); + wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n"); goto out_stop; } rc = wmi_start_search(wil); if (rc) { - wil_err(wil, "%s: wmi_start_search failed\n", __func__); + wil_err(wil, "wmi_start_search failed\n"); goto out_stop; } @@ -153,12 +149,12 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev, if (!chan) return -EINVAL; - wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration); + wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration); mutex_lock(&wil->mutex); if (p2p->discovery_started) { - wil_err(wil, "%s: discovery already ongoing\n", __func__); + wil_err(wil, "discovery already ongoing\n"); rc = -EBUSY; goto out; } @@ -220,8 +216,8 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) mutex_lock(&wil->mutex); if (cookie != p2p->cookie) { - wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n", - __func__, p2p->cookie, cookie); + wil_info(wil, "Cookie mismatch: 0x%016llx vs. 
0x%016llx\n", + p2p->cookie, cookie); mutex_unlock(&wil->mutex); return -ENOENT; } @@ -231,7 +227,7 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) mutex_unlock(&wil->mutex); if (!started) { - wil_err(wil, "%s: listen not started\n", __func__); + wil_err(wil, "listen not started\n"); return -ENOENT; } @@ -253,7 +249,7 @@ void wil_p2p_listen_expired(struct work_struct *work) struct wil6210_priv, p2p); u8 started; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "p2p_listen_expired\n"); mutex_lock(&wil->mutex); started = wil_p2p_stop_discovery(wil); @@ -279,7 +275,7 @@ void wil_p2p_search_expired(struct work_struct *work) struct wil6210_priv, p2p); u8 started; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "p2p_search_expired\n"); mutex_lock(&wil->mutex); started = wil_p2p_stop_discovery(wil); diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index bdb3d37400a6..8baa603bec42 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -100,7 +100,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) int msi_only = pdev->msi_enabled; bool _use_msi = use_msi; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_pcie_enable\n"); pdev->msi_enabled = 0; @@ -145,7 +145,7 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil) { struct pci_dev *pdev = wil->pdev; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_pcie_disable\n"); pci_clear_master(pdev); /* disable and release IRQ */ @@ -309,7 +309,7 @@ static void wil_pcie_remove(struct pci_dev *pdev) struct wil6210_priv *wil = pci_get_drvdata(pdev); void __iomem *csr = wil->csr; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "pcie_remove\n"); #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP @@ -347,8 +347,7 @@ static int wil6210_suspend(struct device *dev, bool is_runtime) struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); rc = wil_can_suspend(wil, is_runtime); if (rc) @@ -374,8 +373,7 @@ static int wil6210_resume(struct device *dev, bool is_runtime) struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); /* allow master */ pci_set_master(pdev); @@ -395,7 +393,7 @@ static int wil6210_pm_notify(struct notifier_block *notify_block, int rc = 0; enum wil_platform_event evt; - wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode); + wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode); switch (mode) { case PM_HIBERNATION_PREPARE: diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 11ee24d509e5..a0acb2d0cb79 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014,2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -21,8 +21,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) int rc = 0; struct wireless_dev *wdev = wil->wdev; - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? 
"runtime" : "system"); + wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system"); if (!netif_running(wil_to_ndev(wil))) { /* can always sleep when down */ @@ -59,7 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) } out: - wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, + wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n", is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); return rc; @@ -70,8 +69,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) int rc = 0; struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); /* if netif up, hardware is alive, shut it down */ if (ndev->flags & IFF_UP) { @@ -86,7 +84,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) rc = wil->platform_ops.suspend(wil->platform_handle); out: - wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? "runtime" : "system", rc); return rc; } @@ -96,8 +94,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) int rc = 0; struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); if (wil->platform_ops.resume) { rc = wil->platform_ops.resume(wil->platform_handle); @@ -115,7 +112,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) rc = wil_up(wil); out: - wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system", rc); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index b9faae0278c9..3ff4f4ce9fef 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -60,7 +60,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, if (wil_is_pmc_allocated(pmc)) { /* sanity check */ - wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__); + wil_err(wil, "ERROR pmc is already allocated\n"); goto no_release_err; } if ((num_descriptors <= 0) || (descriptor_size <= 0)) { @@ -90,21 +90,20 @@ void wil_pmc_alloc(struct wil6210_priv *wil, pmc->num_descriptors = num_descriptors; pmc->descriptor_size = descriptor_size; - wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n", - __func__, num_descriptors, descriptor_size); + wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n", + num_descriptors, descriptor_size); /* allocate descriptors info list in pmc context*/ pmc->descriptors = kcalloc(num_descriptors, sizeof(struct desc_alloc_info), GFP_KERNEL); if (!pmc->descriptors) { - wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__); + wil_err(wil, "ERROR allocating pmc skb list\n"); goto no_release_err; } - wil_dbg_misc(wil, - "%s: allocated descriptors info list %p\n", - __func__, pmc->descriptors); + wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n", + pmc->descriptors); /* Allocate pring buffer and descriptors. 
* vring->va should be aligned on its size rounded up to power of 2 @@ -116,15 +115,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil, GFP_KERNEL); wil_dbg_misc(wil, - "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n", - __func__, + "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n", pmc->pring_va, &pmc->pring_pa, sizeof(struct vring_tx_desc), num_descriptors, sizeof(struct vring_tx_desc) * num_descriptors); if (!pmc->pring_va) { - wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__); + wil_err(wil, "ERROR allocating pmc pring\n"); goto release_pmc_skb_list; } @@ -143,9 +141,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, GFP_KERNEL); if (unlikely(!pmc->descriptors[i].va)) { - wil_err(wil, - "%s: ERROR allocating pmc descriptor %d", - __func__, i); + wil_err(wil, "ERROR allocating pmc descriptor %d", i); goto release_pmc_skbs; } @@ -165,21 +161,21 @@ void wil_pmc_alloc(struct wil6210_priv *wil, *_d = *d; } - wil_dbg_misc(wil, "%s: allocated successfully\n", __func__); + wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n"); pmc_cmd.op = WMI_PMC_ALLOCATE; pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors); pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa); - wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__); + wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n"); pmc->last_cmd_status = wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { wil_err(wil, - "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d", - __func__, pmc->last_cmd_status); + "WMI_PMC_CMD with ALLOCATE op failed with status %d", + pmc->last_cmd_status); goto release_pmc_skbs; } @@ -188,7 +184,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, return; release_pmc_skbs: - wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__); + wil_err(wil, "exit on error: Releasing skbs...\n"); for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) { dma_free_coherent(dev, descriptor_size, @@ -197,7 +193,7 @@ release_pmc_skbs: pmc->descriptors[i].va = NULL; } - wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__); + wil_err(wil, "exit on error: Releasing pring...\n"); dma_free_coherent(dev, sizeof(struct vring_tx_desc) * num_descriptors, @@ -207,8 +203,7 @@ release_pmc_skbs: pmc->pring_va = NULL; release_pmc_skb_list: - wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n", - __func__); + wil_err(wil, "exit on error: Releasing descriptors info list...\n"); kfree(pmc->descriptors); pmc->descriptors = NULL; @@ -232,24 +227,23 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) pmc->last_cmd_status = 0; if (!wil_is_pmc_allocated(pmc)) { - wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n", - __func__); + wil_dbg_misc(wil, + "pmc_free: Error, can't free - not allocated\n"); pmc->last_cmd_status = -EPERM; mutex_unlock(&pmc->lock); return; } if (send_pmc_cmd) { - wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n", - __func__); + wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n"); pmc_cmd.op = WMI_PMC_RELEASE; pmc->last_cmd_status = wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { wil_err(wil, - "%s WMI_PMC_CMD with RELEASE op failed, status %d", - __func__, pmc->last_cmd_status); + "WMI_PMC_CMD with RELEASE op failed, status %d", + pmc->last_cmd_status); /* There's nothing we can do with this error. * Normally, it should never occur. * Continue to freeing all memory allocated for pmc. 
@@ -261,8 +255,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) size_t buf_size = sizeof(struct vring_tx_desc) * pmc->num_descriptors; - wil_dbg_misc(wil, "%s: free pring va %p\n", - __func__, pmc->pring_va); + wil_dbg_misc(wil, "pmc_free: free pring va %p\n", + pmc->pring_va); dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa); pmc->pring_va = NULL; @@ -281,11 +275,11 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) pmc->descriptors[i].pa); pmc->descriptors[i].va = NULL; } - wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n", - __func__, i, pmc->num_descriptors); + wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i, + pmc->num_descriptors); wil_dbg_misc(wil, - "%s: free pmc descriptors info list %p\n", - __func__, pmc->descriptors); + "pmc_free: free pmc descriptors info list %p\n", + pmc->descriptors); kfree(pmc->descriptors); pmc->descriptors = NULL; } else { @@ -301,7 +295,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) */ int wil_pmc_last_cmd_status(struct wil6210_priv *wil) { - wil_dbg_misc(wil, "%s: status %d\n", __func__, + wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n", wil->pmc.last_cmd_status); return wil->pmc.last_cmd_status; @@ -324,7 +318,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, mutex_lock(&pmc->lock); if (!wil_is_pmc_allocated(pmc)) { - wil_err(wil, "%s: error, pmc is not allocated!\n", __func__); + wil_err(wil, "error, pmc is not allocated!\n"); pmc->last_cmd_status = -EPERM; mutex_unlock(&pmc->lock); return -EPERM; @@ -333,8 +327,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, pmc_size = pmc->descriptor_size * pmc->num_descriptors; wil_dbg_misc(wil, - "%s: size %u, pos %lld\n", - __func__, (unsigned)count, *f_pos); + "pmc_read: size %u, pos %lld\n", + (u32)count, *f_pos); pmc->last_cmd_status = 0; @@ -343,15 +337,16 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, offset = *f_pos - (idx * pmc->descriptor_size); if (*f_pos >= pmc_size) { - wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n", - __func__, *f_pos, (unsigned)pmc_size); + wil_dbg_misc(wil, + "pmc_read: reached end of pmc buf: %lld >= %u\n", + *f_pos, (u32)pmc_size); pmc->last_cmd_status = -ERANGE; goto out; } wil_dbg_misc(wil, - "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", - __func__, *f_pos, idx, offset, count); + "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", + *f_pos, idx, offset, count); /* if no errors, return the copied byte count */ retval = simple_read_from_buffer(buf, diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 19ed127d4d05..7404b6f39c6a 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -349,8 +349,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status, agg_amsdu, agg_wsize, agg_timeout); if (rc || (status != WLAN_STATUS_SUCCESS)) { - wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n", - __func__, rc, status); + wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc, + status); goto out; } @@ -387,7 +387,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) txdata->addba_in_progress = true; rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout); if (rc) { - wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc); + wil_err(wil, "wmi_addba failed, rc (%d)", rc); txdata->addba_in_progress = false; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index c1b4bb03e997..6e7dc8d98219 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -112,7 +112,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) size_t sz = vring->size * sizeof(vring->va[0]); uint i; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "vring_alloc:\n"); BUILD_BUG_ON(sizeof(vring->va[0]) != 32); @@ -745,7 +745,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); return; } - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "rx_handle\n"); while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { (*quota)--; @@ -768,7 +768,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size) struct vring *vring = &wil->vring_rx; int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "rx_init\n"); if (vring->va) { wil_err(wil, "Rx ring already allocated\n"); @@ -799,7 +799,7 @@ void wil_rx_fini(struct wil6210_priv *wil) { struct vring *vring = &wil->vring_rx; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "rx_fini\n"); if (vring->va) wil_vring_free(wil, vring, 0); @@ -851,7 +851,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, + wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); lockdep_assert_held(&wil->mutex); @@ -931,7 +931,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, + wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); lockdep_assert_held(&wil->mutex); @@ -993,7 +993,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) if (!vring->va) return; - wil_dbg_misc(wil, "%s() id=%d\n", __func__, id); + wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id); spin_lock_bh(&txdata->lock); txdata->dot1x_open = false; @@ -1032,12 +1032,14 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, struct vring *v = 
&wil->vring_tx[i]; struct vring_tx_data *txdata = &wil->vring_tx_data[i]; - wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n", - __func__, eth->h_dest, i); + wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", + eth->h_dest, i); if (v->va && txdata->enabled) { return v; } else { - wil_dbg_txrx(wil, "vring[%d] not valid\n", i); + wil_dbg_txrx(wil, + "find_tx_ucast: vring[%d] not valid\n", + i); return NULL; } } @@ -1373,8 +1375,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, int gso_type; int rc = -EINVAL; - wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", - __func__, skb->len, vring_index); + wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len, + vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1643,8 +1645,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, bool mcast = (vring_index == wil->bcast_vring); uint len = skb_headlen(skb); - wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", - __func__, skb->len, vring_index); + wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len, + vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1884,7 +1886,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) static bool pr_once_fw; int rc; - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "start_xmit\n"); if (unlikely(!test_bit(wil_status_fwready, wil->status))) { if (!pr_once_fw) { wil_err(wil, "FW not ready\n"); @@ -1982,7 +1984,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) return 0; } - wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); + wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid); used_before_complete = wil_vring_used_tx(vring); diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c index d051eea47a54..e53cf0cf7031 100644 --- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c +++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -62,13 +62,13 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) u32 host_min, dump_size, offset, len; if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) { - wil_err(wil, "%s: fail to obtain crash dump size\n", __func__); + wil_err(wil, "fail to obtain crash dump size\n"); return -EINVAL; } if (dump_size > size) { - wil_err(wil, "%s: not enough space for dump. Need %d have %d\n", - __func__, dump_size, size); + wil_err(wil, "not enough space for dump. 
Need %d have %d\n", + dump_size, size); return -EINVAL; } @@ -83,8 +83,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) len = map->to - map->from; offset = map->host - host_min; - wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n", - __func__, fw_mapping[i].name, len, offset); + wil_dbg_misc(wil, + "fw_copy_crash_dump: - dump %s, size %d, offset %d\n", + fw_mapping[i].name, len, offset); wil_memcpy_fromio_32((void * __force)(dest + offset), (const void __iomem * __force)data, len); @@ -99,7 +100,7 @@ void wil_fw_core_dump(struct wil6210_priv *wil) u32 fw_dump_size; if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) { - wil_err(wil, "%s: fail to get fw dump size\n", __func__); + wil_err(wil, "fail to get fw dump size\n"); return; } @@ -115,6 +116,5 @@ void wil_fw_core_dump(struct wil6210_priv *wil) * after 5 min */ dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL); - wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__, - fw_dump_size); + wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size); } diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 556acd3518c5..0137ac5b9bb3 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -539,8 +539,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { if (wil->sta[evt->cid].status != wil_sta_unused) { - wil_err(wil, "%s: AP: Invalid status %d for CID %d\n", - __func__, wil->sta[evt->cid].status, evt->cid); + wil_err(wil, "AP: Invalid status %d for CID %d\n", + wil->sta[evt->cid].status, evt->cid); mutex_unlock(&wil->mutex); return; } @@ -553,22 +553,19 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) rc = wil_tx_init(wil, evt->cid); if (rc) { - wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n", - __func__, evt->cid, rc); + wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n", + evt->cid, rc); wmi_disconnect_sta(wil, wil->sta[evt->cid].addr, WLAN_REASON_UNSPECIFIED, false, false); } else { - wil_info(wil, "%s: successful connection to CID %d\n", - __func__, evt->cid); + wil_info(wil, "successful connection to CID %d\n", evt->cid); } if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (rc) { netif_carrier_off(ndev); - wil_err(wil, - "%s: cfg80211_connect_result with failure\n", - __func__); + wil_err(wil, "cfg80211_connect_result with failure\n"); cfg80211_connect_result(ndev, evt->bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, @@ -601,8 +598,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); } else { - wil_err(wil, "%s: unhandled iftype %d for CID %d\n", - __func__, wdev->iftype, evt->cid); + wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype, + evt->cid); goto out; } @@ -929,8 +926,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail); if (immed_reply) { - wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", - __func__, wil->reply_id); + wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n", + wil->reply_id); kfree(evt); num_immed_reply++; complete(&wil->wmi_call); @@ -944,7 +941,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) } } /* normally, 1 event per IRQ should be processed */ - wil_dbg_wmi(wil, "%s -> 
%d events queued, %d completed\n", __func__, + wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n", n - num_immed_reply, num_immed_reply); } @@ -1370,7 +1367,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) struct wmi_listen_started_event evt; } __packed reply; - wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off"); + wil_info(wil, "(%s)\n", on ? "on" : "off"); if (on) { rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, @@ -1490,7 +1487,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, struct wmi_disconnect_event evt; } __packed reply; - wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); + wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); if (del_sta) { ether_addr_copy(del_sta_cmd.dst_mac, mac); @@ -1535,8 +1532,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout) .amsdu = 0, }; - wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__, - ringid, size, timeout); + wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size, + timeout); return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd)); } @@ -1548,8 +1545,7 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason) .reason = cpu_to_le16(reason), }; - wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__, - ringid, reason); + wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason); return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd)); } @@ -1561,8 +1557,8 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason) .reason = cpu_to_le16(reason), }; - wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__, - cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason); + wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf, + (cidxtid >> 4) & 0xf, reason); return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd)); } @@ -1736,7 +1732,7 @@ void wmi_event_flush(struct wil6210_priv *wil) { struct pending_wmi_event *evt, *t; - wil_dbg_wmi(wil, "%s()\n", __func__); + wil_dbg_wmi(wil, "event_flush\n"); list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { list_del(&evt->list); @@ -1777,8 +1773,8 @@ static void wmi_event_handle(struct wil6210_priv *wil, WARN_ON(wil->reply_buf); wmi_evt_call_handler(wil, id, evt_data, len - sizeof(*wmi)); - wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", - __func__, id); + wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n", + id); complete(&wil->wmi_call); return; } @@ -1825,11 +1821,11 @@ void wmi_event_worker(struct work_struct *work) struct pending_wmi_event *evt; struct list_head *lh; - wil_dbg_wmi(wil, "Start %s\n", __func__); + wil_dbg_wmi(wil, "event_worker: Start\n"); while ((lh = next_wmi_ev(wil)) != NULL) { evt = list_entry(lh, struct pending_wmi_event, list); wmi_event_handle(wil, &evt->event.hdr); kfree(evt); } - wil_dbg_wmi(wil, "Finished %s\n", __func__); + wil_dbg_wmi(wil, "event_worker: Finished\n"); } -- cgit v1.2.3 From 0d2370e939acca97b5e1abc1aedd47c5c9a5f500 Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Fri, 20 Jan 2017 13:49:47 +0200 Subject: wil6210: support new WMI-only FW capability WMI_ONLY FW is used for testing in production. It cannot be used for scan/connect, etc. In case FW reports this capability, driver will not allow interface up. 
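For illustration, a condensed sketch of the resulting open handler (simplified from the netdev.c hunk below; the wil_up() tail is assumed to be the existing open path and is kept only for context):

    static int wil_open(struct net_device *ndev)
    {
        struct wil6210_priv *wil = ndev_to_wil(ndev);

        /* a WMI-only (test) firmware cannot scan/connect, so refuse to
         * bring the interface up, same as in debug_fw mode
         */
        if (debug_fw ||
            test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
            wil_err(wil, "while in debug_fw or wmi_only mode\n");
            return -EINVAL;
        }

        return wil_up(wil); /* normal open path, as before */
    }
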
Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/netdev.c | 5 +++-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 10 +++++++--- drivers/net/wireless/ath/wil6210/wmi.h | 1 + 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index d5df744a12b3..1843d98136d9 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -24,8 +24,9 @@ static int wil_open(struct net_device *ndev) wil_dbg_misc(wil, "open\n"); - if (debug_fw) { - wil_err(wil, "while in debug_fw mode\n"); + if (debug_fw || + test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) { + wil_err(wil, "while in debug_fw or wmi_only mode\n"); return -EINVAL; } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 8baa603bec42..b85b4f71b15d 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -99,8 +99,10 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) */ int msi_only = pdev->msi_enabled; bool _use_msi = use_msi; + bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY, + wil->fw_capabilities); - wil_dbg_misc(wil, "if_pcie_enable\n"); + wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only); pdev->msi_enabled = 0; @@ -123,9 +125,11 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) if (rc) goto stop_master; - /* need reset here to obtain MAC */ + /* need reset here to obtain MAC or in case of WMI-only FW, full reset + * and fw loading takes place + */ mutex_lock(&wil->mutex); - rc = wil_reset(wil, false); + rc = wil_reset(wil, wmi_only); mutex_unlock(&wil->mutex); if (rc) goto release_irq; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 9c4a0bdc51f3..906aa723e415 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -57,6 +57,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_RF_SECTORS = 2, WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_WMI_ONLY = 5, WMI_FW_CAPABILITY_MAX, }; -- cgit v1.2.3 From 4d4c4dc324b4444d40e9164834044feebaf2d72d Mon Sep 17 00:00:00 2001 From: Lior David Date: Fri, 20 Jan 2017 13:49:48 +0200 Subject: wil6210: missing reinit_completion in wmi_call The code in wmi_call uses the wil->wmi_call completion structure to wait for a reply. In some scenarios, complete was called twice on the completion structure. This happened mainly with a disconnect event which can arrive both unsolicited and as a reply to a disconnect request. In this case the completion structure was left marked as "done" and the next wmi_call returned immediately with a corrupted reply buffer. This caused unexpected results including crashes. Fix this by adding the missing call to reinit_completion. 
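A condensed sketch of the command/reply flow in wmi_call() with the fix applied (simplified; lock and field names as in this driver, error and timeout handling mostly omitted):

    spin_lock(&wil->wmi_ev_lock);
    wil->reply_id = reply_id;
    wil->reply_buf = reply;
    wil->reply_size = reply_size;
    /* re-arm the completion: a stale complete() left over from an
     * unsolicited event (e.g. disconnect) must not satisfy the wait
     * for the reply of this new command
     */
    reinit_completion(&wil->wmi_call);
    spin_unlock(&wil->wmi_ev_lock);

    rc = __wmi_send(wil, cmdid, buf, len);
    if (rc)
        goto out;

    /* wait for the matching reply; the event path calls complete() */
    remain = wait_for_completion_timeout(&wil->wmi_call,
                                         msecs_to_jiffies(to_msec));
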
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 0137ac5b9bb3..d30b3fca7749 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -957,6 +957,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, wil->reply_id = reply_id; wil->reply_buf = reply; wil->reply_size = reply_size; + reinit_completion(&wil->wmi_call); spin_unlock(&wil->wmi_ev_lock); rc = __wmi_send(wil, cmdid, buf, len); -- cgit v1.2.3 From 3ee908dc3b48fa0a5be2883362d36e085b6b52f4 Mon Sep 17 00:00:00 2001 From: Hamad Kadmany Date: Fri, 20 Jan 2017 13:49:49 +0200 Subject: wil6210: protect against false interrupt during reset sequence During reset sequence it is seen that device is generating an interrupt eventhough interrupts are masked at device level. Add workaround to disable the interrupts from host side during reset and clear any pending interrupts before re-enabling the interrupt. Signed-off-by: Hamad Kadmany Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 9b8fa6a182ec..85a795a87843 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -918,7 +918,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) flush_workqueue(wil->wmi_wq); wil_bl_crash_info(wil, false); + wil_disable_irq(wil); rc = wil_target_reset(wil); + wil6210_clear_irq(wil); + wil_enable_irq(wil); wil_rx_fini(wil); if (rc) { wil_bl_crash_info(wil, true); -- cgit v1.2.3 From a895cb8b990c12945b70f70d9e0798d587d507c0 Mon Sep 17 00:00:00 2001 From: Lior David Date: Fri, 20 Jan 2017 13:49:50 +0200 Subject: wil6210: fix for broadcast workaround in PBSS Currently we do not have full support for broadcast from a station inside a PBSS network. We have a workaround where instead of broadcast we do a unicast to every known station in the PBSS. This workaround was performed only for P2P clients. This fix will perform the broadcast workaround also for a regular station inside a PBSS. 
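The ring selection in wil_start_xmit() after this change boils down to the following condensed, logically equivalent restatement of the txrx.c hunk below:

    if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss)
        /* infrastructure STA: everything goes to the AP ring */
        vring = wil_find_tx_vring_sta(wil, skb);
    else if (bcast && !wil->pbss &&
             wil->wdev->iftype == NL80211_IFTYPE_AP)
        /* AP has a dedicated broadcast ring */
        vring = wil_find_tx_bcast_1(wil, skb);
    else if (bcast)
        /* PBSS (or unexpected combination): duplicate the skb into
         * every known station's ring
         */
        vring = wil_find_tx_bcast_2(wil, skb);
    else
        /* unicast: pick the ring by destination address */
        vring = wil_find_tx_ucast(wil, skb);
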
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 1 + drivers/net/wireless/ath/wil6210/txrx.c | 35 ++++++++++++++++------------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index f8499a823662..9a9279092a5d 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -601,6 +601,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, goto out; } wil->privacy = sme->privacy; + wil->pbss = sme->pbss; if (wil->privacy) { /* For secure assoc, remove old keys */ diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 6e7dc8d98219..1311688554ee 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1195,17 +1195,6 @@ found: return v; } -static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil, - struct sk_buff *skb) -{ - struct wireless_dev *wdev = wil->wdev; - - if (wdev->iftype != NL80211_IFTYPE_AP) - return wil_find_tx_bcast_2(wil, skb); - - return wil_find_tx_bcast_1(wil, skb); -} - static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, int vring_index) { @@ -1905,12 +1894,26 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) pr_once_fw = false; /* find vring */ - if (wil->wdev->iftype == NL80211_IFTYPE_STATION) { - /* in STA mode (ESS), all to same VRING */ + if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) { + /* in STA mode (ESS), all to same VRING (to AP) */ vring = wil_find_tx_vring_sta(wil, skb); - } else { /* direct communication, find matching VRING */ - vring = bcast ? wil_find_tx_bcast(wil, skb) : - wil_find_tx_ucast(wil, skb); + } else if (bcast) { + if (wil->pbss) + /* in pbss, no bcast VRING - duplicate skb in + * all stations VRINGs + */ + vring = wil_find_tx_bcast_2(wil, skb); + else if (wil->wdev->iftype == NL80211_IFTYPE_AP) + /* AP has a dedicated bcast VRING */ + vring = wil_find_tx_bcast_1(wil, skb); + else + /* unexpected combination, fallback to duplicating + * the skb in all stations VRINGs + */ + vring = wil_find_tx_bcast_2(wil, skb); + } else { + /* unicast, find specific VRING by dest. address */ + vring = wil_find_tx_ucast(wil, skb); } if (unlikely(!vring)) { wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); -- cgit v1.2.3 From a52fb913aee97149f5986fd001a424e68c096d19 Mon Sep 17 00:00:00 2001 From: Lior David Date: Fri, 20 Jan 2017 13:49:50 +0200 Subject: wil6210: align to latest auto generated wmi.h Align to latest version of the auto generated wmi file describing the interface with FW. 
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.h | 45 ++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 906aa723e415..7c9fee57aa91 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -187,6 +187,7 @@ enum wmi_command_id { WMI_RS_CFG_CMDID = 0x921, WMI_GET_DETAILED_RS_RES_CMDID = 0x922, WMI_AOA_MEAS_CMDID = 0x923, + WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924, WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930, WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931, WMI_NEW_STA_CMDID = 0x935, @@ -547,7 +548,9 @@ struct wmi_pcp_start_cmd { u8 pcp_max_assoc_sta; u8 hidden_ssid; u8 is_go; - u8 reserved0[6]; + u8 reserved0[5]; + /* abft_len override if non-0 */ + u8 abft_len; u8 disable_ap_sme; u8 network_type; u8 channel; @@ -1084,6 +1087,7 @@ enum wmi_event_id { WMI_RS_CFG_DONE_EVENTID = 0x1921, WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922, WMI_AOA_MEAS_EVENTID = 0x1923, + WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924, WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930, WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931, WMI_TOF_SESSION_END_EVENTID = 0x1991, @@ -1304,7 +1308,8 @@ struct wmi_connect_event { u8 assoc_req_len; u8 assoc_resp_len; u8 cid; - u8 reserved2[3]; + u8 aid; + u8 reserved2[2]; /* not in use */ u8 assoc_info[0]; } __packed; @@ -1777,6 +1782,42 @@ struct wmi_get_detailed_rs_res_event { u8 reserved[3]; } __packed; +/* BRP antenna limit mode */ +enum wmi_brp_ant_limit_mode { + /* Disable BRP force antenna limit */ + WMI_BRP_ANT_LIMIT_MODE_DISABLE = 0x00, + /* Define maximal antennas limit. Only effective antennas will be + * actually used + */ + WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE = 0x01, + /* Force a specific number of antennas */ + WMI_BRP_ANT_LIMIT_MODE_FORCE = 0x02, + /* number of BRP antenna limit modes */ + WMI_BRP_ANT_LIMIT_MODES_NUM = 0x03, +}; + +/* WMI_BRP_SET_ANT_LIMIT_CMDID */ +struct wmi_brp_set_ant_limit_cmd { + /* connection id */ + u8 cid; + /* enum wmi_brp_ant_limit_mode */ + u8 limit_mode; + /* antenna limit count, 1-27 + * disable_mode - ignored + * effective_mode - upper limit to number of antennas to be used + * force_mode - exact number of antennas to be used + */ + u8 ant_limit; + u8 reserved; +} __packed; + +/* WMI_BRP_SET_ANT_LIMIT_EVENTID */ +struct wmi_brp_set_ant_limit_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* broadcast connection ID */ #define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF) -- cgit v1.2.3 From 9d865ee232eccf51950b16f223f4130f7dc06e0f Mon Sep 17 00:00:00 2001 From: Lior David Date: Fri, 20 Jan 2017 13:49:51 +0200 Subject: wil6210: report association ID (AID) per station in debugfs Add reporting of the association ID (AID) for each station as part of the stations file in the debugfs. Valid AID values are 1-254. 0 is reported if the AID is unknown or not reported by firmware. 
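With the seq_printf() format added below, an entry in the stations debugfs file looks roughly like this (MAC addresses and AID values are made up for illustration):

    [0] 02:11:22:33:44:55 connected AID 1
    [1] 00:00:00:00:00:00 unused AID 0
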
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 4 +++- drivers/net/wireless/ath/wil6210/wil6210.h | 1 + drivers/net/wireless/ath/wil6210/wmi.c | 5 +++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index db6527ada946..bb8a59a6ed74 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1379,6 +1379,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; + u8 aid = 0; switch (p->status) { case wil_sta_unused: @@ -1389,9 +1390,10 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) break; case wil_sta_connected: status = "connected"; + aid = p->aid; break; } - seq_printf(s, "[%d] %pM %s\n", i, p->addr, status); + seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 65f201631fe9..6dbaff5ceff1 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -523,6 +523,7 @@ struct wil_sta_info { unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)]; struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM]; struct wil_tid_crypto_rx group_crypto_rx; + u8 aid; /* 1-254; 0 if unknown/not reported */ }; enum { diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index d30b3fca7749..598096f17c18 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -495,8 +495,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } ch = evt->channel + 1; - wil_info(wil, "Connect %pM channel [%d] cid %d\n", - evt->bssid, ch, evt->cid); + wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n", + evt->bssid, ch, evt->cid, evt->aid); wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, evt->assoc_info, len - sizeof(*evt), true); @@ -604,6 +604,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } wil->sta[evt->cid].status = wil_sta_connected; + wil->sta[evt->cid].aid = evt->aid; set_bit(wil_status_fwconnected, wil->status); wil_update_net_queues_bh(wil, NULL, false); -- cgit v1.2.3 From c3bfea05a6fdecee03c7cf08f5bfee6aa0645cee Mon Sep 17 00:00:00 2001 From: Lior David Date: Fri, 20 Jan 2017 13:49:52 +0200 Subject: wil6210: option to override A-BFT length in start AP/PCP Add an option to specify and override the A-BFT length when starting an AP/PCP. See IEEE P802.11-2016, 10.38.5. The abft_len must be set before starting AP/PCP. It is only needed for diagnostics and certification. 
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 1 + drivers/net/wireless/ath/wil6210/wil6210.h | 1 + drivers/net/wireless/ath/wil6210/wmi.c | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index bb8a59a6ed74..97e908842a2b 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1702,6 +1702,7 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8), WIL_FIELD(chip_revision, S_IRUGO, doff_u8), + WIL_FIELD(abft_len, S_IRUGO | S_IWUSR, doff_u8), {}, }; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 6dbaff5ceff1..085a2dbfa21d 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -667,6 +667,7 @@ struct wil6210_priv { struct dentry *debug; struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; u8 discovery_mode; + u8 abft_len; void *platform_handle; struct wil_platform_ops platform_ops; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 598096f17c18..27d21a3e7c7d 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1079,6 +1079,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, .hidden_ssid = hidden_ssid, .is_go = is_go, .disable_ap_sme = disable_ap_sme, + .abft_len = wil->abft_len, }; struct { struct wmi_cmd_hdr wmi; -- cgit v1.2.3 From 78484c44e4b305b23cbfcfb9257ed96c8753e86d Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Fri, 20 Jan 2017 13:49:53 +0200 Subject: wil6210: convert symbolic permissions to octal permissions Symbolic permissions are no longer recommended. This patch changes the symbolic permissions in wil6210 driver to octal permissions. 
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 2 +- drivers/net/wireless/ath/wil6210/debugfs.c | 128 ++++++++++++++-------------- drivers/net/wireless/ath/wil6210/main.c | 16 ++-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 2 +- drivers/net/wireless/ath/wil6210/txrx.c | 4 +- drivers/net/wireless/ath/wil6210/wmi.c | 6 +- 6 files changed, 79 insertions(+), 79 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 9a9279092a5d..e25e78e71f54 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -21,7 +21,7 @@ #define WIL_MAX_ROC_DURATION_MS 5000 bool disable_ap_sme; -module_param(disable_ap_sme, bool, S_IRUGO); +module_param(disable_ap_sme, bool, 0444); MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME"); #define CHAN60G(_channel, _flags) { \ diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 97e908842a2b..3e8cdf12feda 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -364,13 +364,13 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, } static const struct dbg_off isr_off[] = { - {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32}, - {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32}, - {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32}, - {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32}, - {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32}, - {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32}, - {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32}, + {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32}, + {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32}, + {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32}, + {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32}, + {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32}, + {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32}, + {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32}, {}, }; @@ -390,9 +390,9 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil, } static const struct dbg_off pseudo_isr_off[] = { - {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, - {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, - {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, + {"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, + {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, + {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, {}, }; @@ -411,40 +411,40 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil, } static const struct dbg_off lgc_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, {}, }; static const struct dbg_off tx_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH), + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, 
HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA), + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL), + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL), doff_io32}, - {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH), + {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH), doff_io32}, - {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA), + {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA), doff_io32}, - {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL), + {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL), doff_io32}, {}, }; static const struct dbg_off rx_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH), + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA), + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL), + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL), doff_io32}, - {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH), + {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH), doff_io32}, - {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA), + {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA), doff_io32}, - {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL), + {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL), doff_io32}, {}, }; @@ -1624,7 +1624,7 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, blob->data = (void * __force)wil->csr + HOSTADDR(map->host); blob->size = map->to - map->from; snprintf(name, sizeof(name), "blob_%s", map->name); - wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob); + wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob); } } @@ -1634,29 +1634,29 @@ static const struct { umode_t mode; const struct file_operations *fops; } dbg_files[] = { - {"mbox", S_IRUGO, &fops_mbox}, - {"vrings", S_IRUGO, &fops_vring}, - {"stations", S_IRUGO, &fops_sta}, - {"desc", S_IRUGO, &fops_txdesc}, - {"bf", S_IRUGO, &fops_bf}, - {"ssid", S_IRUGO | S_IWUSR, &fops_ssid}, - {"mem_val", S_IRUGO, &fops_memread}, - {"reset", S_IWUSR, &fops_reset}, - {"rxon", S_IWUSR, &fops_rxon}, - {"tx_mgmt", S_IWUSR, &fops_txmgmt}, - {"wmi_send", S_IWUSR, &fops_wmi}, - {"back", S_IRUGO | S_IWUSR, &fops_back}, - {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg}, - {"pmcdata", S_IRUGO, &fops_pmcdata}, - {"temp", S_IRUGO, &fops_temp}, - {"freq", S_IRUGO, &fops_freq}, - {"link", S_IRUGO, &fops_link}, - {"info", S_IRUGO, &fops_info}, - {"recovery", S_IRUGO | S_IWUSR, &fops_recovery}, - {"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg}, - {"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time}, - {"fw_capabilities", S_IRUGO, &fops_fw_capabilities}, - {"fw_version", S_IRUGO, &fops_fw_version}, + {"mbox", 0444, &fops_mbox}, + {"vrings", 0444, &fops_vring}, + {"stations", 0444, &fops_sta}, + {"desc", 0444, &fops_txdesc}, + {"bf", 0444, &fops_bf}, + {"ssid", 0644, &fops_ssid}, + {"mem_val", 0644, &fops_memread}, + {"reset", 0244, &fops_reset}, + {"rxon", 0244, &fops_rxon}, + {"tx_mgmt", 0244, &fops_txmgmt}, + {"wmi_send", 0244, &fops_wmi}, + {"back", 0644, &fops_back}, + {"pmccfg", 0644, &fops_pmccfg}, + {"pmcdata", 0444, &fops_pmcdata}, + {"temp", 0444, &fops_temp}, + {"freq", 0444, &fops_freq}, + {"link", 0444, &fops_link}, + {"info", 0444, &fops_info}, + {"recovery", 0644, &fops_recovery}, + {"led_cfg", 0644, 
&fops_led_cfg}, + {"led_blink_time", 0644, &fops_led_blink_time}, + {"fw_capabilities", 0444, &fops_fw_capabilities}, + {"fw_version", 0444, &fops_fw_version}, }; static void wil6210_debugfs_init_files(struct wil6210_priv *wil, @@ -1695,32 +1695,32 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil, /* fields in struct wil6210_priv */ static const struct dbg_off dbg_wil_off[] = { - WIL_FIELD(privacy, S_IRUGO, doff_u32), - WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong), - WIL_FIELD(hw_version, S_IRUGO, doff_x32), - WIL_FIELD(recovery_count, S_IRUGO, doff_u32), - WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), - WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8), - WIL_FIELD(chip_revision, S_IRUGO, doff_u8), - WIL_FIELD(abft_len, S_IRUGO | S_IWUSR, doff_u8), + WIL_FIELD(privacy, 0444, doff_u32), + WIL_FIELD(status[0], 0644, doff_ulong), + WIL_FIELD(hw_version, 0444, doff_x32), + WIL_FIELD(recovery_count, 0444, doff_u32), + WIL_FIELD(ap_isolate, 0444, doff_u32), + WIL_FIELD(discovery_mode, 0644, doff_u8), + WIL_FIELD(chip_revision, 0444, doff_u8), + WIL_FIELD(abft_len, 0644, doff_u8), {}, }; static const struct dbg_off dbg_wil_regs[] = { - {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), + {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), doff_io32}, - {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, + {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, {}, }; /* static parameters */ static const struct dbg_off dbg_statics[] = { - {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32}, - {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32}, - {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32}, - {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh, + {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, + {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, + {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, + {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh, doff_u32}, - {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8}, + {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {}, }; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 85a795a87843..efb1f59aafd9 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -27,23 +27,23 @@ #define WAIT_FOR_SCAN_ABORT_MS 1000 bool debug_fw; /* = false; */ -module_param(debug_fw, bool, S_IRUGO); +module_param(debug_fw, bool, 0444); MODULE_PARM_DESC(debug_fw, " do not perform card reset. 
For FW debug"); static bool oob_mode; -module_param(oob_mode, bool, S_IRUGO); +module_param(oob_mode, bool, 0444); MODULE_PARM_DESC(oob_mode, " enable out of the box (OOB) mode in FW, for diagnostics and certification"); bool no_fw_recovery; -module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); +module_param(no_fw_recovery, bool, 0644); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); /* if not set via modparam, will be set to default value of 1/8 of * rx ring size during init flow */ unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT; -module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO); +module_param(rx_ring_overflow_thrsh, ushort, 0444); MODULE_PARM_DESC(rx_ring_overflow_thrsh, " RX ring overflow threshold in descriptors."); @@ -73,7 +73,7 @@ static const struct kernel_param_ops mtu_max_ops = { .get = param_get_uint, }; -module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO); +module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444); MODULE_PARM_DESC(mtu_max, " Max MTU value."); static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; @@ -102,11 +102,11 @@ static const struct kernel_param_ops ring_order_ops = { .get = param_get_uint, }; -module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO); +module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444); MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order"); -module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO); +module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444); MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order"); -module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO); +module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444); MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order"); #define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index b85b4f71b15d..874c787727fe 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -23,7 +23,7 @@ #include static bool use_msi = true; -module_param(use_msi, bool, S_IRUGO); +module_param(use_msi, bool, 0444); MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); #ifdef CONFIG_PM diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 1311688554ee..072182e527e6 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -29,12 +29,12 @@ #include "trace.h" static bool rtap_include_phy_info; -module_param(rtap_include_phy_info, bool, S_IRUGO); +module_param(rtap_include_phy_info, bool, 0444); MODULE_PARM_DESC(rtap_include_phy_info, " Include PHY info in the radiotap header, default - no"); bool rx_align_2; -module_param(rx_align_2, bool, S_IRUGO); +module_param(rx_align_2, bool, 0444); MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no"); static inline uint wil_rx_snaplen(void) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 27d21a3e7c7d..1f22c19696b1 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -24,16 +24,16 @@ #include "trace.h" static uint max_assoc_sta = WIL6210_MAX_CID; -module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); +module_param(max_assoc_sta, uint, 0644); MODULE_PARM_DESC(max_assoc_sta, " Max 
number of stations associated to the AP"); int agg_wsize; /* = 0; */ -module_param(agg_wsize, int, S_IRUGO | S_IWUSR); +module_param(agg_wsize, int, 0644); MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;" " 0 - use default; < 0 - don't auto-establish"); u8 led_id = WIL_LED_INVALID_ID; -module_param(led_id, byte, S_IRUGO); +module_param(led_id, byte, 0444); MODULE_PARM_DESC(led_id, " 60G device led enablement. Set the led ID (0-2) to enable"); -- cgit v1.2.3 From dd51fa3d8a1d4781647f333e731590b65965b332 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 27 Jan 2017 19:52:36 +0200 Subject: ath10k: remove multiple defines of DIAG_TRANSFER_LIMIT DIAG_TRANSFER_LIMIT is redefined with same value and comments just below this entry, remove this duplicate entry. Signed-off-by: Srinivas Kandagatla Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 9854ad56b2de..c76789d5de99 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -24,11 +24,6 @@ #include "ce.h" #include "ahb.h" -/* - * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite - */ -#define DIAG_TRANSFER_LIMIT 2048 - /* * maximum number of bytes that can be * handled atomically by DiagRead/DiagWrite -- cgit v1.2.3 From 0de4df5ba2ad3f3c46d2c079f3a76c69d71800be Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 27 Jan 2017 19:52:37 +0200 Subject: ath10k: use dma_zalloc_coherent() use dma_zalloc_coherent() instead of dma_alloc_coherent and memset(). Signed-off-by: Srinivas Kandagatla Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 9 +-------- drivers/net/wireless/ath/ath10k/pci.c | 3 +-- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index c04e68796a20..da466ab2d823 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -958,7 +958,7 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, * coherent DMA are unsupported */ dest_ring->base_addr_owner_space_unaligned = - dma_alloc_coherent(ar->dev, + dma_zalloc_coherent(ar->dev, (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), &base_addr, GFP_KERNEL); @@ -969,13 +969,6 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, dest_ring->base_addr_ce_space_unaligned = base_addr; - /* - * Correctly initialize memory to 0 to prevent garbage - * data crashing system when download firmware - */ - memset(dest_ring->base_addr_owner_space_unaligned, 0, - nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN); - dest_ring->base_addr_owner_space = PTR_ALIGN( dest_ring->base_addr_owner_space_unaligned, CE_DESC_RING_ALIGN); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 95ccd6db7623..8f0d518bca46 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -896,7 +896,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, */ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); - data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, + data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, GFP_ATOMIC); @@ -905,7 +905,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, ret 
= -ENOMEM; goto done; } - memset(data_buf, 0, alloc_nbytes); remaining_bytes = nbytes; ce_data = ce_data_base; -- cgit v1.2.3 From 1ad38fd719da87980480886f21130053c73007ac Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 27 Jan 2017 19:52:37 +0200 Subject: ath10k: fix typo in addr calculation CORE_CTRL_ADDRESS is offset in register address space, it does not make sense to OR it to derive the final address. It looks like its a typo, so fix it. Signed-off-by: Srinivas Kandagatla Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 8f0d518bca46..d2aa9e5ec05d 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1937,7 +1937,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) { u32 addr, val; - addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; + addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS; val = ath10k_pci_read32(ar, addr); val |= CORE_CTRL_CPU_INTR_MASK; ath10k_pci_write32(ar, addr, val); -- cgit v1.2.3 From f2593cb1b29185d38db706cbcbe22ed538720ae1 Mon Sep 17 00:00:00 2001 From: Waldemar Rymarkiewicz Date: Fri, 27 Jan 2017 19:52:19 +0200 Subject: ath10k: Search SMBIOS for OEM board file extension Board Data File (BDF) is loaded upon driver boot-up procedure. The right board data file is identified, among others, by device and sybsystem ids. The problem, however, can occur when the (default) board data file cannot fulfill with the vendor requirements and it is necessary to use a different board data file. To solve the issue QCA uses SMBIOS type 0xF8 to store Board Data File Name Extension to specify the extension/variant name. The driver will take the extension suffix into consideration and will load the right (non-default) board data file if necessary. If it is unnecessary to use extension board data file, please leave the SMBIOS field blank and default configuration will be used. 
Example: If a default board data file for a specific board is identified by a string "bus=pci,vendor=168c,device=003e,subsystem-vendor=1028, subsystem-device=0310" then the OEM specific data file, if used, could be identified by variant suffix: "bus=pci,vendor=168c,device=003e,subsystem-vendor=1028, subsystem-device=0310,variant=DE_1AB" Signed-off-by: Waldemar Rymarkiewicz Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 84 ++++++++++++++++++++++++++++++++-- drivers/net/wireless/ath/ath10k/core.h | 19 ++++++++ 2 files changed, 100 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 874c2a755c66..c2afcca6fd60 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include "core.h" @@ -707,6 +709,72 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) return 0; } +static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath10k *ar = data; + const char *bdf_ext; + const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; + u8 bdf_enabled; + int i; + + if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); + if (!bdf_enabled) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + /* Only one string exists (per spec) */ + bdf_ext = (char *)hdr + hdr->length; + + if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + for (i = 0; i < strlen(bdf_ext); i++) { + if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic suffix */ + if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), + sizeof(ar->id.bdf_ext)) < 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + bdf_ext); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); +} + +static int ath10k_core_check_smbios(struct ath10k *ar) +{ + ar->id.bdf_ext[0] = '\0'; + dmi_walk(ath10k_core_check_bdfext, ar); + + if (ar->id.bdf_ext[0] == '\0') + return -ENODATA; + + return 0; +} + static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; @@ -1053,6 +1121,9 @@ err: static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len) { + /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; + if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", @@ -1062,12 +1133,15 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, goto out; } + if (ar->id.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ar->id.bdf_ext); + scnprintf(name, name_len, - "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", ath10k_bus_str(ar->hif.bus), ar->id.vendor, 
ar->id.device, - ar->id.subsystem_vendor, ar->id.subsystem_device); - + ar->id.subsystem_vendor, ar->id.subsystem_device, variant); out: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -2128,6 +2202,10 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } + ret = ath10k_core_check_smbios(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n"); + ret = ath10k_core_fetch_board_file(ar); if (ret) { ath10k_err(ar, "failed to fetch board file: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 757242ef52ac..88d14be7fcce 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -69,6 +69,23 @@ #define ATH10K_NAPI_BUDGET 64 #define ATH10K_NAPI_QUOTA_LIMIT 60 +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* Offset pointing to Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8 + +/* Board Data File Name Extension string length. + * String format: BDF__\0 + */ +#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20 + +/* The magic used by QCA spec */ +#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" + struct ath10k; enum ath10k_bus { @@ -798,6 +815,8 @@ struct ath10k { bool bmi_ids_valid; u8 bmi_board_id; u8 bmi_chip_id; + + char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; } id; int fw_api; -- cgit v1.2.3 From b3d75a81f07c757ab73c9022631170c3baefe380 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Thu, 19 Jan 2017 10:51:25 +0100 Subject: brcmfmac: drop duplicated core selection from brcmf_pcie_attach MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It was left after reworking PCIe reset in commit 07fe2e38c7fd ("brcmfmac: Reset PCIE devices after recognition."). Cc: Hante Meuleman Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 19db8f26729a..6fae4cf3f6ab 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -601,7 +601,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo) { u32 config; - brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); /* BAR1 window may not be sized properly */ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0); -- cgit v1.2.3 From 34db70b92faed22f59fdc1dba766e8cb6248fe9e Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 20 Jan 2017 14:28:24 +0100 Subject: rt2x00: add copy of clk for soc devices Since clk_get() is not trivial add copy of clk pointer to rt2x00dev for System On Chip devices and initialize it on probe routine. 
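As a rough illustration of the approach (not the driver's literal code), a platform driver typically looks the clock up once in its probe routine and caches the pointer for later rate queries; note that clk_get() takes the device plus an optional connection id, NULL here. The structure and function names below are invented for the sketch:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	struct my_soc_dev {
		struct clk *clk;	/* cached SoC clock handle */
	};

	static int my_soc_probe(struct platform_device *pdev)
	{
		struct my_soc_dev *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* Look the clock up once; later code only queries its rate. */
		priv->clk = clk_get(&pdev->dev, NULL);

		platform_set_drvdata(pdev, priv);
		return 0;
	}

devm_clk_get() would be an alternative that drops the clock reference automatically when the driver detaches.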
Signed-off-by: Stanislaw Gruszka Acked-by: Daniel Golle Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 4 ++++ drivers/net/wireless/ralink/rt2x00/rt2x00soc.c | 1 + 2 files changed, 5 insertions(+) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index bea7ac30522f..b59e721c8a5d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -39,6 +39,7 @@ #include #include #include +#include #include @@ -1011,6 +1012,9 @@ struct rt2x00_dev { unsigned int extra_tx_headroom; struct usb_anchor *anchor; + + /* Clock for System On Chip devices. */ + struct clk *clk; }; struct rt2x00_bar_list_entry { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index 69a0cdadb07f..362f9d3b98fc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c @@ -92,6 +92,7 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops) rt2x00dev->hw = hw; rt2x00dev->irq = platform_get_irq(pdev, 0); rt2x00dev->name = pdev->dev.driver->name; + rt2x00dev->clk = clk_get(&pdev->dev); rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC); -- cgit v1.2.3 From 5c4412e0728063583ec971323f8256e8005a32b6 Mon Sep 17 00:00:00 2001 From: Daniel Golle Date: Fri, 20 Jan 2017 14:28:25 +0100 Subject: rt2x00: rt2800lib: add support for RT3352 with 20MHz crystal On Rt3352 the driver needs to know the frequency of an external crystal which can be either 40 MHz (as on all other WiSoCs until now) or 20 MHz. Get the clock attached by ramips WiSoC platform code which probes SYSC_REG_SYSCFG (added by John Crispin in commit 6ac8579b96e3b) and introduce a new flag clk_is_20mhz in struct hw_mode_spec to make the driver aware and use either 40 MHz or 20 MHz specific rf_vals on those WiSoC platforms. The introduced support for boards with a 20 MHz crystal is also needed for RT5350. 
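For reference, a minimal sketch of the crystal-rate check this series relies on; the helper name and the IS_ERR_OR_NULL() guard are illustrative additions for platforms that register no clock at all, while the driver's own rt2800_clk_is_20mhz() in the hunk below simply compares clk_get_rate() against 20 MHz and switches between the 40 MHz and 20 MHz rf_vals tables:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Illustrative helper: true when the attached reference clock runs
	 * at 20 MHz, false otherwise (including when no clock is present).
	 */
	static bool xtal_is_20mhz(struct clk *clk)
	{
		return !IS_ERR_OR_NULL(clk) && clk_get_rate(clk) == 20000000;
	}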
Signed-off-by: Daniel Golle Signed-off-by: Gabor Juhos Signed-off-by: Mathias Kresin Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 31 +++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 8ea844d35ecb..5ae2950890bc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1935,6 +1935,11 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, rt2x00dev->lna_gain = lna_gain; } +static inline bool rt2800_clk_is_20mhz(struct rt2x00_dev *rt2x00dev) +{ + return clk_get_rate(rt2x00dev->clk) == 20000000; +} + #define FREQ_OFFSET_BOUND 0x5f static void rt2800_freq_cal_mode1(struct rt2x00_dev *rt2x00dev) @@ -7426,6 +7431,27 @@ static const struct rf_channel rf_vals_3x[] = { {173, 0x61, 0, 9}, }; +/* + * RF value list for rt3xxx with Xtal20MHz + * Supports: 2.4 GHz (all) (RF3322) + */ +static const struct rf_channel rf_vals_3x_xtal20[] = { + {1, 0xE2, 2, 0x14}, + {2, 0xE3, 2, 0x14}, + {3, 0xE4, 2, 0x14}, + {4, 0xE5, 2, 0x14}, + {5, 0xE6, 2, 0x14}, + {6, 0xE7, 2, 0x14}, + {7, 0xE8, 2, 0x14}, + {8, 0xE9, 2, 0x14}, + {9, 0xEA, 2, 0x14}, + {10, 0xEB, 2, 0x14}, + {11, 0xEC, 2, 0x14}, + {12, 0xED, 2, 0x14}, + {13, 0xEE, 2, 0x14}, + {14, 0xF0, 2, 0x18}, +}; + static const struct rf_channel rf_vals_5592_xtal20[] = { /* Channel, N, K, mod, R */ {1, 482, 4, 10, 3}, @@ -7654,7 +7680,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF5390: case RF5392: spec->num_channels = 14; - spec->channels = rf_vals_3x; + if (rt2800_clk_is_20mhz(rt2x00dev)) + spec->channels = rf_vals_3x_xtal20; + else + spec->channels = rf_vals_3x; break; case RF3052: -- cgit v1.2.3 From 98e71f44c51d0ade36896dc34115c108912d909d Mon Sep 17 00:00:00 2001 From: Serge Vasilugin Date: Fri, 20 Jan 2017 14:28:26 +0100 Subject: rt2x00: add support for RT5350 WiSoC Support for the RT5350 WiSoC was added to OpenWrt after having a lengthy debate about the legality of the original submission, see https://lists.openwrt.org/pipermail/openwrt-devel/2013-January/018224.html MTK/Ralink Acked replied and says we can merge this patch under the GPL. 
https://dev.openwrt.org/changeset/36177 Signed-off-by: Serge Vasilugin Tested-by: Michel Stempin Acked-by: John Crispin [daniel@makrotopia.org: added commit message, cleaned up code] Signed-off-by: Daniel Golle Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800.h | 1 + drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 125 +++++++++++++++++++++++-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1 + 3 files changed, 120 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 18096903e20f..256496bfbafb 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -72,6 +72,7 @@ #define RF5592 0x000f #define RF3070 0x3070 #define RF3290 0x3290 +#define RF5350 0x5350 #define RF5360 0x5360 #define RF5362 0x5362 #define RF5370 0x5370 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 5ae2950890bc..755c09364b19 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -2761,6 +2761,13 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; + rt2800_rfcsr_write(rt2x00dev, 59, + r59_non_bt[idx]); + } else if (rt2x00_rt(rt2x00dev, RT5350)) { + static const char r59_non_bt[] = {0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, + 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06}; + rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]); } @@ -3200,6 +3207,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info); break; case RF3070: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -3218,6 +3226,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, if (rt2x00_rf(rt2x00dev, RF3070) || rt2x00_rf(rt2x00dev, RF3290) || rt2x00_rf(rt2x00dev, RF3322) || + rt2x00_rf(rt2x00dev, RF5350) || rt2x00_rf(rt2x00dev, RF5360) || rt2x00_rf(rt2x00dev, RF5362) || rt2x00_rf(rt2x00dev, RF5370) || @@ -3475,7 +3484,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, /* * Clear update flag */ - if (rt2x00_rt(rt2x00dev, RT3352)) { + if (rt2x00_rt(rt2x00dev, RT3352) || + rt2x00_rt(rt2x00dev, RT5350)) { rt2800_bbp_read(rt2x00dev, 49, &bbp); rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0); rt2800_bbp_write(rt2x00dev, 49, bbp); @@ -4356,6 +4366,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) case RF3053: case RF3070: case RF3290: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -4738,6 +4749,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); + } else if (rt2x00_rt(rt2x00dev, RT5350)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -5383,9 +5396,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 82, 0x62); - rt2800_bbp_write(rt2x00dev, 83, 0x6a); - - rt2800_bbp_write(rt2x00dev, 84, 0x99); + if (rt2x00_rt(rt2x00dev, RT5350)) { + rt2800_bbp_write(rt2x00dev, 83, 0x7a); + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + } else { + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + 
rt2800_bbp_write(rt2x00dev, 84, 0x99); + } rt2800_bbp_write(rt2x00dev, 86, 0x38); @@ -5399,9 +5416,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 104, 0x92); - rt2800_bbp_write(rt2x00dev, 105, 0x34); - - rt2800_bbp_write(rt2x00dev, 106, 0x05); + if (rt2x00_rt(rt2x00dev, RT5350)) { + rt2800_bbp_write(rt2x00dev, 105, 0x3c); + rt2800_bbp_write(rt2x00dev, 106, 0x03); + } else { + rt2800_bbp_write(rt2x00dev, 105, 0x34); + rt2800_bbp_write(rt2x00dev, 106, 0x05); + } rt2800_bbp_write(rt2x00dev, 120, 0x50); @@ -5426,6 +5447,16 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 143, 0xa2); rt2800_bbp_write(rt2x00dev, 148, 0xc8); + + if (rt2x00_rt(rt2x00dev, RT5350)) { + /* Antenna Software OFDM */ + rt2800_bbp_write(rt2x00dev, 150, 0x40); + /* Antenna Software CCK */ + rt2800_bbp_write(rt2x00dev, 151, 0x30); + rt2800_bbp_write(rt2x00dev, 152, 0xa3); + /* Clear previously selected antenna */ + rt2800_bbp_write(rt2x00dev, 154, 0); + } } static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev) @@ -5726,6 +5757,7 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_init_bbp_3290(rt2x00dev); break; case RT3352: + case RT5350: rt2800_init_bbp_3352(rt2x00dev); break; case RT3390: @@ -6536,6 +6568,76 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) /* TODO: enable stream mode support */ } +static void rt2800_init_rfcsr_5350(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); + rt2800_rfcsr_write(rt2x00dev, 1, 0x23); + rt2800_rfcsr_write(rt2x00dev, 2, 0x50); + rt2800_rfcsr_write(rt2x00dev, 3, 0x08); + rt2800_rfcsr_write(rt2x00dev, 4, 0x49); + rt2800_rfcsr_write(rt2x00dev, 5, 0x10); + rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); + rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); + rt2800_rfcsr_write(rt2x00dev, 9, 0x02); + rt2800_rfcsr_write(rt2x00dev, 10, 0x53); + rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 12, 0x46); + if (rt2800_clk_is_20mhz(rt2x00dev)) + rt2800_rfcsr_write(rt2x00dev, 13, 0x1f); + else + rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); + rt2800_rfcsr_write(rt2x00dev, 14, 0x00); + rt2800_rfcsr_write(rt2x00dev, 15, 0x00); + rt2800_rfcsr_write(rt2x00dev, 16, 0xc0); + rt2800_rfcsr_write(rt2x00dev, 18, 0x03); + rt2800_rfcsr_write(rt2x00dev, 19, 0x00); + rt2800_rfcsr_write(rt2x00dev, 20, 0x00); + rt2800_rfcsr_write(rt2x00dev, 21, 0x00); + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); + rt2800_rfcsr_write(rt2x00dev, 23, 0x00); + rt2800_rfcsr_write(rt2x00dev, 24, 0x00); + rt2800_rfcsr_write(rt2x00dev, 25, 0x80); + rt2800_rfcsr_write(rt2x00dev, 26, 0x00); + rt2800_rfcsr_write(rt2x00dev, 27, 0x03); + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); + rt2800_rfcsr_write(rt2x00dev, 29, 0xd0); + rt2800_rfcsr_write(rt2x00dev, 30, 0x10); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x80); + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write(rt2x00dev, 34, 0x07); + rt2800_rfcsr_write(rt2x00dev, 35, 0x12); + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write(rt2x00dev, 37, 0x08); + rt2800_rfcsr_write(rt2x00dev, 38, 0x85); + rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); + rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); + rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); + rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); + rt2800_rfcsr_write(rt2x00dev, 43, 0x9b); + rt2800_rfcsr_write(rt2x00dev, 44, 0x0c); + rt2800_rfcsr_write(rt2x00dev, 45, 0xa6); + rt2800_rfcsr_write(rt2x00dev, 46, 0x73); + 
rt2800_rfcsr_write(rt2x00dev, 47, 0x00); + rt2800_rfcsr_write(rt2x00dev, 48, 0x10); + rt2800_rfcsr_write(rt2x00dev, 49, 0x80); + rt2800_rfcsr_write(rt2x00dev, 50, 0x00); + rt2800_rfcsr_write(rt2x00dev, 51, 0x00); + rt2800_rfcsr_write(rt2x00dev, 52, 0x38); + rt2800_rfcsr_write(rt2x00dev, 53, 0x00); + rt2800_rfcsr_write(rt2x00dev, 54, 0x38); + rt2800_rfcsr_write(rt2x00dev, 55, 0x43); + rt2800_rfcsr_write(rt2x00dev, 56, 0x82); + rt2800_rfcsr_write(rt2x00dev, 57, 0x00); + rt2800_rfcsr_write(rt2x00dev, 58, 0x39); + rt2800_rfcsr_write(rt2x00dev, 59, 0x0b); + rt2800_rfcsr_write(rt2x00dev, 60, 0x45); + rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); + rt2800_rfcsr_write(rt2x00dev, 62, 0x00); + rt2800_rfcsr_write(rt2x00dev, 63, 0x00); +} + static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev) { rt2800_rf_init_calibration(rt2x00dev, 2); @@ -6773,6 +6875,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) case RT3593: rt2800_init_rfcsr_3593(rt2x00dev); break; + case RT5350: + rt2800_init_rfcsr_5350(rt2x00dev); + break; case RT5390: rt2800_init_rfcsr_5390(rt2x00dev); break; @@ -7152,6 +7257,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf); else if (rt2x00_rt(rt2x00dev, RT3352)) rf = RF3322; + else if (rt2x00_rt(rt2x00dev, RT5350)) + rf = RF5350; else rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); @@ -7170,6 +7277,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) case RF3290: case RF3320: case RF3322: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -7673,6 +7781,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3290: case RF3320: case RF3322: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -7809,6 +7918,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3053: case RF3070: case RF3290: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -7849,6 +7959,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev) case RT3390: case RT3572: case RT3593: + case RT5350: case RT5390: case RT5392: case RT5592: diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index b59e721c8a5d..ea299c4e7ada 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -170,6 +170,7 @@ struct rt2x00_chip { #define RT3572 0x3572 #define RT3593 0x3593 #define RT3883 0x3883 /* WSOC */ +#define RT5350 0x5350 /* WSOC 2.4GHz */ #define RT5390 0x5390 /* 2.4GHz */ #define RT5392 0x5392 /* 2.4GHz */ #define RT5592 0x5592 -- cgit v1.2.3 From 3e66849865edca1d04c9c6227b9153e9bdbdaec0 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 24 Jan 2017 19:05:44 +0530 Subject: mwifiex: mwifiex_unmap_pci_memory() handling for sleep confirm Sleep confirm is a special command for which "adapter->cur_cmd" pointer is not set. When it's response is received, host writes SLEEP confirm done to a register. Firmware will perform DMA for writing sleep cookie signature on same buffer after this. Let's not immediately call mwifiex_unmap_pci_memory() for this special command. Unmapping will be done when firmware completes writing sleep cookie signature. 
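To make the required ordering explicit, here is a minimal sketch; the two helpers marked hypothetical stand in for the register write and the cookie poll the driver performs, and the generic dma_* API is used instead of the pci_dma_* wrappers from the patch:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	void write_sleep_cfm_done(struct device *dev);		/* hypothetical */
	void wait_for_sleep_cookie(struct device *dev, dma_addr_t addr,
				   void *buf);			/* hypothetical */

	static void handle_sleep_confirm(struct device *dev, struct sk_buff *skb,
					 dma_addr_t addr, size_t len)
	{
		/* 1. Tell firmware the SLEEP confirm is done. */
		write_sleep_cfm_done(dev);

		/* 2. Firmware now DMA-writes the sleep cookie into skb->data,
		 *    so the mapping must stay alive while waiting for it.
		 */
		wait_for_sleep_cookie(dev, addr, skb->data);

		/* 3. Only now is it safe to tear the mapping down. */
		dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	}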
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 19 ++++++++++++++++--- drivers/net/wireless/marvell/mwifiex/pcie.h | 1 + 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index ca51411e4a4c..3f4ca28356ac 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -441,7 +441,7 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, u32 sleep_cookie, count; for (count = 0; count < max_delay_loop_cnt; count++) { - buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN; + buffer = card->cmdrsp_buf->data; sleep_cookie = READ_ONCE(*(u32 *)buffer); if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { @@ -1690,7 +1690,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, CMD, "info: Rx CMD Response\n"); - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); + if (adapter->curr_cmd) + mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); + else + pci_dma_sync_single_for_cpu(card->dev, + MWIFIEX_SKB_DMA_ADDR(skb), + MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE); /* Unmap the command as a response has been received. */ if (card->cmd_buf) { @@ -1703,10 +1709,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) rx_len = le16_to_cpu(pkt_len); skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len); skb_trim(skb, rx_len); - skb_pull(skb, INTF_HEADER_LEN); if (!adapter->curr_cmd) { if (adapter->ps_state == PS_STATE_SLEEP_CFM) { + pci_dma_sync_single_for_device(card->dev, + MWIFIEX_SKB_DMA_ADDR(skb), + MWIFIEX_SLEEP_COOKIE_SIZE, + PCI_DMA_FROMDEVICE); if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_SLEEP_CFM_DONE)) { @@ -1716,6 +1725,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) } mwifiex_delay_for_sleep_cookie(adapter, MWIFIEX_MAX_DELAY_COUNT); + mwifiex_unmap_pci_memory(adapter, skb, + PCI_DMA_FROMDEVICE); + skb_pull(skb, INTF_HEADER_LEN); while (reg->sleep_cookie && (count++ < 10) && mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); @@ -1733,6 +1745,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) PCI_DMA_FROMDEVICE)) return -1; } else if (mwifiex_pcie_ok_to_access_hw(adapter)) { + skb_pull(skb, INTF_HEADER_LEN); adapter->curr_cmd->resp_skb = skb; adapter->cmd_resp_received = true; /* Take the pointer and set it to CMD node and will diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 21ba5e6a4c2a..00e8ee5ad4a8 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -116,6 +116,7 @@ /* FW awake cookie after FW ready */ #define FW_AWAKE_COOKIE (0xAA55AA55) #define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF +#define MWIFIEX_SLEEP_COOKIE_SIZE 4 #define MWIFIEX_MAX_DELAY_COUNT 100 struct mwifiex_pcie_card_reg { -- cgit v1.2.3 From cc37d8efd2ba3a4cf395a727935f920c00b6f1a2 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 24 Jan 2017 19:05:45 +0530 Subject: mwifiex: use pci_dma_sync_single* APIs On some platforms, driver is unable read sleep cookie signature even if firmware has written it through DMA. The problem is fixed by using pci_dma_sync_single* APIs while reading DMA buffer shared with firmware. 
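A minimal sketch of that sync pattern, assuming a 4-byte cookie at a known offset; the function name and parameters are illustrative, but the pci_dma_sync_single_for_cpu()/_for_device() pairing mirrors mwifiex_delay_for_sleep_cookie() in the hunk below:

	#include <linux/compiler.h>
	#include <linux/delay.h>
	#include <linux/pci.h>

	static bool poll_dma_cookie(struct pci_dev *pdev, dma_addr_t addr,
				    const u32 *cpu_addr, u32 expected, int tries)
	{
		while (tries--) {
			/* Make the device's latest write visible to the CPU. */
			pci_dma_sync_single_for_cpu(pdev, addr, sizeof(u32),
						    PCI_DMA_FROMDEVICE);
			if (READ_ONCE(*cpu_addr) == expected)
				return true;

			/* Hand the buffer back to the device before waiting. */
			pci_dma_sync_single_for_device(pdev, addr, sizeof(u32),
						       PCI_DMA_FROMDEVICE);
			usleep_range(20, 30);
		}

		return false;
	}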
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 3f4ca28356ac..a0d918094889 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -439,9 +439,14 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, struct pcie_service_card *card = adapter->card; u8 *buffer; u32 sleep_cookie, count; + struct sk_buff *cmdrsp = card->cmdrsp_buf; for (count = 0; count < max_delay_loop_cnt; count++) { - buffer = card->cmdrsp_buf->data; + pci_dma_sync_single_for_cpu(card->dev, + MWIFIEX_SKB_DMA_ADDR(cmdrsp), + sizeof(sleep_cookie), + PCI_DMA_FROMDEVICE); + buffer = cmdrsp->data; sleep_cookie = READ_ONCE(*(u32 *)buffer); if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { @@ -449,6 +454,10 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, "sleep cookie found at count %d\n", count); break; } + pci_dma_sync_single_for_device(card->dev, + MWIFIEX_SKB_DMA_ADDR(cmdrsp), + sizeof(sleep_cookie), + PCI_DMA_FROMDEVICE); usleep_range(20, 30); } -- cgit v1.2.3 From 64ad08605e8433b86249fef29b671812ccca65a5 Mon Sep 17 00:00:00 2001 From: Guy Mishol Date: Thu, 26 Jan 2017 17:35:13 +0200 Subject: wlcore: print the sdio buffer after reading it fix an issue where we printed the sdio buffer before actually read it. Signed-off-by: Guy Mishol Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/sdio.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 47fe7f96a242..287023ef4a78 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -81,13 +81,6 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr, sdio_claim_host(func); - if (unlikely(dump)) { - printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr); - print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ", - DUMP_PREFIX_OFFSET, 16, 1, - buf, len, false); - } - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", @@ -107,6 +100,13 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr, if (WARN_ON(ret)) dev_err(child->parent, "sdio read failed (%d)\n", ret); + if (unlikely(dump)) { + printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr); + print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ", + DUMP_PREFIX_OFFSET, 16, 1, + buf, len, false); + } + return ret; } -- cgit v1.2.3 From 575ddce0507789bf9830d089557d2199d2f91865 Mon Sep 17 00:00:00 2001 From: Michael Schenk Date: Thu, 26 Jan 2017 11:25:04 -0600 Subject: rtlwifi: rtl_usb: Fix for URB leaking when doing ifconfig up/down In the function rtl_usb_start we pre-allocate a certain number of urbs for RX path but they will not be freed when calling rtl_usb_stop. This results in leaking urbs when doing ifconfig up and down. Eventually, the system has no available urbs. 
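The cleanup relies on the USB core's anchor API: RX URBs anchored at submit time can be cancelled and released in one place when the interface goes down. A trimmed sketch of the teardown (function name illustrative; locking and work/tasklet cancellation omitted):

	#include <linux/usb.h>

	static void stop_rx_urbs(struct usb_anchor *submitted,
				 struct usb_anchor *cleanup)
	{
		struct urb *urb;

		/* Cancel every RX URB that is still in flight. */
		usb_kill_anchored_urbs(submitted);

		/* Release URBs (and their coherent buffers) parked for reuse. */
		while ((urb = usb_get_from_anchor(cleanup))) {
			usb_free_coherent(urb->dev, urb->transfer_buffer_length,
					  urb->transfer_buffer, urb->transfer_dma);
			usb_free_urb(urb);
		}
	}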
Signed-off-by: Michael Schenk Signed-off-by: Larry Finger Cc: Stable Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/usb.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 20d3fcc19a58..44bdb2bdeeb1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -818,12 +818,30 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct urb *urb; /* should after adapter start and interrupt enable. */ set_hal_stop(rtlhal); cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); /* Enable software */ SET_USB_STOP(rtlusb); + + /* free pre-allocated URBs from rtl_usb_start() */ + usb_kill_anchored_urbs(&rtlusb->rx_submitted); + + tasklet_kill(&rtlusb->rx_work_tasklet); + cancel_work_sync(&rtlpriv->works.lps_change_work); + + flush_workqueue(rtlpriv->works.rtl_wq); + + skb_queue_purge(&rtlusb->rx_queue); + + while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); + } + rtlpriv->cfg->ops->hw_disable(hw); } -- cgit v1.2.3 From 7c946062b3ae2b7f002383e5e402113e98ad3c77 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 28 Jan 2017 10:12:39 +0100 Subject: batman-adv: Fix double call of dev_queue_xmit The net_xmit_eval has side effects because it is not making sure that e isn't evaluated twice. #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) The code requested by David Miller [1] return net_xmit_eval(dev_queue_xmit(skb)); will get transformed into return ((dev_queue_xmit(skb)) == NET_XMIT_CN ? 0 : (dev_queue_xmit(skb))) dev_queue_xmit will therefore be tried again (with an already consumed skb) whenever the return code is not NET_XMIT_CN. [1] https://lkml.kernel.org/r/20170125.225624.965229145391320056.davem@davemloft.net Fixes: c33705188c49 ("batman-adv: Treat NET_XMIT_CN as transmit successfully") Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/send.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index d9b2889064a6..1489ec27daff 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -77,6 +77,7 @@ int batadv_send_skb_packet(struct sk_buff *skb, { struct batadv_priv *bat_priv; struct ethhdr *ethhdr; + int ret; bat_priv = netdev_priv(hard_iface->soft_iface); @@ -115,7 +116,8 @@ int batadv_send_skb_packet(struct sk_buff *skb, * congestion and traffic shaping, it drops and returns NET_XMIT_DROP * (which is > 0). This will not be treated as an error. */ - return net_xmit_eval(dev_queue_xmit(skb)); + ret = dev_queue_xmit(skb); + return net_xmit_eval(ret); send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; -- cgit v1.2.3 From 3e7514afc7d728dd47c5fe9d7a1f5216fe659cda Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 28 Jan 2017 10:23:30 +0100 Subject: batman-adv: Fix includes for IS_ERR/ERR_PTR IS_ERR/ERR_PTR are not defined in linux/device.h but in linux/err.h. The files using these macros therefore have to include the correct one. 
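For context, <linux/err.h> is the header that declares ERR_PTR()/IS_ERR()/PTR_ERR(); a self-contained sketch of the idiom they implement (struct and function names invented for illustration):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int val; };

	/* Pointer-returning allocator that encodes errno values via ERR_PTR(). */
	static struct foo *foo_create(int val)
	{
		struct foo *f;

		if (val < 0)
			return ERR_PTR(-EINVAL);

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return ERR_PTR(-ENOMEM);

		f->val = val;
		return f;
	}

	/* Callers decode with IS_ERR()/PTR_ERR() rather than checking for NULL. */
	static int foo_use(int val)
	{
		struct foo *f = foo_create(val);

		if (IS_ERR(f))
			return PTR_ERR(f);

		kfree(f);
		return 0;
	}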
Reported-by: Linus Luessing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/debugfs.c | 2 +- net/batman-adv/tp_meter.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 5406148b9497..e32ad47c6efd 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -19,7 +19,7 @@ #include "main.h" #include -#include +#include #include #include #include diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index 07f64b60b528..c94ebdecdc3d 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From 10931923b5a5624d254ec3c8db6970f63b277cac Mon Sep 17 00:00:00 2001 From: Andreas Schultz Date: Thu, 26 Jan 2017 16:21:48 +0100 Subject: gtp: remove unnecessary rcu_read_lock The rcu read lock is hold by default in the ip input path. There is no need to hold it twice in the socket recv decapsulate code path. Signed-off-by: Andreas Schultz Signed-off-by: David S. Miller --- drivers/net/gtp.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 99d3df788ce8..e11a1ead3228 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -183,7 +183,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, sizeof(struct gtp0_header); struct gtp0_header *gtp0; struct pdp_ctx *pctx; - int ret = 0; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -196,26 +195,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, if (gtp0->type != GTP_TPDU) return 1; - rcu_read_lock(); pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - ret = -1; - goto out_rcu; + return -1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - ret = -1; - goto out_rcu; + return -1; } - rcu_read_unlock(); /* Get rid of the GTP + UDP headers. */ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); -out_rcu: - rcu_read_unlock(); - return ret; } static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, @@ -225,7 +217,6 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, sizeof(struct gtp1_header); struct gtp1_header *gtp1; struct pdp_ctx *pctx; - int ret = 0; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -253,26 +244,19 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); - rcu_read_lock(); pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - ret = -1; - goto out_rcu; + return -1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - ret = -1; - goto out_rcu; + return -1; } - rcu_read_unlock(); /* Get rid of the GTP + UDP headers. 
*/ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); -out_rcu: - rcu_read_unlock(); - return ret; } static void gtp_encap_disable(struct gtp_dev *gtp) -- cgit v1.2.3 From 1796a81de4fbd3a93290de61e443ea4153ad28d4 Mon Sep 17 00:00:00 2001 From: Andreas Schultz Date: Thu, 26 Jan 2017 16:21:49 +0100 Subject: gtp: let userspace handle packets for invalid tunnels enable userspace to send error replies for invalid tunnels Acked-by: Harald Welte Signed-off-by: Andreas Schultz Signed-off-by: David S. Miller --- drivers/net/gtp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index e11a1ead3228..bda0c6413450 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -198,12 +198,12 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - return -1; + return 1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - return -1; + return 1; } /* Get rid of the GTP + UDP headers. */ @@ -247,12 +247,12 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - return -1; + return 1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - return -1; + return 1; } /* Get rid of the GTP + UDP headers. */ -- cgit v1.2.3 From e52cfb63a0ba55bc962e61b5af9be166f08827ae Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 27 Jan 2017 14:43:46 +0000 Subject: net: ethernet: aquantia: remove redundant err check The check on err < 0 is redundant and can be removed. Detected by CoverityScan, CID#1398321 ("Logically Dead Code") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index afcecdbf124c..da4bc09dac51 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -145,8 +145,6 @@ int aq_pci_func_init(struct aq_pci_func_s *self) } } - if (err < 0) - goto err_exit; for (i = 0; i < self->aq_hw_caps.msix_irqs; i++) self->msix_entry[i].entry = i; -- cgit v1.2.3 From f81e5ca9155e8b4527662aa66ba984f58257ccdb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 27 Jan 2017 15:00:25 +0000 Subject: net: ethernet: aquantia: remove another redundant err check The check on err < 0 is redundant and can be removed. Detected by CoverityScan, CID#1398318 ("Logically Dead Code") Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b517b2670a71..817c145520c8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -304,8 +304,6 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff = NULL; } - if (err < 0) - goto err_exit; err_exit: if (err < 0) { -- cgit v1.2.3 From 1a28242bac44b9eb24fb2e84131256cb3c63372c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 27 Jan 2017 15:06:46 +0000 Subject: net: ethernet: aquantia: return -ETIME in macro AQ_HW_WAIT_FOR The macro is returning ETIME which means various checks to see if the returned err is less than zero never work. I believe a -ETIME should be returned instead. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h index 78fcc0c3ab4d..03b72ddbffb9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -31,7 +31,7 @@ do { \ udelay(_US_); \ } \ if (!AQ_HW_WAIT_FOR_i) {\ - err = ETIME; \ + err = -ETIME; \ } \ } while (0) -- cgit v1.2.3 From 61fccb2d6274f77de6d16a0dc74eda813e90eb64 Mon Sep 17 00:00:00 2001 From: Kazuya Mizuguchi Date: Fri, 27 Jan 2017 20:46:26 +0100 Subject: ravb: Add tx and rx clock internal delays mode of APSR This patch enables tx and rx clock internal delay modes (TDM and RDM). This is to address a failure in the case of 1Gbps communication using the by salvator-x board with the KSZ9031RNX phy. This has been reported to occur with both the r8a7795 (H3) and r8a7796 (M3-W) SoCs. With this change APSR internal delay modes are enabled for "rgmii-id", "rgmii-rxid" and "rgmii-txid" phy modes as follows: phy mode | ASPR delay mode -----------+---------------- rgmii-id | TDM and RDM rgmii-rxid | RDM rgmii-txid | TDM Signed-off-by: Kazuya Mizuguchi Signed-off-by: Simon Horman Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb.h | 10 ++++++++++ drivers/net/ethernet/renesas/ravb_main.c | 23 +++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index f1109661a533..0525bd696d5d 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -76,6 +76,7 @@ enum ravb_reg { CDAR20 = 0x0060, CDAR21 = 0x0064, ESR = 0x0088, + APSR = 0x008C, /* R-Car Gen3 only */ RCR = 0x0090, RQC0 = 0x0094, RQC1 = 0x0098, @@ -248,6 +249,15 @@ enum ESR_BIT { ESR_EIL = 0x00001000, }; +/* APSR */ +enum APSR_BIT { + APSR_MEMS = 0x00000002, + APSR_CMSW = 0x00000010, + APSR_DM = 0x00006000, /* Undocumented? 
*/ + APSR_DM_RDM = 0x00002000, + APSR_DM_TDM = 0x00004000, +}; + /* RCR */ enum RCR_BIT { RCR_EFFS = 0x00000001, diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 301f48755093..9d8320edea53 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1920,6 +1920,23 @@ static void ravb_set_config_mode(struct net_device *ndev) } } +/* Set tx and rx clock internal delay modes */ +static void ravb_set_delay_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int set = 0; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) + set |= APSR_DM_RDM; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) + set |= APSR_DM_TDM; + + ravb_modify(ndev, APSR, APSR_DM, set); +} + static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -2032,6 +2049,9 @@ static int ravb_probe(struct platform_device *pdev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, @@ -2168,6 +2188,9 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Restore descriptor base address table */ ravb_write(ndev, priv->desc_bat_dma, DBAT); -- cgit v1.2.3 From 0e98f9d5f0b4a9012bd4fb5ff88b3ea290deb6a8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 27 Jan 2017 20:46:27 +0100 Subject: ravb: Support 1Gbps on R-Car H3 ES1.1+ and R-Car M3-W The limitation to 10/100Mbit speeds on R-Car Gen3 is valid for R-Car H3 ES1.0 only. Check for the exact SoC model to allow 1Gbps on newer revisions of R-Car H3, and on R-Car M3-W. Signed-off-by: Geert Uytterhoeven Signed-off-by: Simon Horman Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 9d8320edea53..8cfc4a54f2dc 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -31,6 +31,7 @@ #include #include #include +#include #include @@ -988,6 +989,11 @@ static void ravb_adjust_link(struct net_device *ndev) phy_print_status(phydev); } +static const struct soc_device_attribute r8a7795es10[] = { + { .soc_id = "r8a7795", .revision = "ES1.0", }, + { /* sentinel */ } +}; + /* PHY init function */ static int ravb_phy_init(struct net_device *ndev) { @@ -1023,10 +1029,10 @@ static int ravb_phy_init(struct net_device *ndev) goto err_deregister_fixed_link; } - /* This driver only support 10/100Mbit speeds on Gen3 + /* This driver only support 10/100Mbit speeds on R-Car H3 ES1.0 * at this time. 
*/ - if (priv->chip_id == RCAR_GEN3) { + if (soc_device_match(r8a7795es10)) { err = phy_set_max_speed(phydev, SPEED_100); if (err) { netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); -- cgit v1.2.3 From a0c02161ecfc2f40a0837926efac5376bc6fd6d3 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:36 -0500 Subject: net: dsa: variable number of ports Change the ports[DSA_MAX_PORTS] array of the dsa_switch structure for a zero-length array, allocated at the same time as the dsa_switch structure itself. A dsa_switch_alloc() helper is provided for that. This commit brings no functional change yet since we pass DSA_MAX_PORTS as the number of ports for the moment. Future patches can update the DSA drivers separately to support dynamic number of ports. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 7 ++++--- drivers/net/dsa/mv88e6xxx/chip.c | 3 +-- drivers/net/dsa/qca8k.c | 3 +-- include/net/dsa.h | 6 +++++- net/dsa/dsa.c | 5 ++--- net/dsa/dsa2.c | 16 ++++++++++++++++ 6 files changed, 29 insertions(+), 11 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index bb210b12ad1b..31afc4d4b68b 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1790,14 +1790,15 @@ struct b53_device *b53_switch_alloc(struct device *base, struct dsa_switch *ds; struct b53_device *dev; - ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL); + ds = dsa_switch_alloc(base, DSA_MAX_PORTS); if (!ds) return NULL; - dev = (struct b53_device *)(ds + 1); + dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; ds->priv = dev; - ds->dev = base; dev->dev = base; dev->ds = ds; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 921e53351786..cb7b24748336 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4361,11 +4361,10 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) struct device *dev = chip->dev; struct dsa_switch *ds; - ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + ds = dsa_switch_alloc(dev, DSA_MAX_PORTS); if (!ds) return -ENOMEM; - ds->dev = dev; ds->priv = chip; ds->ops = &mv88e6xxx_switch_ops; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index c084aa484d2b..f67c6a3cebff 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -954,12 +954,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev) if (id != QCA8K_ID_QCA8337) return -ENODEV; - priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); + priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); if (!priv->ds) return -ENOMEM; priv->ds->priv = priv; - priv->ds->dev = &mdiodev->dev; priv->ds->ops = &qca8k_switch_ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); diff --git a/include/net/dsa.h b/include/net/dsa.h index 92fd795e9573..24e1d935ae68 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -190,8 +190,11 @@ struct dsa_switch { u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; - struct dsa_port ports[DSA_MAX_PORTS]; struct mii_bus *slave_mii_bus; + + /* Dynamically allocated ports, keep last */ + size_t num_ports; + struct dsa_port ports[]; }; static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) @@ -386,6 +389,7 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) return dst->rcv != NULL; } +struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n); void 
dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds, struct device *dev); #ifdef CONFIG_PM_SLEEP diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 07e863369e04..de3ffb421ee4 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -347,8 +347,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Allocate and initialise switch state. */ - ds = devm_kzalloc(parent, sizeof(*ds), GFP_KERNEL); - if (ds == NULL) + ds = dsa_switch_alloc(parent, DSA_MAX_PORTS); + if (!ds) return ERR_PTR(-ENOMEM); ds->dst = dst; @@ -356,7 +356,6 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->cd = cd; ds->ops = ops; ds->priv = priv; - ds->dev = parent; ret = dsa_switch_setup_one(ds, parent); if (ret) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 75f5d1f8554b..4b3a44bec5c8 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -666,6 +666,22 @@ out: return err; } +struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n) +{ + size_t size = sizeof(struct dsa_switch) + n * sizeof(struct dsa_port); + struct dsa_switch *ds; + + ds = devm_kzalloc(dev, size, GFP_KERNEL); + if (!ds) + return NULL; + + ds->dev = dev; + ds->num_ports = n; + + return ds; +} +EXPORT_SYMBOL_GPL(dsa_switch_alloc); + int dsa_register_switch(struct dsa_switch *ds, struct device *dev) { int err; -- cgit v1.2.3 From 26895e299cfb583d304553e9c259e694a7e83397 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:37 -0500 Subject: net: dsa: use ds->num_ports when possible The dsa_switch structure contains the number of ports. Use it where the structure is valid instead of the DSA_MAX_PORTS value. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/dsa.c | 16 ++++++++-------- net/dsa/dsa2.c | 12 ++++++------ net/dsa/slave.c | 2 +- net/dsa/tag_brcm.c | 2 +- net/dsa/tag_dsa.c | 2 +- net/dsa/tag_edsa.c | 2 +- net/dsa/tag_trailer.c | 2 +- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index de3ffb421ee4..619e57a44d1d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -145,7 +145,7 @@ static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev) struct dsa_port *dport; int ret, port; - for (port = 0; port < DSA_MAX_PORTS; port++) { + for (port = 0; port < ds->num_ports; port++) { if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) continue; @@ -218,7 +218,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) /* * Validate supplied switch configuration. */ - for (i = 0; i < DSA_MAX_PORTS; i++) { + for (i = 0; i < ds->num_ports; i++) { char *name; name = cd->port_names[i]; @@ -242,7 +242,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) valid_name_found = true; } - if (!valid_name_found && i == DSA_MAX_PORTS) + if (!valid_name_found && i == ds->num_ports) return -EINVAL; /* Make the built-in MII bus mask match the number of ports, @@ -295,7 +295,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) /* * Create network devices for physical switch ports. */ - for (i = 0; i < DSA_MAX_PORTS; i++) { + for (i = 0; i < ds->num_ports; i++) { ds->ports[i].dn = cd->port_dn[i]; if (!(ds->enabled_port_mask & (1 << i))) @@ -377,7 +377,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds) int port; /* Destroy network devices for physical switch ports. 
*/ - for (port = 0; port < DSA_MAX_PORTS; port++) { + for (port = 0; port < ds->num_ports; port++) { if (!(ds->enabled_port_mask & (1 << port))) continue; @@ -388,7 +388,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds) } /* Disable configuration of the CPU and DSA ports */ - for (port = 0; port < DSA_MAX_PORTS; port++) { + for (port = 0; port < ds->num_ports; port++) { if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) continue; dsa_cpu_dsa_destroy(&ds->ports[port]); @@ -408,7 +408,7 @@ int dsa_switch_suspend(struct dsa_switch *ds) int i, ret = 0; /* Suspend slave network devices */ - for (i = 0; i < DSA_MAX_PORTS; i++) { + for (i = 0; i < ds->num_ports; i++) { if (!dsa_is_port_initialized(ds, i)) continue; @@ -435,7 +435,7 @@ int dsa_switch_resume(struct dsa_switch *ds) return ret; /* Resume slave network devices */ - for (i = 0; i < DSA_MAX_PORTS; i++) { + for (i = 0; i < ds->num_ports; i++) { if (!dsa_is_port_initialized(ds, i)) continue; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 4b3a44bec5c8..6e7b3e88b778 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -98,7 +98,7 @@ static bool dsa_ds_find_port_dn(struct dsa_switch *ds, { u32 index; - for (index = 0; index < DSA_MAX_PORTS; index++) + for (index = 0; index < ds->num_ports; index++) if (ds->ports[index].dn == port) return true; return false; @@ -159,7 +159,7 @@ static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds) u32 index; int err; - for (index = 0; index < DSA_MAX_PORTS; index++) { + for (index = 0; index < ds->num_ports; index++) { port = &ds->ports[index]; if (!dsa_port_is_valid(port)) continue; @@ -312,7 +312,7 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds) return err; } - for (index = 0; index < DSA_MAX_PORTS; index++) { + for (index = 0; index < ds->num_ports; index++) { port = &ds->ports[index]; if (!dsa_port_is_valid(port)) continue; @@ -344,7 +344,7 @@ static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds) struct dsa_port *port; u32 index; - for (index = 0; index < DSA_MAX_PORTS; index++) { + for (index = 0; index < ds->num_ports; index++) { port = &ds->ports[index]; if (!dsa_port_is_valid(port)) continue; @@ -475,7 +475,7 @@ static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds) u32 index; int err; - for (index = 0; index < DSA_MAX_PORTS; index++) { + for (index = 0; index < ds->num_ports; index++) { port = &ds->ports[index]; if (!dsa_port_is_valid(port)) continue; @@ -529,7 +529,7 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds) if (err) return err; - if (reg >= DSA_MAX_PORTS) + if (reg >= ds->num_ports) return -EINVAL; ds->ports[reg].dn = port; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 9750dd6f8c17..26b2c070b15a 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -404,7 +404,7 @@ static int dsa_fastest_ageing_time(struct dsa_switch *ds, { int i; - for (i = 0; i < DSA_MAX_PORTS; ++i) { + for (i = 0; i < ds->num_ports; ++i) { struct dsa_port *dp = &ds->ports[i]; if (dp && dp->ageing_time && dp->ageing_time < ageing_time) diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index cb5a2b7a0118..93e4458c02f9 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -128,7 +128,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, source_port = brcm_tag[3] & BRCM_EG_PID_MASK; /* Validate port against switch setup, either the port is totally */ - if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev) + if 
(source_port >= ds->num_ports || !ds->ports[source_port].netdev) goto out_drop; /* Remove Broadcom tag and update checksum */ diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index bce79ffe342b..8fa4b1942671 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -114,7 +114,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, if (!ds) goto out_drop; - if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev) + if (source_port >= ds->num_ports || !ds->ports[source_port].netdev) goto out_drop; /* diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 6c1720e88537..929de581a846 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -127,7 +127,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, if (!ds) goto out_drop; - if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev) + if (source_port >= ds->num_ports || !ds->ports[source_port].netdev) goto out_drop; /* diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 271128a2dc64..d8e55a845d7f 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -82,7 +82,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, goto out_drop; source_port = trailer[1] & 7; - if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev) + if (source_port >= ds->num_ports || !ds->ports[source_port].netdev) goto out_drop; pskb_trim_rcsum(skb, skb->len - 4); -- cgit v1.2.3 From 818be8489d6fc8f4cc2c7699bbfd8e1983080f10 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:38 -0500 Subject: net: dsa: add ds and index to dsa_port Add the physical switch instance and port index a DSA port belongs to to the dsa_port structure. That can be used later to retrieve information about a physical port when configuring a switch fabric, or lighten up struct dsa_slave_priv. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 2 ++ net/dsa/dsa2.c | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/include/net/dsa.h b/include/net/dsa.h index 24e1d935ae68..6bd1f8b05dbd 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -140,6 +140,8 @@ struct dsa_switch_tree { }; struct dsa_port { + struct dsa_switch *ds; + unsigned int index; struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 6e7b3e88b778..9f8cc26be9ea 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -670,6 +670,7 @@ struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n) { size_t size = sizeof(struct dsa_switch) + n * sizeof(struct dsa_port); struct dsa_switch *ds; + int i; ds = devm_kzalloc(dev, size, GFP_KERNEL); if (!ds) @@ -678,6 +679,11 @@ struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n) ds->dev = dev; ds->num_ports = n; + for (i = 0; i < ds->num_ports; ++i) { + ds->ports[i].index = i; + ds->ports[i].ds = ds; + } + return ds; } EXPORT_SYMBOL_GPL(dsa_switch_alloc); -- cgit v1.2.3 From afdcf151c1f7346207dcee3f8d6d82991dbbb7e5 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:39 -0500 Subject: net: dsa: store a dsa_port in dsa_slave_priv Store a pointer to the dsa_port structure in the dsa_slave_priv structure, instead of the switch/port index. This will allow to store more information such as the bridge device, needed in DSA drivers for multi-chip configuration. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- net/dsa/dsa_priv.h | 8 +-- net/dsa/slave.c | 164 +++++++++++++++++++++++++------------------------- net/dsa/tag_brcm.c | 4 +- net/dsa/tag_dsa.c | 8 +-- net/dsa/tag_edsa.c | 8 +-- net/dsa/tag_qca.c | 2 +- net/dsa/tag_trailer.c | 2 +- 7 files changed, 96 insertions(+), 100 deletions(-) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 16194a4bb2fe..c519bd0e9206 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -25,12 +25,8 @@ struct dsa_slave_priv { struct sk_buff * (*xmit)(struct sk_buff *skb, struct net_device *dev); - /* - * Which switch this port is a part of, and the port index - * for this port. - */ - struct dsa_switch *parent; - u8 port; + /* DSA port data, such as switch, port index, etc. */ + struct dsa_port *dp; /* * The phylib phy_device pointer for the PHY connected diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 26b2c070b15a..a2f0267c4a3c 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -61,7 +61,7 @@ static int dsa_slave_get_iflink(const struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - return p->parent->dst->master_netdev->ifindex; + return p->dp->ds->dst->master_netdev->ifindex; } static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p) @@ -96,8 +96,8 @@ static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state) static int dsa_slave_open(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - struct net_device *master = p->parent->dst->master_netdev; - struct dsa_switch *ds = p->parent; + struct net_device *master = p->dp->ds->dst->master_netdev; + struct dsa_switch *ds = p->dp->ds; u8 stp_state = dsa_port_is_bridged(p) ? BR_STATE_BLOCKING : BR_STATE_FORWARDING; int err; @@ -123,12 +123,12 @@ static int dsa_slave_open(struct net_device *dev) } if (ds->ops->port_enable) { - err = ds->ops->port_enable(ds, p->port, p->phy); + err = ds->ops->port_enable(ds, p->dp->index, p->phy); if (err) goto clear_promisc; } - dsa_port_set_stp_state(ds, p->port, stp_state); + dsa_port_set_stp_state(ds, p->dp->index, stp_state); if (p->phy) phy_start(p->phy); @@ -151,8 +151,8 @@ out: static int dsa_slave_close(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - struct net_device *master = p->parent->dst->master_netdev; - struct dsa_switch *ds = p->parent; + struct net_device *master = p->dp->ds->dst->master_netdev; + struct dsa_switch *ds = p->dp->ds; if (p->phy) phy_stop(p->phy); @@ -168,9 +168,9 @@ static int dsa_slave_close(struct net_device *dev) dev_uc_del(master, dev->dev_addr); if (ds->ops->port_disable) - ds->ops->port_disable(ds, p->port, p->phy); + ds->ops->port_disable(ds, p->dp->index, p->phy); - dsa_port_set_stp_state(ds, p->port, BR_STATE_DISABLED); + dsa_port_set_stp_state(ds, p->dp->index, BR_STATE_DISABLED); return 0; } @@ -178,7 +178,7 @@ static int dsa_slave_close(struct net_device *dev) static void dsa_slave_change_rx_flags(struct net_device *dev, int change) { struct dsa_slave_priv *p = netdev_priv(dev); - struct net_device *master = p->parent->dst->master_netdev; + struct net_device *master = p->dp->ds->dst->master_netdev; if (change & IFF_ALLMULTI) dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 
1 : -1); @@ -189,7 +189,7 @@ static void dsa_slave_change_rx_flags(struct net_device *dev, int change) static void dsa_slave_set_rx_mode(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - struct net_device *master = p->parent->dst->master_netdev; + struct net_device *master = p->dp->ds->dst->master_netdev; dev_mc_sync(master, dev); dev_uc_sync(master, dev); @@ -198,7 +198,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev) static int dsa_slave_set_mac_address(struct net_device *dev, void *a) { struct dsa_slave_priv *p = netdev_priv(dev); - struct net_device *master = p->parent->dst->master_netdev; + struct net_device *master = p->dp->ds->dst->master_netdev; struct sockaddr *addr = a; int err; @@ -228,16 +228,17 @@ static int dsa_slave_port_vlan_add(struct net_device *dev, struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_port *dp = p->dp; + struct dsa_switch *ds = dp->ds; if (switchdev_trans_ph_prepare(trans)) { if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add) return -EOPNOTSUPP; - return ds->ops->port_vlan_prepare(ds, p->port, vlan, trans); + return ds->ops->port_vlan_prepare(ds, dp->index, vlan, trans); } - ds->ops->port_vlan_add(ds, p->port, vlan, trans); + ds->ops->port_vlan_add(ds, dp->index, vlan, trans); return 0; } @@ -246,12 +247,12 @@ static int dsa_slave_port_vlan_del(struct net_device *dev, const struct switchdev_obj_port_vlan *vlan) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (!ds->ops->port_vlan_del) return -EOPNOTSUPP; - return ds->ops->port_vlan_del(ds, p->port, vlan); + return ds->ops->port_vlan_del(ds, p->dp->index, vlan); } static int dsa_slave_port_vlan_dump(struct net_device *dev, @@ -259,10 +260,10 @@ static int dsa_slave_port_vlan_dump(struct net_device *dev, switchdev_obj_dump_cb_t *cb) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->port_vlan_dump) - return ds->ops->port_vlan_dump(ds, p->port, vlan, cb); + return ds->ops->port_vlan_dump(ds, p->dp->index, vlan, cb); return -EOPNOTSUPP; } @@ -272,16 +273,16 @@ static int dsa_slave_port_fdb_add(struct net_device *dev, struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (switchdev_trans_ph_prepare(trans)) { if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add) return -EOPNOTSUPP; - return ds->ops->port_fdb_prepare(ds, p->port, fdb, trans); + return ds->ops->port_fdb_prepare(ds, p->dp->index, fdb, trans); } - ds->ops->port_fdb_add(ds, p->port, fdb, trans); + ds->ops->port_fdb_add(ds, p->dp->index, fdb, trans); return 0; } @@ -290,11 +291,11 @@ static int dsa_slave_port_fdb_del(struct net_device *dev, const struct switchdev_obj_port_fdb *fdb) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; int ret = -EOPNOTSUPP; if (ds->ops->port_fdb_del) - ret = ds->ops->port_fdb_del(ds, p->port, fdb); + ret = ds->ops->port_fdb_del(ds, p->dp->index, fdb); return ret; } @@ -304,10 +305,10 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev, switchdev_obj_dump_cb_t *cb) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->port_fdb_dump) - return ds->ops->port_fdb_dump(ds, p->port, fdb, cb); 
+ return ds->ops->port_fdb_dump(ds, p->dp->index, fdb, cb); return -EOPNOTSUPP; } @@ -317,16 +318,16 @@ static int dsa_slave_port_mdb_add(struct net_device *dev, struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (switchdev_trans_ph_prepare(trans)) { if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add) return -EOPNOTSUPP; - return ds->ops->port_mdb_prepare(ds, p->port, mdb, trans); + return ds->ops->port_mdb_prepare(ds, p->dp->index, mdb, trans); } - ds->ops->port_mdb_add(ds, p->port, mdb, trans); + ds->ops->port_mdb_add(ds, p->dp->index, mdb, trans); return 0; } @@ -335,10 +336,10 @@ static int dsa_slave_port_mdb_del(struct net_device *dev, const struct switchdev_obj_port_mdb *mdb) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->port_mdb_del) - return ds->ops->port_mdb_del(ds, p->port, mdb); + return ds->ops->port_mdb_del(ds, p->dp->index, mdb); return -EOPNOTSUPP; } @@ -348,10 +349,10 @@ static int dsa_slave_port_mdb_dump(struct net_device *dev, switchdev_obj_dump_cb_t *cb) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->port_mdb_dump) - return ds->ops->port_mdb_dump(ds, p->port, mdb, cb); + return ds->ops->port_mdb_dump(ds, p->dp->index, mdb, cb); return -EOPNOTSUPP; } @@ -371,12 +372,12 @@ static int dsa_slave_stp_state_set(struct net_device *dev, struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (switchdev_trans_ph_prepare(trans)) return ds->ops->port_stp_state_set ? 
0 : -EOPNOTSUPP; - dsa_port_set_stp_state(ds, p->port, attr->u.stp_state); + dsa_port_set_stp_state(ds, p->dp->index, attr->u.stp_state); return 0; } @@ -386,14 +387,14 @@ static int dsa_slave_vlan_filtering(struct net_device *dev, struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; /* bridge skips -EOPNOTSUPP, so skip the prepare phase */ if (switchdev_trans_ph_prepare(trans)) return 0; if (ds->ops->port_vlan_filtering) - return ds->ops->port_vlan_filtering(ds, p->port, + return ds->ops->port_vlan_filtering(ds, p->dp->index, attr->u.vlan_filtering); return 0; @@ -419,7 +420,7 @@ static int dsa_slave_ageing_time(struct net_device *dev, struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time); unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies); @@ -428,7 +429,7 @@ static int dsa_slave_ageing_time(struct net_device *dev, return 0; /* Keep the fastest ageing time in case of multiple bridges */ - ds->ports[p->port].ageing_time = ageing_time; + p->dp->ageing_time = ageing_time; ageing_time = dsa_fastest_ageing_time(ds, ageing_time); if (ds->ops->set_ageing_time) @@ -553,13 +554,13 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, struct net_device *br) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; int ret = -EOPNOTSUPP; p->bridge_dev = br; if (ds->ops->port_bridge_join) - ret = ds->ops->port_bridge_join(ds, p->port, br); + ret = ds->ops->port_bridge_join(ds, p->dp->index, br); return ret == -EOPNOTSUPP ? 
0 : ret; } @@ -567,25 +568,25 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, static void dsa_slave_bridge_port_leave(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->port_bridge_leave) - ds->ops->port_bridge_leave(ds, p->port); + ds->ops->port_bridge_leave(ds, p->dp->index); p->bridge_dev = NULL; /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, * so allow it to be in BR_STATE_FORWARDING to be kept functional */ - dsa_port_set_stp_state(ds, p->port, BR_STATE_FORWARDING); + dsa_port_set_stp_state(ds, p->dp->index, BR_STATE_FORWARDING); } static int dsa_slave_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: @@ -633,7 +634,7 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) /* Queue the SKB for transmission on the parent interface, but * do not modify its EtherType */ - nskb->dev = p->parent->dst->master_netdev; + nskb->dev = p->dp->ds->dst->master_netdev; dev_queue_xmit(nskb); return NETDEV_TX_OK; @@ -680,10 +681,10 @@ static void dsa_slave_get_drvinfo(struct net_device *dev, static int dsa_slave_get_regs_len(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->get_regs_len) - return ds->ops->get_regs_len(ds, p->port); + return ds->ops->get_regs_len(ds, p->dp->index); return -EOPNOTSUPP; } @@ -692,10 +693,10 @@ static void dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->get_regs) - ds->ops->get_regs(ds, p->port, regs, _p); + ds->ops->get_regs(ds, p->dp->index, regs, _p); } static int dsa_slave_nway_reset(struct net_device *dev) @@ -723,7 +724,7 @@ static u32 dsa_slave_get_link(struct net_device *dev) static int dsa_slave_get_eeprom_len(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->cd && ds->cd->eeprom_len) return ds->cd->eeprom_len; @@ -738,7 +739,7 @@ static int dsa_slave_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->get_eeprom) return ds->ops->get_eeprom(ds, eeprom, data); @@ -750,7 +751,7 @@ static int dsa_slave_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->set_eeprom) return ds->ops->set_eeprom(ds, eeprom, data); @@ -762,7 +763,7 @@ static void dsa_slave_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (stringset == ETH_SS_STATS) { int len = ETH_GSTRING_LEN; @@ -772,7 +773,7 @@ static void dsa_slave_get_strings(struct net_device *dev, strncpy(data + 2 * len, "rx_packets", len); strncpy(data + 3 * len, "rx_bytes", len); if (ds->ops->get_strings) - ds->ops->get_strings(ds, p->port, 
data + 4 * len); + ds->ops->get_strings(ds, p->dp->index, data + 4 * len); } } @@ -853,20 +854,20 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev, uint64_t *data) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; data[0] = dev->stats.tx_packets; data[1] = dev->stats.tx_bytes; data[2] = dev->stats.rx_packets; data[3] = dev->stats.rx_bytes; if (ds->ops->get_ethtool_stats) - ds->ops->get_ethtool_stats(ds, p->port, data + 4); + ds->ops->get_ethtool_stats(ds, p->dp->index, data + 4); } static int dsa_slave_get_sset_count(struct net_device *dev, int sset) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (sset == ETH_SS_STATS) { int count; @@ -884,20 +885,20 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset) static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; if (ds->ops->get_wol) - ds->ops->get_wol(ds, p->port, w); + ds->ops->get_wol(ds, p->dp->index, w); } static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; int ret = -EOPNOTSUPP; if (ds->ops->set_wol) - ret = ds->ops->set_wol(ds, p->port, w); + ret = ds->ops->set_wol(ds, p->dp->index, w); return ret; } @@ -905,13 +906,13 @@ static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; int ret; if (!ds->ops->set_eee) return -EOPNOTSUPP; - ret = ds->ops->set_eee(ds, p->port, p->phy, e); + ret = ds->ops->set_eee(ds, p->dp->index, p->phy, e); if (ret) return ret; @@ -924,13 +925,13 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; int ret; if (!ds->ops->get_eee) return -EOPNOTSUPP; - ret = ds->ops->get_eee(ds, p->port, e); + ret = ds->ops->get_eee(ds, p->dp->index, e); if (ret) return ret; @@ -945,7 +946,7 @@ static int dsa_slave_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; struct net_device *master = ds->dst->master_netdev; struct netpoll *netpoll; int err = 0; @@ -988,7 +989,7 @@ static int dsa_slave_get_phys_port_name(struct net_device *dev, { struct dsa_slave_priv *p = netdev_priv(dev); - if (snprintf(name, len, "p%d", p->port) >= len) + if (snprintf(name, len, "p%d", p->dp->index) >= len) return -EINVAL; return 0; @@ -1059,7 +1060,7 @@ static struct device_type dsa_type = { static void dsa_slave_adjust_link(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; unsigned int status_changed = 0; if (p->old_link != p->phy->link) { @@ -1078,7 +1079,7 @@ static void dsa_slave_adjust_link(struct net_device *dev) } if (ds->ops->adjust_link && status_changed) - ds->ops->adjust_link(ds, p->port, p->phy); + 
ds->ops->adjust_link(ds, p->dp->index, p->phy); if (status_changed) phy_print_status(p->phy); @@ -1092,9 +1093,9 @@ static int dsa_slave_fixed_link_update(struct net_device *dev, if (dev) { p = netdev_priv(dev); - ds = p->parent; + ds = p->dp->ds; if (ds->ops->fixed_link_update) - ds->ops->fixed_link_update(ds, p->port, status); + ds->ops->fixed_link_update(ds, p->dp->index, status); } return 0; @@ -1105,7 +1106,7 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, struct net_device *slave_dev, int addr) { - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; p->phy = mdiobus_get_phy(ds->slave_mii_bus, addr); if (!p->phy) { @@ -1123,13 +1124,13 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, static int dsa_slave_phy_setup(struct dsa_slave_priv *p, struct net_device *slave_dev) { - struct dsa_switch *ds = p->parent; + struct dsa_switch *ds = p->dp->ds; struct device_node *phy_dn, *port_dn; bool phy_is_fixed = false; u32 phy_flags = 0; int mode, ret; - port_dn = ds->ports[p->port].dn; + port_dn = p->dp->dn; mode = of_get_phy_mode(port_dn); if (mode < 0) mode = PHY_INTERFACE_MODE_NA; @@ -1150,7 +1151,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, } if (ds->ops->get_phy_flags) - phy_flags = ds->ops->get_phy_flags(ds, p->port); + phy_flags = ds->ops->get_phy_flags(ds, p->dp->index); if (phy_dn) { int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn); @@ -1185,9 +1186,10 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, * MDIO bus instead */ if (!p->phy) { - ret = dsa_slave_phy_connect(p, slave_dev, p->port); + ret = dsa_slave_phy_connect(p, slave_dev, p->dp->index); if (ret) { - netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret); + netdev_err(slave_dev, "failed to connect to port %d: %d\n", + p->dp->index, ret); if (phy_is_fixed) of_phy_deregister_fixed_link(port_dn); return ret; @@ -1275,8 +1277,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->vlan_features = master->vlan_features; p = netdev_priv(slave_dev); - p->parent = ds; - p->port = port; + p->dp = &ds->ports[port]; p->xmit = dst->tag_ops->xmit; p->old_pause = -1; @@ -1309,10 +1310,9 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, void dsa_slave_destroy(struct net_device *slave_dev) { struct dsa_slave_priv *p = netdev_priv(slave_dev); - struct dsa_switch *ds = p->parent; struct device_node *port_dn; - port_dn = ds->ports[p->port].dn; + port_dn = p->dp->dn; netif_carrier_off(slave_dev); if (p->phy) { diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 93e4458c02f9..5d925b6b2bb1 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -80,9 +80,9 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK); brcm_tag[1] = 0; brcm_tag[2] = 0; - if (p->port == 8) + if (p->dp->index == 8) brcm_tag[2] = BRCM_IG_DSTMAP2_MASK; - brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK; + brcm_tag[3] = (1 << p->dp->index) & BRCM_IG_DSTMAP1_MASK; return skb; diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index 8fa4b1942671..72579ceea381 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -33,8 +33,8 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev) * Construct tagged FROM_CPU DSA tag from 802.1q tag. 
*/ dsa_header = skb->data + 2 * ETH_ALEN; - dsa_header[0] = 0x60 | p->parent->index; - dsa_header[1] = p->port << 3; + dsa_header[0] = 0x60 | p->dp->ds->index; + dsa_header[1] = p->dp->index << 3; /* * Move CFI field from byte 2 to byte 1. @@ -54,8 +54,8 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev) * Construct untagged FROM_CPU DSA tag. */ dsa_header = skb->data + 2 * ETH_ALEN; - dsa_header[0] = 0x40 | p->parent->index; - dsa_header[1] = p->port << 3; + dsa_header[0] = 0x40 | p->dp->ds->index; + dsa_header[1] = p->dp->index << 3; dsa_header[2] = 0x00; dsa_header[3] = 0x00; } diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 929de581a846..648c051817a1 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -42,8 +42,8 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) edsa_header[1] = ETH_P_EDSA & 0xff; edsa_header[2] = 0x00; edsa_header[3] = 0x00; - edsa_header[4] = 0x60 | p->parent->index; - edsa_header[5] = p->port << 3; + edsa_header[4] = 0x60 | p->dp->ds->index; + edsa_header[5] = p->dp->index << 3; /* * Move CFI field from byte 6 to byte 5. @@ -67,8 +67,8 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) edsa_header[1] = ETH_P_EDSA & 0xff; edsa_header[2] = 0x00; edsa_header[3] = 0x00; - edsa_header[4] = 0x40 | p->parent->index; - edsa_header[5] = p->port << 3; + edsa_header[4] = 0x40 | p->dp->ds->index; + edsa_header[5] = p->dp->index << 3; edsa_header[6] = 0x00; edsa_header[7] = 0x00; } diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index 736ca8e8c31e..30240f343aea 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -54,7 +54,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) /* Set the version field, and set destination port information */ hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S | QCA_HDR_XMIT_FROM_CPU | - BIT(p->port); + BIT(p->dp->index); *phdr = htons(hdr); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index d8e55a845d7f..26f977176978 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -50,7 +50,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev) trailer = skb_put(nskb, 4); trailer[0] = 0x80; - trailer[1] = 1 << p->port; + trailer[1] = 1 << p->dp->index; trailer[2] = 0x10; trailer[3] = 0x00; -- cgit v1.2.3 From a5e9a02e1f182237ef44eb3919cf4dd45ed4db9b Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:40 -0500 Subject: net: dsa: move bridge device in dsa_port Move the bridge_dev pointer from dsa_slave_priv to dsa_port so that DSA drivers can access this information and remove the need to cache it. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- include/net/dsa.h | 1 + net/dsa/dsa_priv.h | 1 - net/dsa/slave.c | 10 +++++----- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index 6bd1f8b05dbd..924533fd4425 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -146,6 +146,7 @@ struct dsa_port { struct device_node *dn; unsigned int ageing_time; u8 stp_state; + struct net_device *bridge_dev; }; struct dsa_switch { diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index c519bd0e9206..3022f2e42cdc 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -38,7 +38,6 @@ struct dsa_slave_priv { int old_pause; int old_duplex; - struct net_device *bridge_dev; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a2f0267c4a3c..cdb1df87e111 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -64,9 +64,9 @@ static int dsa_slave_get_iflink(const struct net_device *dev) return p->dp->ds->dst->master_netdev->ifindex; } -static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p) +static inline bool dsa_port_is_bridged(struct dsa_port *dp) { - return !!p->bridge_dev; + return !!dp->bridge_dev; } static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state) @@ -98,7 +98,7 @@ static int dsa_slave_open(struct net_device *dev) struct dsa_slave_priv *p = netdev_priv(dev); struct net_device *master = p->dp->ds->dst->master_netdev; struct dsa_switch *ds = p->dp->ds; - u8 stp_state = dsa_port_is_bridged(p) ? + u8 stp_state = dsa_port_is_bridged(p->dp) ? BR_STATE_BLOCKING : BR_STATE_FORWARDING; int err; @@ -557,7 +557,7 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, struct dsa_switch *ds = p->dp->ds; int ret = -EOPNOTSUPP; - p->bridge_dev = br; + p->dp->bridge_dev = br; if (ds->ops->port_bridge_join) ret = ds->ops->port_bridge_join(ds, p->dp->index, br); @@ -574,7 +574,7 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev) if (ds->ops->port_bridge_leave) ds->ops->port_bridge_leave(ds, p->dp->index); - p->bridge_dev = NULL; + p->dp->bridge_dev = NULL; /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, * so allow it to be in BR_STATE_FORWARDING to be kept functional -- cgit v1.2.3 From f123f2fbedc7c2723ceb050cd88c2ea1d6a8be32 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:41 -0500 Subject: net: dsa: pass bridge device when a port leaves Upon reception of the NETDEV_CHANGEUPPER, a leaving port is already unbridged, so reflect this by assigning the port's bridge_dev pointer to NULL before calling the port_bridge_leave DSA driver operation. Now that the bridge_dev pointer is exposed to the drivers, reflecting the current state of the DSA switch fabric is necessary for the drivers to adjust their port based VLANs correctly. Pass the bridge device pointer to the port_bridge_leave operation so that drivers have all information to re-program their chips properly, and do not need to cache it anymore. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 2 +- drivers/net/dsa/b53/b53_priv.h | 2 +- drivers/net/dsa/mv88e6xxx/chip.c | 3 ++- drivers/net/dsa/qca8k.c | 2 +- include/net/dsa.h | 3 ++- net/dsa/slave.c | 10 +++++----- 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 31afc4d4b68b..32fdcf5570c8 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1354,7 +1354,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) } EXPORT_SYMBOL(b53_br_join); -void b53_br_leave(struct dsa_switch *ds, int port) +void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; struct net_device *bridge = dev->ports[port].bridge_dev; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index a8031b382c55..5dafb70e75fc 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -382,7 +382,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_get_sset_count(struct dsa_switch *ds); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); -void b53_br_leave(struct dsa_switch *ds, int port); +void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index cb7b24748336..8eb0dc063f4e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2343,7 +2343,8 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, return err; } -static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) +static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; struct net_device *bridge = chip->ports[port].bridge_dev; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index f67c6a3cebff..c85b187aa3d9 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -775,7 +775,7 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, } static void -qca8k_port_bridge_leave(struct dsa_switch *ds, int port) +qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int i; diff --git a/include/net/dsa.h b/include/net/dsa.h index 924533fd4425..b951e2ebda75 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -325,7 +325,8 @@ struct dsa_switch_ops { int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs); int (*port_bridge_join)(struct dsa_switch *ds, int port, struct net_device *bridge); - void (*port_bridge_leave)(struct dsa_switch *ds, int port); + void (*port_bridge_leave)(struct dsa_switch *ds, int port, + struct net_device *bridge); void (*port_stp_state_set)(struct dsa_switch *ds, int port, u8 state); void (*port_fast_age)(struct dsa_switch *ds, int port); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cdb1df87e111..08725286f79d 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -565,16 +565,16 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, return ret == -EOPNOTSUPP ? 
0 : ret; } -static void dsa_slave_bridge_port_leave(struct net_device *dev) +static void dsa_slave_bridge_port_leave(struct net_device *dev, + struct net_device *br) { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->dp->ds; + p->dp->bridge_dev = NULL; if (ds->ops->port_bridge_leave) - ds->ops->port_bridge_leave(ds, p->dp->index); - - p->dp->bridge_dev = NULL; + ds->ops->port_bridge_leave(ds, p->dp->index, br); /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, * so allow it to be in BR_STATE_FORWARDING to be kept functional @@ -1343,7 +1343,7 @@ static int dsa_slave_port_upper_event(struct net_device *dev, if (info->linking) err = dsa_slave_bridge_port_join(dev, upper); else - dsa_slave_bridge_port_leave(dev); + dsa_slave_bridge_port_leave(dev, upper); } break; -- cgit v1.2.3 From fae8a25e5b3b627f1ea005ffa663d067f4d5fe44 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:42 -0500 Subject: net: dsa: mv88e6xxx: use dsa_port's bridge pointer Now that DSA exposes the bridge device pointer to which a port belongs, use it when programming the port based VLANs and thus remove the cache. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 27 +++++++++++---------------- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 6 ------ 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8eb0dc063f4e..84cba32443de 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1247,8 +1247,8 @@ static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) { - struct net_device *bridge = chip->ports[port].bridge_dev; struct dsa_switch *ds = chip->ds; + struct net_device *bridge = ds->ports[port].bridge_dev; u16 output_ports = 0; int i; @@ -1258,7 +1258,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) } else { for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { /* allow sending frames to every group member */ - if (bridge && chip->ports[i].bridge_dev == bridge) + if (bridge && ds->ports[i].bridge_dev == bridge) output_ports |= BIT(i); /* allow sending frames to CPU port and DSA link(s) */ @@ -1820,17 +1820,17 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; - if (chip->ports[i].bridge_dev == - chip->ports[port].bridge_dev) + if (ds->ports[i].bridge_dev == + ds->ports[port].bridge_dev) break; /* same bridge, check next VLAN */ - if (!chip->ports[i].bridge_dev) + if (!ds->ports[i].bridge_dev) continue; netdev_warn(ds->ports[port].netdev, "hardware VLAN %d already used by %s\n", vlan.vid, - netdev_name(chip->ports[i].bridge_dev)); + netdev_name(ds->ports[i].bridge_dev)); err = -EOPNOTSUPP; goto unlock; } @@ -2320,18 +2320,16 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, } static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; int i, err = 0; mutex_lock(&chip->reg_lock); - /* Assign the bridge and remap each port's VLANTable */ - chip->ports[port].bridge_dev = bridge; - + /* Remap each port's VLANTable */ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - if (chip->ports[i].bridge_dev == bridge) { + if (ds->ports[i].bridge_dev == br) { err = 
_mv88e6xxx_port_based_vlan_map(chip, i); if (err) break; @@ -2347,16 +2345,13 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; - struct net_device *bridge = chip->ports[port].bridge_dev; int i; mutex_lock(&chip->reg_lock); - /* Unassign the bridge and remap each port's VLANTable */ - chip->ports[port].bridge_dev = NULL; - + /* Remap each port's VLANTable */ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - if (i == port || chip->ports[i].bridge_dev == bridge) + if (i == port || ds->ports[i].bridge_dev == br) if (_mv88e6xxx_port_based_vlan_map(chip, i)) netdev_warn(ds->ports[i].netdev, "failed to remap\n"); diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 572d585dc1e2..e126ed00937b 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -676,10 +676,6 @@ struct mv88e6xxx_vtu_entry { struct mv88e6xxx_bus_ops; -struct mv88e6xxx_priv_port { - struct net_device *bridge_dev; -}; - struct mv88e6xxx_irq { u16 masked; struct irq_chip chip; @@ -720,8 +716,6 @@ struct mv88e6xxx_chip { */ struct mutex stats_mutex; - struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; - /* A switch may have a GPIO line tied to its reset pin. Parse * this from the device tree, and use it before performing * switch soft reset. -- cgit v1.2.3 From 922754a48a4e4b87ee6f5bf80e7463a82e124ab8 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:43 -0500 Subject: net: dsa: qca8k: use dsa_port's bridge pointer Now that DSA exposes the bridge device pointer to which a port belongs, use it when programming the port based VLANs and thus remove the cache. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca8k.c | 12 ++++-------- drivers/net/dsa/qca8k.h | 1 - 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index c85b187aa3d9..a4fd4ccf7b67 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -746,17 +746,14 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) } static int -qca8k_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) +qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int port_mask = BIT(QCA8K_CPU_PORT); int i; - priv->port_sts[port].bridge_dev = bridge; - for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (priv->port_sts[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; /* Add this port to the portvlan mask of the other ports * in the bridge @@ -781,8 +778,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) int i; for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (priv->port_sts[i].bridge_dev != - priv->port_sts[port].bridge_dev) + if (ds->ports[i].bridge_dev != br) continue; /* Remove this port to the portvlan mask of the other ports * in the bridge @@ -791,7 +787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) QCA8K_PORT_LOOKUP_CTRL(i), BIT(port)); } - priv->port_sts[port].bridge_dev = NULL; + /* Set the cpu port to be the only one in the portvlan mask of * this port */ diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 201464719531..1ed4fac6cd6d 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -157,7 +157,6 @@ enum qca8k_fdb_cmd { struct ar8xxx_port_status { struct ethtool_eee eee; - struct net_device *bridge_dev; int enabled; }; -- cgit v1.2.3 From ddd3a0c8408df9ec07279f1e3dc9a98781a5217e Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 27 Jan 2017 15:29:44 -0500 Subject: net: dsa: b53: use dsa_port's bridge pointer Now that DSA exposes the bridge device pointer to which a port belongs, use it when programming the port based VLANs and thus remove the cache. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 9 +++------ drivers/net/dsa/b53/b53_priv.h | 1 - 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 32fdcf5570c8..3a7d16b6c3eb 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1308,7 +1308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_fdb_dump); -int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) +int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; s8 cpu_port = ds->dst->cpu_port; @@ -1326,11 +1326,10 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); } - dev->ports[port].bridge_dev = bridge; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); b53_for_each_port(dev, i) { - if (dev->ports[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; /* Add this local port to the remote port VLAN control @@ -1357,7 +1356,6 @@ EXPORT_SYMBOL(b53_br_join); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; - struct net_device *bridge = dev->ports[port].bridge_dev; struct b53_vlan *vl = &dev->vlans[0]; s8 cpu_port = ds->dst->cpu_port; unsigned int i; @@ -1367,7 +1365,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_for_each_port(dev, i) { /* Don't touch the remaining ports */ - if (dev->ports[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); @@ -1382,7 +1380,6 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; - dev->ports[port].bridge_dev = NULL; if (is5325(dev) || is5365(dev)) pvid = 1; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 5dafb70e75fc..9d87889728ac 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -70,7 +70,6 @@ enum { struct b53_port { u16 vlan_ctl_mask; - struct net_device *bridge_dev; }; struct b53_vlan { -- cgit v1.2.3 From 5b8784aaf29be20ba8d363e1124d7436d42ef9bf Mon Sep 17 00:00:00 2001 From: andy zhou Date: Fri, 27 Jan 2017 13:45:28 -0800 Subject: openvswitch: Simplify do_execute_actions(). do_execute_actions() implements a worthwhile optimization: in case an output action is the last action in an action list, skb_clone() can be avoided by outputing the current skb. However, the implementation is more complicated than necessary. This patch simplify this logic. Signed-off-by: Andy Zhou Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- net/openvswitch/actions.c | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 514f7bcf7c63..efa9a8858cc6 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1141,12 +1141,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr, int len) { - /* Every output action needs a separate clone of 'skb', but the common - * case is just a single output action, so that doing a clone and - * then freeing the original skbuff is wasteful. 
So the following code - * is slightly obscure just to avoid that. - */ - int prev_port = -1; const struct nlattr *a; int rem; @@ -1154,20 +1148,28 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, a = nla_next(a, &rem)) { int err = 0; - if (unlikely(prev_port != -1)) { - struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC); - - if (out_skb) - do_output(dp, out_skb, prev_port, key); + switch (nla_type(a)) { + case OVS_ACTION_ATTR_OUTPUT: { + int port = nla_get_u32(a); + struct sk_buff *clone; + + /* Every output action needs a separate clone + * of 'skb', In case the output action is the + * last action, cloning can be avoided. + */ + if (nla_is_last(a, rem)) { + do_output(dp, skb, port, key); + /* 'skb' has been used for output. + */ + return 0; + } + clone = skb_clone(skb, GFP_ATOMIC); + if (clone) + do_output(dp, clone, port, key); OVS_CB(skb)->cutlen = 0; - prev_port = -1; - } - - switch (nla_type(a)) { - case OVS_ACTION_ATTR_OUTPUT: - prev_port = nla_get_u32(a); break; + } case OVS_ACTION_ATTR_TRUNC: { struct ovs_action_trunc *trunc = nla_data(a); @@ -1257,11 +1259,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, } } - if (prev_port != -1) - do_output(dp, skb, prev_port, key); - else - consume_skb(skb); - + consume_skb(skb); return 0; } -- cgit v1.2.3 From 9da34f27c13bfc8d13b5599808d815382ef41128 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 27 Jan 2017 16:43:43 -0600 Subject: net: qcom/emac: display the phy driver info after we connect The PHY driver is attached only when the driver calls phy_connect_direct(). Calling phy_attached_print() to display information about the PHY driver prior to that point is meaningless. The interface can be brought down, a new PHY driver can be loaded, and the interface then brought back up. This is the correct time to display information about the attached driver. Since phy_attached_print() also prints information about the interrupt, that needs to be set as well. Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 4 +++- drivers/net/ethernet/qualcomm/emac/emac-phy.c | 3 --- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index e4793d703ed9..155e273ab3de 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -981,6 +981,7 @@ int emac_mac_up(struct emac_adapter *adpt) emac_mac_config(adpt); emac_mac_rx_descs_refill(adpt, &adpt->rx_q); + adpt->phydev->irq = PHY_IGNORE_INTERRUPT; ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, PHY_INTERFACE_MODE_SGMII); if (ret) { @@ -988,11 +989,12 @@ int emac_mac_up(struct emac_adapter *adpt) return ret; } + phy_attached_print(adpt->phydev, NULL); + /* enable mac irq */ writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); - adpt->phydev->irq = PHY_IGNORE_INTERRUPT; phy_start(adpt->phydev); napi_enable(&adpt->rx_q.napi); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 1d7852f4ccaa..441c19366489 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -226,8 +226,5 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) return -ENODEV; } - if (adpt->phydev->drv) - phy_attached_print(adpt->phydev, NULL); - return 0; } -- cgit v1.2.3 From 3db5d555eaec44ee0e1c80194963c4256b23f6ee Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 27 Jan 2017 16:43:44 -0600 Subject: net: qcom/emac: always use autonegotiation to configure the SGMII link Regardless of how the external PHY is configured, the internal PHY (the "SGMII" block) is capable of configuring the SGMII link automatically. When the external PHY link comes up, regardless of how it is configured, the SGMII link is configured automatically. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 49 +++++-------------------- 1 file changed, 10 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 0149b523eda4..b5269c4dd4ee 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -47,44 +47,19 @@ #define SERDES_START_WAIT_TIMES 100 -static int emac_sgmii_link_init(struct emac_adapter *adpt) +/* Initialize the SGMII link between the internal and external PHYs. */ +static void emac_sgmii_link_init(struct emac_adapter *adpt) { - struct phy_device *phydev = adpt->phydev; struct emac_sgmii *phy = &adpt->phy; u32 val; + /* Always use autonegotiation. It works no matter how the external + * PHY is configured. 
+ */ val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - - if (phydev->autoneg == AUTONEG_ENABLE) { - val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); - val |= AN_ENABLE; - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } else { - u32 speed_cfg; - - switch (phydev->speed) { - case SPEED_10: - speed_cfg = SPDMODE_10; - break; - case SPEED_100: - speed_cfg = SPDMODE_100; - break; - case SPEED_1000: - speed_cfg = SPDMODE_1000; - break; - default: - return -EINVAL; - } - - if (phydev->duplex == DUPLEX_FULL) - speed_cfg |= DUPLEX_MODE; - - val &= ~AN_ENABLE; - writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1); - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } - - return 0; + val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); + val |= AN_ENABLE; + writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); } static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) @@ -145,12 +120,7 @@ void emac_sgmii_reset(struct emac_adapter *adpt) int ret; emac_sgmii_reset_prepare(adpt); - - ret = emac_sgmii_link_init(adpt); - if (ret) { - netdev_err(adpt->netdev, "unsupported link speed\n"); - return; - } + emac_sgmii_link_init(adpt); ret = adpt->phy.initialize(adpt); if (ret) @@ -287,6 +257,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error; emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); + emac_sgmii_link_init(adpt); /* We've remapped the addresses, so we don't need the device any * more. of_find_device_by_node() says we should release it. -- cgit v1.2.3 From 0f20276dd51bf4b74c7ba961c32fffb5a155fb1e Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 27 Jan 2017 16:43:45 -0600 Subject: net: qcom/emac: do not call emac_mac_start twice emac_mac_start() uses information from the external PHY to program the MAC, so it makes no sense to call it before the link is up. Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 2 +- drivers/net/ethernet/qualcomm/emac/emac-mac.h | 1 - drivers/net/ethernet/qualcomm/emac/emac.c | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 155e273ab3de..3f3cd0008449 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -556,7 +556,7 @@ void emac_mac_reset(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN); } -void emac_mac_start(struct emac_adapter *adpt) +static void emac_mac_start(struct emac_adapter *adpt) { struct phy_device *phydev = adpt->phydev; u32 mac, csr1; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h index f3aa24dc4a29..5028fb4bec2b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h @@ -230,7 +230,6 @@ struct emac_adapter; int emac_mac_up(struct emac_adapter *adpt); void emac_mac_down(struct emac_adapter *adpt); void emac_mac_reset(struct emac_adapter *adpt); -void emac_mac_start(struct emac_adapter *adpt); void emac_mac_stop(struct emac_adapter *adpt); void emac_mac_mode_config(struct emac_adapter *adpt); void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 3e1be9107d55..75305ad436d5 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -280,8 +280,6 @@ static int emac_open(struct net_device *netdev) return ret; } - emac_mac_start(adpt); - return 0; } -- cgit v1.2.3 From e7e7454b40d290f6efb63c792c56c416922dcef8 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 27 Jan 2017 16:43:47 -0600 Subject: net: qcom/emac: remove extraneous wake-on-lan code The EMAC driver does not support wake-on-lan, but there is still code left-over that partially enables it. Remove that code and a few macros that support it. Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 10 ---------- drivers/net/ethernet/qualcomm/emac/emac.h | 4 ---- 2 files changed, 14 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 3f3cd0008449..33d7ff1f40e0 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -103,14 +103,6 @@ #define RXEN 0x00000002 #define TXEN 0x00000001 - -/* EMAC_WOL_CTRL0 */ -#define LK_CHG_PME 0x20 -#define LK_CHG_EN 0x10 -#define MG_FRAME_PME 0x8 -#define MG_FRAME_EN 0x4 -#define WK_FRAME_EN 0x1 - /* EMAC_DESC_CTRL_3 */ #define RFD_RING_SIZE_BMSK 0xfff @@ -619,8 +611,6 @@ static void emac_mac_start(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL, (HEADER_ENABLE | HEADER_CNT_EN), 0); - - emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN); } void emac_mac_stop(struct emac_adapter *adpt) diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 2725507ae866..ef91dcc7f646 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -167,10 +167,6 @@ enum emac_clk_id { #define EMAC_MAX_SETUP_LNK_CYCLE 100 -/* Wake On Lan */ -#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */ -#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */ - struct emac_stats { /* rx */ u64 rx_ok; /* good packets */ -- cgit v1.2.3 From fd0e97b806f0331df95f5fc58cdd488d169efb7f Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 27 Jan 2017 16:43:48 -0600 Subject: net: qcom/emac: add an error interrupt handler for the sgmii The SGMII (internal PHY) can report decode errors via an interrupt. It can also report autonegotiation status changes, but we don't need to track those. The SGMII can recover automatically from most decode errors, so we only reset the interface if we get multiple consecutive errors. It's possible for bogus decode errors to be reported while the link is being brought up. The interrupt is registered when the interface is opened, and it's enabled after the link is up. Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 8 +- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 126 +++++++++++++++++++++++- drivers/net/ethernet/qualcomm/emac/emac-sgmii.h | 16 ++- drivers/net/ethernet/qualcomm/emac/emac.c | 10 ++ 4 files changed, 153 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 33d7ff1f40e0..b991219862b1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -951,12 +951,16 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, static void emac_adjust_link(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_sgmii *sgmii = &adpt->phy; struct phy_device *phydev = netdev->phydev; - if (phydev->link) + if (phydev->link) { emac_mac_start(adpt); - else + sgmii->link_up(adpt); + } else { + sgmii->link_down(adpt); emac_mac_stop(adpt); + } phy_print_status(phydev); } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index b5269c4dd4ee..040b28977ee7 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -25,7 +25,9 @@ #define EMAC_SGMII_PHY_SPEED_CFG1 0x0074 #define EMAC_SGMII_PHY_IRQ_CMD 0x00ac #define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x00b0 +#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4 #define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x00b8 +#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x00d4 #define FORCE_AN_TX_CFG BIT(5) #define FORCE_AN_RX_CFG BIT(4) @@ -36,6 +38,8 @@ #define SPDMODE_100 BIT(0) #define SPDMODE_10 0 +#define CDR_ALIGN_DET BIT(6) + #define IRQ_GLOBAL_CLEAR BIT(0) #define DECODE_CODE_ERR BIT(7) @@ -44,6 +48,7 @@ #define SGMII_PHY_IRQ_CLR_WAIT_TIME 10 #define SGMII_PHY_INTERRUPT_ERR (DECODE_CODE_ERR | DECODE_DISP_ERR) +#define SGMII_ISR_MASK (SGMII_PHY_INTERRUPT_ERR) #define SERDES_START_WAIT_TIMES 100 @@ -96,6 +101,51 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) return 0; } +/* The number of decode errors that triggers a reset */ +#define DECODE_ERROR_LIMIT 2 + +static irqreturn_t emac_sgmii_interrupt(int irq, void *data) +{ + struct emac_adapter *adpt = data; + struct emac_sgmii *phy = &adpt->phy; + u32 status; + + status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS); + status &= SGMII_ISR_MASK; + if (!status) + return IRQ_HANDLED; + + /* If we get a decoding error and CDR is not locked, then try + * resetting the internal PHY. The internal PHY uses an embedded + * clock with Clock and Data Recovery (CDR) to recover the + * clock and data. + */ + if (status & SGMII_PHY_INTERRUPT_ERR) { + int count; + + /* The SGMII is capable of recovering from some decode + * errors automatically. However, if we get multiple + * decode errors in a row, then assume that something + * is wrong and reset the interface. + */ + count = atomic_inc_return(&phy->decode_error_count); + if (count == DECODE_ERROR_LIMIT) { + schedule_work(&adpt->work_thread); + atomic_set(&phy->decode_error_count, 0); + } + } else { + /* We only care about consecutive decode errors. 
*/ + atomic_set(&phy->decode_error_count, 0); + } + + if (emac_sgmii_irq_clear(adpt, status)) { + netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n"); + schedule_work(&adpt->work_thread); + } + + return IRQ_HANDLED; +} + static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) { struct emac_sgmii *phy = &adpt->phy; @@ -129,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt) ret); } +static int emac_sgmii_open(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + if (sgmii->irq) { + /* Make sure interrupts are cleared and disabled first */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0, + "emac-sgmii", adpt); + if (ret) { + netdev_err(adpt->netdev, + "could not register handler for internal PHY\n"); + return ret; + } + } + + return 0; +} + +static int emac_sgmii_close(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Make sure interrupts are disabled */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + free_irq(sgmii->irq, adpt); + + return 0; +} + +/* The error interrupts are only valid after the link is up */ +static int emac_sgmii_link_up(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + /* Clear and enable interrupts */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + + writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + return 0; +} + +static int emac_sgmii_link_down(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Disable interrupts */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + synchronize_irq(sgmii->irq); + + return 0; +} + static int emac_sgmii_acpi_match(struct device *dev, void *data) { #ifdef CONFIG_ACPI @@ -139,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) {} }; const struct acpi_device_id *id = acpi_match_device(match_table, dev); - emac_sgmii_initialize *initialize = data; + emac_sgmii_function *initialize = data; if (id) { acpi_handle handle = ACPI_HANDLE(dev); @@ -226,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->initialize = (emac_sgmii_initialize)match->data; + phy->initialize = (emac_sgmii_function)match->data; } + phy->open = emac_sgmii_open; + phy->close = emac_sgmii_close; + phy->link_up = emac_sgmii_link_up; + phy->link_down = emac_sgmii_link_down; + /* Base address is the first address */ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0); if (!res) { @@ -256,9 +373,12 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) if (ret) goto error; - emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); emac_sgmii_link_init(adpt); + ret = platform_get_irq(sgmii_pdev, 0); + if (ret > 0) + phy->irq = ret; + /* We've remapped the addresses, so we don't need the device any * more. of_find_device_by_node() says we should release it. 
*/ diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h index 4a8f6b174f4b..e7c0c3b2baa4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h @@ -16,17 +16,29 @@ struct emac_adapter; struct platform_device; -typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); +typedef int (*emac_sgmii_function)(struct emac_adapter *adpt); /** emac_sgmii - internal emac phy * @base base address * @digital per-lane digital block + * @irq the interrupt number + * @decode_error_count reference count of consecutive decode errors * @initialize initialization function + * @open called when the driver is opened + * @close called when the driver is closed + * @link_up called when the link comes up + * @link_down called when the link comes down */ struct emac_sgmii { void __iomem *base; void __iomem *digital; - emac_sgmii_initialize initialize; + unsigned int irq; + atomic_t decode_error_count; + emac_sgmii_function initialize; + emac_sgmii_function open; + emac_sgmii_function close; + emac_sgmii_function link_up; + emac_sgmii_function link_down; }; int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 75305ad436d5..f519351ef3a2 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -280,6 +280,14 @@ static int emac_open(struct net_device *netdev) return ret; } + ret = adpt->phy.open(adpt); + if (ret) { + emac_mac_down(adpt); + emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); + return ret; + } + return 0; } @@ -290,6 +298,7 @@ static int emac_close(struct net_device *netdev) struct emac_adapter *adpt = netdev_priv(netdev); mutex_lock(&adpt->reset_lock); + adpt->phy.close(adpt); emac_mac_down(adpt); emac_mac_rx_tx_rings_free_all(adpt); @@ -645,6 +654,7 @@ static int emac_probe(struct platform_device *pdev) adpt->msg_enable = EMAC_MSG_DEFAULT; phy = &adpt->phy; + atomic_set(&phy->decode_error_count, 0); mutex_init(&adpt->reset_lock); spin_lock_init(&adpt->stats.lock); -- cgit v1.2.3 From 7e98102f489775d8c000884fca8a0d995ea688a9 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Fri, 27 Jan 2017 16:24:38 -0800 Subject: tcp: record pkts sent and retransmitted Add two stats in SCM_TIMESTAMPING_OPT_STATS: TCP_NLA_DATA_SEGS_OUT: total data packets sent including retransmission TCP_NLA_TOTAL_RETRANS: total data packets retransmitted The names are picked to be consistent with corresponding fields in TCP_INFO. This allows applications that are using the timestamping API to measure latency stats to also retrieve the retransmission rate of application writes. Signed-off-by: Yuchung Cheng Signed-off-by: Soheil Hassas Yeganeh Acked-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S.
Miller --- include/uapi/linux/tcp.h | 2 ++ net/ipv4/tcp.c | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 6ff35eb48d10..38a2b07afdff 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -227,6 +227,8 @@ enum { TCP_NLA_BUSY, /* Time (usec) busy sending data */ TCP_NLA_RWND_LIMITED, /* Time (usec) limited by receive window */ TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */ + TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */ + TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */ }; /* for TCP_MD5SIG socket option */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2ed472ebf3b5..b751abc56935 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2870,7 +2870,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) struct sk_buff *stats; struct tcp_info info; - stats = alloc_skb(3 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC); + stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC); if (!stats) return NULL; @@ -2881,6 +2881,10 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) info.tcpi_rwnd_limited, TCP_NLA_PAD); nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, info.tcpi_sndbuf_limited, TCP_NLA_PAD); + nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, + tp->data_segs_out, TCP_NLA_PAD); + nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, + tp->total_retrans, TCP_NLA_PAD); return stats; } -- cgit v1.2.3 From 678550c651aee051d66112933c87894f129b9355 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Fri, 27 Jan 2017 16:24:39 -0800 Subject: tcp: include locally failed retries in retransmission stats Currently the retransmission stats are not incremented if the retransmit fails locally. But we always increment the other packet counters that track total packet/bytes sent. Awkwardly while we don't count these failed retransmits in RETRANSSEGS, we do count them in FAILEDRETRANS. If the qdisc is dropping many packets this could under-estimate TCP retransmission rate substantially from both SNMP or per-socket TCP_INFO stats. This patch changes this by always incrementing retransmission stats on retransmission attempts and failures. Another motivation is to properly track retransmists in SCM_TIMESTAMPING_OPT_STATS. Since SCM_TSTAMP_SCHED collection is triggered in tcp_transmit_skb(), If tp->total_retrans is incremented after the function, we'll always mis-count by the amount of the latest retransmission. Signed-off-by: Yuchung Cheng Signed-off-by: Soheil Hassas Yeganeh Acked-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 671c69535671..7b2d8762f15f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2771,6 +2771,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) tcp_ecn_clear_syn(sk, skb); + /* Update global and local TCP statistics. */ + segs = tcp_skb_pcount(skb); + TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); + tp->total_retrans += segs; + /* make sure skb->data is aligned on arches that require it * and check if ack-trimming & collapsing extended the headroom * beyond what csum_start can cover. 
@@ -2788,14 +2795,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) } if (likely(!err)) { - segs = tcp_skb_pcount(skb); - TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; - /* Update global TCP statistics. */ - TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); - if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); - tp->total_retrans += segs; + } else if (err != -EBUSY) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); } return err; } @@ -2818,8 +2820,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) if (!tp->retrans_stamp) tp->retrans_stamp = tcp_skb_timestamp(skb); - } else if (err != -EBUSY) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); } if (tp->undo_retrans < 0) -- cgit v1.2.3 From 40be0dda0725886b623d67868db3219a2e74683b Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 28 Jan 2017 15:15:42 +0100 Subject: net: add devm version of alloc_etherdev_mqs function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds devm_alloc_etherdev_mqs function and devm_alloc_etherdev macro. These can be used for simpler netdev allocation without having to care about calling free_netdev. Thanks to this change drivers, their error paths and removal paths may get simpler by a bit. Signed-off-by: Rafał Miłecki Signed-off-by: David S. Miller --- include/linux/etherdevice.h | 5 +++++ net/ethernet/eth.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 42add77ae47d..c62b709b1ce0 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) +struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, + unsigned int txqs, + unsigned int rxqs); +#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) + struct sk_buff **eth_gro_receive(struct sk_buff **head, struct sk_buff *skb); int eth_gro_complete(struct sk_buff *skb, int nhoff); diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 8c5a479681ca..efdaaab735fc 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -392,6 +392,34 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, } EXPORT_SYMBOL(alloc_etherdev_mqs); +static void devm_free_netdev(struct device *dev, void *res) +{ + free_netdev(*(struct net_device **)res); +} + +struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, + unsigned int txqs, unsigned int rxqs) +{ + struct net_device **dr; + struct net_device *netdev; + + dr = devres_alloc(devm_free_netdev, sizeof(*dr), GFP_KERNEL); + if (!dr) + return NULL; + + netdev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs); + if (!netdev) { + devres_free(dr); + return NULL; + } + + *dr = netdev; + devres_add(dev, dr); + + return netdev; +} +EXPORT_SYMBOL(devm_alloc_etherdev_mqs); + ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) { return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr); -- cgit v1.2.3 From f991bb9da142ba79b54ed0757f22e756f45e2c5a Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Mon, 30 Jan 2017 06:45:38 +0100 Subject: net: Drop secpath on free after gro merge. 
With a followup patch, a gro merged skb can have a secpath. So drop it before freeing or reusing the skb. Signed-off-by: Steffen Klassert --- net/core/dev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index 56818f7eab2b..ef3a969477bf 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4623,6 +4623,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) case GRO_MERGED_FREE: if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { skb_dst_drop(skb); + secpath_reset(skb); kmem_cache_free(skbuff_head_cache, skb); } else { __kfree_skb(skb); @@ -4663,6 +4664,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb->encapsulation = 0; skb_shinfo(skb)->gso_type = 0; skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); + secpath_reset(skb); napi->skb = skb; } -- cgit v1.2.3 From 1995876a06bcf6f9f7d7b699bdbf387831679771 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Mon, 30 Jan 2017 06:45:43 +0100 Subject: xfrm: Add a dummy network device for napi. This patch adds a dummy network device so that we can use gro_cells for IPsec GRO. With this, we handle IPsec GRO with no impact on the generic networking code. Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_input.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 6e3f0254d8a1..3213fe8027be 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -21,6 +21,9 @@ static struct kmem_cache *secpath_cachep __read_mostly; static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO]; +static struct gro_cells gro_cells; +static struct net_device xfrm_napi_dev; + int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo) { int err = 0; @@ -371,7 +374,7 @@ resume: if (decaps) { skb_dst_drop(skb); - netif_rx(skb); + gro_cells_receive(&gro_cells, skb); return 0; } else { return x->inner_mode->afinfo->transport_finish(skb, async); @@ -394,6 +397,13 @@ EXPORT_SYMBOL(xfrm_input_resume); void __init xfrm_input_init(void) { + int err; + + init_dummy_netdev(&xfrm_napi_dev); + err = gro_cells_init(&gro_cells, &xfrm_napi_dev); + if (err) + gro_cells.cells = NULL; + secpath_cachep = kmem_cache_create("secpath_cache", sizeof(struct sec_path), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, -- cgit v1.2.3 From bf9f26485d232916cdc2257e42831781e1f075e8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 09:48:40 -0800 Subject: net: dsa: Hook {get,set}_rxnfc ethtool operations In preparation for adding support for CFP/TCAM in the bcm_sf2 driver, add the plumbing to call into driver-specific {get,set}_rxnfc operations. Signed-off-by: Florian Fainelli Signed-off-by: David S.
Miller --- include/net/dsa.h | 8 ++++++++ net/dsa/slave.c | 26 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/include/net/dsa.h b/include/net/dsa.h index b951e2ebda75..d5d618c3de64 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -377,6 +377,14 @@ struct dsa_switch_ops { int (*port_mdb_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_mdb *mdb, int (*cb)(struct switchdev_obj *obj)); + + /* + * RXNFC + */ + int (*get_rxnfc)(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs); + int (*set_rxnfc)(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc); }; struct dsa_switch_driver { diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 08725286f79d..6881889e1a9b 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1002,6 +1002,30 @@ void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops) ops->get_strings = dsa_cpu_port_get_strings; } +static int dsa_slave_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc, u32 *rule_locs) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->dp->ds; + + if (!ds->ops->get_rxnfc) + return -EOPNOTSUPP; + + return ds->ops->get_rxnfc(ds, p->dp->index, nfc, rule_locs); +} + +static int dsa_slave_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->dp->ds; + + if (!ds->ops->set_rxnfc) + return -EOPNOTSUPP; + + return ds->ops->set_rxnfc(ds, p->dp->index, nfc); +} + static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_drvinfo = dsa_slave_get_drvinfo, .get_regs_len = dsa_slave_get_regs_len, @@ -1020,6 +1044,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_eee = dsa_slave_get_eee, .get_link_ksettings = dsa_slave_get_link_ksettings, .set_link_ksettings = dsa_slave_set_link_ksettings, + .get_rxnfc = dsa_slave_get_rxnfc, + .set_rxnfc = dsa_slave_set_rxnfc, }; static const struct net_device_ops dsa_slave_netdev_ops = { -- cgit v1.2.3 From e1b9147cbdb84cae017f450b34781f4cfc790095 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 09:48:41 -0800 Subject: net: dsa: bcm_sf2: Configure traffic classes to queue mapping By default, all traffic goes to queue 0, re-configure the traffic classes to quality of service mapping such that priority X maps to queue X, where X is from 0 through 7. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 9 +++++++++ drivers/net/dsa/bcm_sf2_regs.h | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 8eecfd227e06..637072da3acf 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -229,6 +229,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; + unsigned int i; u32 reg; /* Clear the memory power down */ @@ -240,6 +241,14 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, if (priv->brcm_tag_mask & BIT(port)) bcm_sf2_brcm_hdr_setup(priv, port); + /* Configure Traffic Class to QoS mapping, allow each priority to map + * to a different queue number + */ + reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port)); + for (i = 0; i < 8; i++) + reg |= i << (PRT_TO_QID_SHIFT * i); + core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port)); + /* Clear the Rx and Tx disable bits and set to no spanning tree */ core_writel(priv, 0, CORE_G_PCTL_PORT(port)); diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 3b33b8010cc8..6b63c00928ba 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -238,6 +238,10 @@ enum bcm_sf2_reg_offs { #define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \ ((x) * P_TXQ_PSM_VDD_SHIFT)) +#define CORE_PORT_TC2_QOS_MAP_PORT(x) (0xc1c0 + ((x) * 0x10)) +#define PRT_TO_QID_MASK 0x3 +#define PRT_TO_QID_SHIFT 3 + #define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) #define PORT_VLAN_CTRL_MASK 0x1ff -- cgit v1.2.3 From 853458087ef0e5300fa978dccef40b73d411c6e6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 09:48:42 -0800 Subject: net: dsa: bcm_sf2: Add CFP registers definitions Add Compact Field Processor definitions for the Broadcom Starfighter 2 and compatible versions of the switch. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2_regs.h | 146 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 146 insertions(+) diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 6b63c00928ba..26052450091e 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -255,4 +255,150 @@ enum bcm_sf2_reg_offs { #define CORE_EEE_EN_CTRL 0x24800 #define CORE_EEE_LPI_INDICATE 0x24810 +#define CORE_CFP_ACC 0x28000 +#define OP_STR_DONE (1 << 0) +#define OP_SEL_SHIFT 1 +#define OP_SEL_READ (1 << OP_SEL_SHIFT) +#define OP_SEL_WRITE (2 << OP_SEL_SHIFT) +#define OP_SEL_SEARCH (4 << OP_SEL_SHIFT) +#define OP_SEL_MASK (7 << OP_SEL_SHIFT) +#define CFP_RAM_CLEAR (1 << 4) +#define RAM_SEL_SHIFT 10 +#define TCAM_SEL (1 << RAM_SEL_SHIFT) +#define ACT_POL_RAM (2 << RAM_SEL_SHIFT) +#define RATE_METER_RAM (4 << RAM_SEL_SHIFT) +#define GREEN_STAT_RAM (8 << RAM_SEL_SHIFT) +#define YELLOW_STAT_RAM (16 << RAM_SEL_SHIFT) +#define RED_STAT_RAM (24 << RAM_SEL_SHIFT) +#define RAM_SEL_MASK (0x1f << RAM_SEL_SHIFT) +#define TCAM_RESET (1 << 15) +#define XCESS_ADDR_SHIFT 16 +#define XCESS_ADDR_MASK 0xff +#define SEARCH_STS (1 << 27) +#define RD_STS_SHIFT 28 +#define RD_STS_TCAM (1 << RD_STS_SHIFT) +#define RD_STS_ACT_POL_RAM (2 << RD_STS_SHIFT) +#define RD_STS_RATE_METER_RAM (4 << RD_STS_SHIFT) +#define RD_STS_STAT_RAM (8 << RD_STS_SHIFT) + +#define CORE_CFP_RATE_METER_GLOBAL_CTL 0x28010 + +#define CORE_CFP_DATA_PORT_0 0x28040 +#define CORE_CFP_DATA_PORT(x) (CORE_CFP_DATA_PORT_0 + \ + (x) * 0x10) + +/* UDF_DATA7 */ +#define L3_FRAMING_SHIFT 24 +#define L3_FRAMING_MASK (0x3 << L3_FRAMING_SHIFT) +#define IPPROTO_SHIFT 8 +#define IPPROTO_MASK (0xff << IPPROTO_SHIFT) +#define IP_FRAG (1 << 7) + +/* UDF_DATA0 */ +#define SLICE_VALID 3 +#define SLICE_NUM_SHIFT 2 +#define SLICE_NUM(x) ((x) << SLICE_NUM_SHIFT) + +#define CORE_CFP_MASK_PORT_0 0x280c0 + +#define CORE_CFP_MASK_PORT(x) (CORE_CFP_MASK_PORT_0 + \ + (x) * 0x10) + +#define CORE_ACT_POL_DATA0 0x28140 +#define VLAN_BYP (1 << 0) +#define EAP_BYP (1 << 1) +#define STP_BYP (1 << 2) +#define REASON_CODE_SHIFT 3 +#define REASON_CODE_MASK 0x3f +#define LOOP_BK_EN (1 << 9) +#define NEW_TC_SHIFT 10 +#define NEW_TC_MASK 0x7 +#define CHANGE_TC (1 << 13) +#define DST_MAP_IB_SHIFT 14 +#define DST_MAP_IB_MASK 0x1ff +#define CHANGE_FWRD_MAP_IB_SHIFT 24 +#define CHANGE_FWRD_MAP_IB_MASK 0x3 +#define CHANGE_FWRD_MAP_IB_NO_DEST (0 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_REM_ARL (1 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_REP_ARL (2 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_ADD_DST (3 << CHANGE_FWRD_MAP_IB_SHIFT) +#define NEW_DSCP_IB_SHIFT 26 +#define NEW_DSCP_IB_MASK 0x3f + +#define CORE_ACT_POL_DATA1 0x28150 +#define CHANGE_DSCP_IB (1 << 0) +#define DST_MAP_OB_SHIFT 1 +#define DST_MAP_OB_MASK 0x3ff +#define CHANGE_FWRD_MAP_OB_SHIT 11 +#define CHANGE_FWRD_MAP_OB_MASK 0x3 +#define NEW_DSCP_OB_SHIFT 13 +#define NEW_DSCP_OB_MASK 0x3f +#define CHANGE_DSCP_OB (1 << 19) +#define CHAIN_ID_SHIFT 20 +#define CHAIN_ID_MASK 0xff +#define CHANGE_COLOR (1 << 28) +#define NEW_COLOR_SHIFT 29 +#define NEW_COLOR_MASK 0x3 +#define NEW_COLOR_GREEN (0 << NEW_COLOR_SHIFT) +#define NEW_COLOR_YELLOW (1 << NEW_COLOR_SHIFT) +#define NEW_COLOR_RED (2 << NEW_COLOR_SHIFT) +#define RED_DEFAULT (1 << 31) + +#define CORE_ACT_POL_DATA2 0x28160 +#define MAC_LIMIT_BYPASS (1 << 0) +#define CHANGE_TC_O (1 << 1) +#define NEW_TC_O_SHIFT 2 +#define NEW_TC_O_MASK 0x7 +#define SPCP_RMK_DISABLE (1 << 5) +#define CPCP_RMK_DISABLE (1 << 
6) +#define DEI_RMK_DISABLE (1 << 7) + +#define CORE_RATE_METER0 0x28180 +#define COLOR_MODE (1 << 0) +#define POLICER_ACTION (1 << 1) +#define COUPLING_FLAG (1 << 2) +#define POLICER_MODE_SHIFT 3 +#define POLICER_MODE_MASK 0x3 +#define POLICER_MODE_RFC2698 (0 << POLICER_MODE_SHIFT) +#define POLICER_MODE_RFC4115 (1 << POLICER_MODE_SHIFT) +#define POLICER_MODE_MEF (2 << POLICER_MODE_SHIFT) +#define POLICER_MODE_DISABLE (3 << POLICER_MODE_SHIFT) + +#define CORE_RATE_METER1 0x28190 +#define EIR_TK_BKT_MASK 0x7fffff + +#define CORE_RATE_METER2 0x281a0 +#define EIR_BKT_SIZE_MASK 0xfffff + +#define CORE_RATE_METER3 0x281b0 +#define EIR_REF_CNT_MASK 0x7ffff + +#define CORE_RATE_METER4 0x281c0 +#define CIR_TK_BKT_MASK 0x7fffff + +#define CORE_RATE_METER5 0x281d0 +#define CIR_BKT_SIZE_MASK 0xfffff + +#define CORE_RATE_METER6 0x281e0 +#define CIR_REF_CNT_MASK 0x7ffff + +#define CORE_CFP_CTL_REG 0x28400 +#define CFP_EN_MAP_MASK 0x1ff + +/* IPv4 slices, 3 of them */ +#define CORE_UDF_0_A_0_8_PORT_0 0x28440 +#define CFG_UDF_OFFSET_MASK 0x1f +#define CFG_UDF_OFFSET_BASE_SHIFT 5 +#define CFG_UDF_SOF (0 << CFG_UDF_OFFSET_BASE_SHIFT) +#define CFG_UDF_EOL2 (2 << CFG_UDF_OFFSET_BASE_SHIFT) +#define CFG_UDF_EOL3 (3 << CFG_UDF_OFFSET_BASE_SHIFT) + +/* Number of slices for IPv4, IPv6 and non-IP */ +#define UDF_NUM_SLICES 9 + +/* Spacing between different slices */ +#define UDF_SLICE_OFFSET 0x40 + +#define CFP_NUM_RULES 256 + #endif /* __BCM_SF2_REGS_H */ -- cgit v1.2.3 From 7318166cacad158b46240f66250d7cc5a481653b Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 09:48:43 -0800 Subject: net: dsa: bcm_sf2: Add support for ethtool::rxnfc Add support for configuring classification rules using the ethtool::rxnfc API. This is useful to program the switch's CFP/TCAM to redirect specific packets to specific ports/queues for instance. For now, we allow any kind of IPv4 5-tuple matching. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/Makefile | 1 + drivers/net/dsa/bcm_sf2.c | 14 + drivers/net/dsa/bcm_sf2.h | 17 ++ drivers/net/dsa/bcm_sf2_cfp.c | 613 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 645 insertions(+) create mode 100644 drivers/net/dsa/bcm_sf2_cfp.c diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 8346e4f9737a..da9893478e21 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o +bcm_sf2-objs += bcm_sf2_cfp.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o obj-y += b53/ diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 637072da3acf..be282b430c50 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1045,6 +1045,8 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, + .get_rxnfc = bcm_sf2_get_rxnfc, + .set_rxnfc = bcm_sf2_set_rxnfc, }; struct bcm_sf2_of_data { @@ -1168,6 +1170,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); + mutex_init(&priv->cfp.lock); + + /* CFP rule #0 cannot be used for specific classifications, flag it as + * permanently used + */ + set_bit(0, priv->cfp.used); bcm_sf2_identify_ports(priv, dn->child); @@ -1197,6 +1205,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) return ret; } + ret = bcm_sf2_cfp_rst(priv); + if (ret) { + pr_err("failed to reset CFP\n"); + goto out_mdio; + } + /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 6e1f74e4d471..7d3030e04f11 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -52,6 +52,13 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; }; +struct bcm_sf2_cfp_priv { + /* Mutex protecting concurrent accesses to the CFP registers */ + struct mutex lock; + DECLARE_BITMAP(used, CFP_NUM_RULES); + unsigned int rules_cnt; +}; + struct bcm_sf2_priv { /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ void __iomem *core; @@ -103,6 +110,9 @@ struct bcm_sf2_priv { /* Bitmask of ports needing BRCM tags */ unsigned int brcm_tag_mask; + + /* CFP rules context */ + struct bcm_sf2_cfp_priv cfp; }; static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) @@ -197,4 +207,11 @@ SF2_IO_MACRO(acb); SWITCH_INTR_L2(0); SWITCH_INTR_L2(1); +/* RXNFC */ +int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs); +int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc); +int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv); + #endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c new file mode 100644 index 000000000000..c71be3e0dc2d --- /dev/null +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -0,0 +1,613 @@ +/* + * Broadcom Starfighter 2 DSA switch CFP support + * + * Copyright (C) 2016, Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "bcm_sf2.h" +#include "bcm_sf2_regs.h" + +struct cfp_udf_layout { + u8 slices[UDF_NUM_SLICES]; + u32 mask_value; + +}; + +/* UDF slices layout for a TCPv4/UDPv4 specification */ +static const struct cfp_udf_layout udf_tcpip4_layout = { + .slices = { + /* End of L2, byte offset 12, src IP[0:15] */ + CFG_UDF_EOL2 | 6, + /* End of L2, byte offset 14, src IP[16:31] */ + CFG_UDF_EOL2 | 7, + /* End of L2, byte offset 16, dst IP[0:15] */ + CFG_UDF_EOL2 | 8, + /* End of L2, byte offset 18, dst IP[16:31] */ + CFG_UDF_EOL2 | 9, + /* End of L3, byte offset 0, src port */ + CFG_UDF_EOL3 | 0, + /* End of L3, byte offset 2, dst port */ + CFG_UDF_EOL3 | 1, + 0, 0, 0 + }, + .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG, +}; + +static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout) +{ + unsigned int i, count = 0; + + for (i = 0; i < UDF_NUM_SLICES; i++) { + if (layout[i] != 0) + count++; + } + + return count; +} + +static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv, + unsigned int slice_num, + const u8 *layout) +{ + u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET; + unsigned int i; + + for (i = 0; i < UDF_NUM_SLICES; i++) + core_writel(priv, layout[i], offset + i * 4); +} + +static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(OP_SEL_MASK | RAM_SEL_MASK); + reg |= OP_STR_DONE | op; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + reg = core_readl(priv, CORE_CFP_ACC); + if (!(reg & OP_STR_DONE)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, + unsigned int addr) +{ + u32 reg; + + WARN_ON(addr >= CFP_NUM_RULES); + + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); + reg |= addr << XCESS_ADDR_SHIFT; + core_writel(priv, reg, CORE_CFP_ACC); +} + +static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) +{ + /* Entry #0 is reserved */ + return CFP_NUM_RULES - 1; +} + +static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct ethtool_tcpip4_spec *v4_spec; + const struct cfp_udf_layout *layout; + unsigned int slice_num, rule_index; + unsigned int queue_num, port_num; + u8 ip_proto, ip_frag; + u8 num_udf; + u32 reg; + int ret; + + /* Check for unsupported extensions */ + if ((fs->flow_type & FLOW_EXT) && + (fs->m_ext.vlan_etype || fs->m_ext.data[1])) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + + if (fs->location != RX_CLS_LOC_ANY && + fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + ip_frag = be32_to_cpu(fs->m_ext.data[0]); + + /* We do not support discarding packets, check that the + * destination port is enabled and that we are within the + * number of ports supported by the switch + */ + port_num = fs->ring_cookie / 8; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC || + !(BIT(port_num) & ds->enabled_port_mask) || + port_num >= priv->hw_params.num_ports) + return -EINVAL; + + switch (fs->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ip_proto = IPPROTO_TCP; + v4_spec = &fs->h_u.tcp_ip4_spec; + break; + case UDP_V4_FLOW: + ip_proto = IPPROTO_UDP; + v4_spec = &fs->h_u.udp_ip4_spec; + break; + 
default: + return -EINVAL; + } + + /* We only use one UDF slice for now */ + slice_num = 1; + layout = &udf_tcpip4_layout; + num_udf = bcm_sf2_get_num_udf_slices(layout->slices); + + /* Apply the UDF layout for this filter */ + bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices); + + /* Apply to all packets received through this port */ + core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7)); + + /* S-Tag status [31:30] + * C-Tag status [29:28] + * L2 framing [27:26] + * L3 framing [25:24] + * IP ToS [23:16] + * IP proto [15:08] + * IP Fragm [7] + * Non 1st frag [6] + * IP Authen [5] + * TTL range [4:3] + * PPPoE session [2] + * Reserved [1] + * UDF_Valid[8] [0] + */ + core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7, + CORE_CFP_DATA_PORT(6)); + + /* UDF_Valid[7:0] [31:24] + * S-Tag [23:8] + * C-Tag [7:0] + */ + core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5)); + + /* C-Tag [31:24] + * UDF_n_A8 [23:8] + * UDF_n_A7 [7:0] + */ + core_writel(priv, 0, CORE_CFP_DATA_PORT(4)); + + /* UDF_n_A7 [31:24] + * UDF_n_A6 [23:8] + * UDF_n_A5 [7:0] + */ + core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8, + CORE_CFP_DATA_PORT(3)); + + /* UDF_n_A5 [31:24] + * UDF_n_A4 [23:8] + * UDF_n_A3 [7:0] + */ + reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 | + (u32)be16_to_cpu(v4_spec->psrc) << 8 | + (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8; + core_writel(priv, reg, CORE_CFP_DATA_PORT(2)); + + /* UDF_n_A3 [31:24] + * UDF_n_A2 [23:8] + * UDF_n_A1 [7:0] + */ + reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 | + (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 | + (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8; + core_writel(priv, reg, CORE_CFP_DATA_PORT(1)); + + /* UDF_n_A1 [31:24] + * UDF_n_A0 [23:8] + * Reserved [7:4] + * Slice ID [3:2] + * Slice valid [1:0] + */ + reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 | + (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 | + SLICE_NUM(slice_num) | SLICE_VALID; + core_writel(priv, reg, CORE_CFP_DATA_PORT(0)); + + /* Source port map match */ + core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7)); + + /* Mask with the specific layout for IPv4 packets */ + core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6)); + + /* Mask all but valid UDFs */ + core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5)); + + /* Mask all */ + core_writel(priv, 0, CORE_CFP_MASK_PORT(4)); + + /* All other UDFs should be matched with the filter */ + core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3)); + core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2)); + core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1)); + core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0)); + + /* Locate the first rule available */ + if (fs->location == RX_CLS_LOC_ANY) + rule_index = find_first_zero_bit(priv->cfp.used, + bcm_sf2_cfp_rule_size(priv)); + else + rule_index = fs->location; + + /* Insert into TCAM now */ + bcm_sf2_cfp_rule_addr_set(priv, rule_index); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); + if (ret) { + pr_err("TCAM entry at addr %d failed\n", rule_index); + return ret; + } + + /* Replace ARL derived destination with DST_MAP derived, define + * which port and queue this should be forwarded to. + * + * We have a small oddity where Port 6 just does not have a + * valid bit here (so we subtract by one). 
+ */ + queue_num = fs->ring_cookie % 8; + if (port_num >= 7) + port_num -= 1; + + reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) | + CHANGE_TC | queue_num << NEW_TC_SHIFT; + + core_writel(priv, reg, CORE_ACT_POL_DATA0); + + /* Set classification ID that needs to be put in Broadcom tag */ + core_writel(priv, rule_index << CHAIN_ID_SHIFT, + CORE_ACT_POL_DATA1); + + core_writel(priv, 0, CORE_ACT_POL_DATA2); + + /* Configure policer RAM now */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM); + if (ret) { + pr_err("Policer entry at %d failed\n", rule_index); + return ret; + } + + /* Disable the policer */ + core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0); + + /* Now the rate meter */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM); + if (ret) { + pr_err("Meter entry at %d failed\n", rule_index); + return ret; + } + + /* Turn on CFP for this rule now */ + reg = core_readl(priv, CORE_CFP_CTL_REG); + reg |= BIT(port); + core_writel(priv, reg, CORE_CFP_CTL_REG); + + /* Flag the rule as being used and return it */ + set_bit(rule_index, priv->cfp.used); + fs->location = rule_index; + + return 0; +} + +static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, + u32 loc) +{ + int ret; + u32 reg; + + /* Refuse deletion of unused rules, and the default reserved rule */ + if (!test_bit(loc, priv->cfp.used) || loc == 0) + return -EINVAL; + + /* Indicate which rule we want to read */ + bcm_sf2_cfp_rule_addr_set(priv, loc); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); + if (ret) + return ret; + + /* Clear its valid bits */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); + reg &= ~SLICE_VALID; + core_writel(priv, reg, CORE_CFP_DATA_PORT(0)); + + /* Write back this entry into the TCAM now */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); + if (ret) + return ret; + + clear_bit(loc, priv->cfp.used); + + return 0; +} + +static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) +{ + unsigned int i; + + for (i = 0; i < sizeof(flow->m_u); i++) + flow->m_u.hdata[i] ^= 0xff; + + flow->m_ext.vlan_etype ^= cpu_to_be16(~0); + flow->m_ext.vlan_tci ^= cpu_to_be16(~0); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); +} + +static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, + struct ethtool_rxnfc *nfc, bool search) +{ + struct ethtool_tcpip4_spec *v4_spec; + unsigned int queue_num; + u16 src_dst_port; + u32 reg, ipv4; + int ret; + + if (!search) { + bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM); + if (ret) + return ret; + + reg = core_readl(priv, CORE_ACT_POL_DATA0); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); + if (ret) + return ret; + } else { + reg = core_readl(priv, CORE_ACT_POL_DATA0); + } + + /* Extract the destination port */ + nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) & + DST_MAP_IB_MASK) - 1; + + /* There is no Port 6, so we compensate for that here */ + if (nfc->fs.ring_cookie >= 6) + nfc->fs.ring_cookie++; + nfc->fs.ring_cookie *= 8; + + /* Extract the destination queue */ + queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK; + nfc->fs.ring_cookie += queue_num; + + /* Extract the IP protocol */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); + switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { + case IPPROTO_TCP: + nfc->fs.flow_type = TCP_V4_FLOW; + v4_spec = &nfc->fs.h_u.tcp_ip4_spec; + break; + case IPPROTO_UDP: + nfc->fs.flow_type = UDP_V4_FLOW; + v4_spec = &nfc->fs.h_u.udp_ip4_spec; + 
break; + default: + /* Clear to exit the search process */ + if (search) + core_readl(priv, CORE_CFP_DATA_PORT(7)); + return -EINVAL; + } + + v4_spec->tos = (reg >> 16) & IPPROTO_MASK; + nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1); + + reg = core_readl(priv, CORE_CFP_DATA_PORT(3)); + /* src port [15:8] */ + src_dst_port = reg << 8; + + reg = core_readl(priv, CORE_CFP_DATA_PORT(2)); + /* src port [7:0] */ + src_dst_port |= (reg >> 24); + + v4_spec->pdst = cpu_to_be16(src_dst_port); + nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + v4_spec->psrc = cpu_to_be16((u16)(reg >> 8)); + nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + + /* IPv4 dst [15:8] */ + ipv4 = (u16)(reg & 0xff) << 8; + reg = core_readl(priv, CORE_CFP_DATA_PORT(1)); + /* IPv4 dst [31:16] */ + ipv4 |= (u32)((reg >> 8) & 0xffffff) << 16; + /* IPv4 dst [7:0] */ + ipv4 |= (reg >> 24) & 0xff; + v4_spec->ip4dst = cpu_to_be32(ipv4); + nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + + /* IPv4 src [15:8] */ + ipv4 = (u16)(reg & 0xff) << 8; + reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); + + if (!(reg & SLICE_VALID)) + return -EINVAL; + + /* IPv4 src [7:0] */ + ipv4 |= (reg >> 24) & 0xff; + /* IPv4 src [31:16] */ + ipv4 |= ((reg >> 8) & 0xffffff) << 16; + v4_spec->ip4src = cpu_to_be32(ipv4); + nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + + /* Read last to avoid next entry clobbering the results during search + * operations + */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(7)); + if (!(reg & 1 << port)) + return -EINVAL; + + bcm_sf2_invert_masks(&nfc->fs); + + /* Put the TCAM size here */ + nfc->data = bcm_sf2_cfp_rule_size(priv); + + return 0; +} + +/* We implement the search doing a TCAM search operation */ +static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, + int port, struct ethtool_rxnfc *nfc, + u32 *rule_locs) +{ + unsigned int index = 1, rules_cnt = 0; + int ret; + u32 reg; + + /* Do not poll on OP_STR_DONE to be self-clearing for search + * operations, we cannot use bcm_sf2_cfp_op here because it completes + * on clearing OP_STR_DONE which won't clear until the entire search + * operation is over. 
+ */ + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); + reg |= index << XCESS_ADDR_SHIFT; + reg &= ~(OP_SEL_MASK | RAM_SEL_MASK); + reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + /* Wait for results to be ready */ + reg = core_readl(priv, CORE_CFP_ACC); + + /* Extract the address we are searching */ + index = reg >> XCESS_ADDR_SHIFT; + index &= XCESS_ADDR_MASK; + + /* We have a valid search result, so flag it accordingly */ + if (reg & SEARCH_STS) { + ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true); + if (ret) + continue; + + rule_locs[rules_cnt] = index; + rules_cnt++; + } + + /* Search is over break out */ + if (!(reg & OP_STR_DONE)) + break; + + } while (index < CFP_NUM_RULES); + + /* Put the TCAM size here */ + nfc->data = bcm_sf2_cfp_rule_size(priv); + nfc->rule_cnt = rules_cnt; + + return 0; +} + +int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + int ret = 0; + + mutex_lock(&priv->cfp.lock); + + switch (nfc->cmd) { + case ETHTOOL_GRXCLSRLCNT: + /* Subtract the default, unusable rule */ + nfc->rule_cnt = bitmap_weight(priv->cfp.used, + CFP_NUM_RULES) - 1; + /* We support specifying rule locations */ + nfc->data |= RX_CLS_LOC_SPECIAL; + break; + case ETHTOOL_GRXCLSRULE: + ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false); + break; + case ETHTOOL_GRXCLSRLALL: + ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&priv->cfp.lock); + + return ret; +} + +int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + int ret = 0; + + mutex_lock(&priv->cfp.lock); + + switch (nfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs); + break; + + case ETHTOOL_SRXCLSRLDEL: + ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&priv->cfp.lock); + + return ret; +} + +int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_CFP_ACC); + reg |= TCAM_RESET; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + reg = core_readl(priv, CORE_CFP_ACC); + if (!(reg & TCAM_RESET)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} -- cgit v1.2.3 From 63a6fff353d01da5a22b72670c434bf12fa0e3b8 Mon Sep 17 00:00:00 2001 From: Robert Shearman Date: Thu, 26 Jan 2017 18:02:24 +0000 Subject: net: Avoid receiving packets with an l3mdev on unbound UDP sockets Packets arriving in a VRF currently are delivered to UDP sockets that aren't bound to any interface. TCP defaults to not delivering packets arriving in a VRF to unbound sockets. IP route lookup and socket transmit both assume that unbound means using the default table and UDP applications that haven't been changed to be aware of VRFs may not function correctly in this case since they may not be able to handle overlapping IP address ranges, or be able to send packets back to the original sender if required. So add a sysctl, udp_l3mdev_accept, to control this behaviour with it being analgous to the existing tcp_l3mdev_accept, namely to allow a process to have a VRF-global listen socket. 
Have this default to off as this is the behaviour that users will expect, given that there is no explicit mechanism to set unmodified VRF-unaware application into a default VRF. Signed-off-by: Robert Shearman Acked-by: David Ahern Tested-by: David Ahern Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 7 +++++++ Documentation/networking/vrf.txt | 7 ++++--- include/net/netns/ipv4.h | 4 ++++ net/ipv4/sysctl_net_ipv4.c | 11 +++++++++++ net/ipv4/udp.c | 27 ++++++++++++++++++++------- net/ipv6/udp.c | 27 ++++++++++++++++++++------- 6 files changed, 66 insertions(+), 17 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 17f2e7791042..fc73eeb7b3b8 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -721,6 +721,13 @@ tcp_challenge_ack_limit - INTEGER UDP variables: +udp_l3mdev_accept - BOOLEAN + Enabling this option allows a "global" bound socket to work + across L3 master domains (e.g., VRFs) with packets capable of + being received regardless of the L3 domain in which they + originated. Only valid when the kernel was compiled with + CONFIG_NET_L3_MASTER_DEV. + udp_mem - vector of 3 INTEGERs: min, pressure, max Number of pages allowed for queueing by all UDP sockets. diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt index 755dab856392..3918dae964d4 100644 --- a/Documentation/networking/vrf.txt +++ b/Documentation/networking/vrf.txt @@ -98,10 +98,11 @@ VRF device: or to specify the output device using cmsg and IP_PKTINFO. -TCP services running in the default VRF context (ie., not bound to any VRF -device) can work across all VRF domains by enabling the tcp_l3mdev_accept -sysctl option: +TCP & UDP services running in the default VRF context (ie., not bound +to any VRF device) can work across all VRF domains by enabling the +tcp_l3mdev_accept and udp_l3mdev_accept sysctl options: sysctl -w net.ipv4.tcp_l3mdev_accept=1 + sysctl -w net.ipv4.udp_l3mdev_accept=1 netfilter rules on the VRF device can be used to limit access to services running in the default VRF context as well. 
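As a rough sketch of the matching policy that udp_l3mdev_accept controls (the struct and helper below are illustrative only, not part of the patch; in the hunks that follow, the real work is done by udp_lib_exact_dif_match() feeding an exact_dif flag into compute_score()):

/* Illustrative sketch only: models the rule the hunks below add to the
 * UDP socket lookup; these names do not exist in the kernel.
 */
#include <stdbool.h>

struct lookup_ctx {
	bool sysctl_udp_l3mdev_accept;	/* net.ipv4.udp_l3mdev_accept */
	bool pkt_from_l3mdev;		/* packet arrived through a VRF (l3mdev) */
	int pkt_dif;			/* ingress (VRF) device index */
};

static bool udp_socket_matches(int sk_bound_dev_if, const struct lookup_ctx *ctx)
{
	/* An exact device match is required either when the socket is bound
	 * to a device, or when the packet came in via an l3mdev and the
	 * administrator has not enabled udp_l3mdev_accept.
	 */
	bool exact_dif = ctx->pkt_from_l3mdev && !ctx->sysctl_udp_l3mdev_accept;

	if (sk_bound_dev_if || exact_dif)
		return sk_bound_dev_if == ctx->pkt_dif;

	return true;	/* unbound socket, no VRF restriction */
}

With the sysctl left at its default of 0, an unbound ("global") UDP socket never matches traffic that arrived in a VRF, mirroring the existing tcp_l3mdev_accept behaviour.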
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index e365732b8051..622d2da27135 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -124,6 +124,10 @@ struct netns_ipv4 { struct inet_timewait_death_row tcp_death_row; int sysctl_max_syn_backlog; +#ifdef CONFIG_NET_L3_MASTER_DEV + int sysctl_udp_l3mdev_accept; +#endif + int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1b861997fdc5..d6880a6149ee 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -1012,6 +1012,17 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = ipv4_privileged_ports, }, +#ifdef CONFIG_NET_L3_MASTER_DEV + { + .procname = "udp_l3mdev_accept", + .data = &init_net.ipv4.sysctl_udp_l3mdev_accept, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, +#endif { } }; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d6dddcf59e79..cf6ba3387401 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -134,6 +134,17 @@ EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) +/* IPCB reference means this can not be used from early demux */ +static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_udp_l3mdev_accept && + skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) + return true; +#endif + return false; +} + static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, @@ -369,7 +380,8 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum) static int compute_score(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, - __be32 daddr, unsigned short hnum, int dif) + __be32 daddr, unsigned short hnum, int dif, + bool exact_dif) { int score; struct inet_sock *inet; @@ -400,7 +412,7 @@ static int compute_score(struct sock *sk, struct net *net, score += 4; } - if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if || exact_dif) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; @@ -425,7 +437,7 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr, /* called with rcu_read_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, - __be32 daddr, unsigned int hnum, int dif, + __be32 daddr, unsigned int hnum, int dif, bool exact_dif, struct udp_hslot *hslot2, struct sk_buff *skb) { @@ -437,7 +449,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, badness = 0; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score(sk, net, saddr, sport, - daddr, hnum, dif); + daddr, hnum, dif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -472,6 +484,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; + bool exact_dif = udp_lib_exact_dif_match(net, skb); int score, badness, matches = 0, reuseport = 0; u32 hash = 0; @@ -484,7 +497,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, - hslot2, skb); + exact_dif, hslot2, skb); if (!result) { unsigned int old_slot2 = slot2; hash2 = udp4_portaddr_hash(net, 
htonl(INADDR_ANY), hnum); @@ -499,7 +512,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, - hslot2, skb); + exact_dif, hslot2, skb); } return result; } @@ -508,7 +521,7 @@ begin: badness = 0; sk_for_each_rcu(sk, &hslot->head) { score = compute_score(sk, net, saddr, sport, - daddr, hnum, dif); + daddr, hnum, dif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 05d69324862e..b4c6516a3a0c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -55,6 +55,16 @@ #include #include "udp_impl.h" +static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb) +{ +#if defined(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_udp_l3mdev_accept && + skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) + return true; +#endif + return false; +} + static u32 udp6_ehashfn(const struct net *net, const struct in6_addr *laddr, const u16 lport, @@ -118,7 +128,7 @@ static void udp_v6_rehash(struct sock *sk) static int compute_score(struct sock *sk, struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, - int dif) + int dif, bool exact_dif) { int score; struct inet_sock *inet; @@ -149,7 +159,7 @@ static int compute_score(struct sock *sk, struct net *net, score++; } - if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if || exact_dif) { if (sk->sk_bound_dev_if != dif) return -1; score++; @@ -165,7 +175,7 @@ static int compute_score(struct sock *sk, struct net *net, static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned int hnum, int dif, - struct udp_hslot *hslot2, + bool exact_dif, struct udp_hslot *hslot2, struct sk_buff *skb) { struct sock *sk, *result; @@ -176,7 +186,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, badness = -1; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score(sk, net, saddr, sport, - daddr, hnum, dif); + daddr, hnum, dif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -212,6 +222,7 @@ struct sock *__udp6_lib_lookup(struct net *net, unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; + bool exact_dif = udp6_lib_exact_dif_match(net, skb); int score, badness, matches = 0, reuseport = 0; u32 hash = 0; @@ -223,7 +234,7 @@ struct sock *__udp6_lib_lookup(struct net *net, goto begin; result = udp6_lib_lookup2(net, saddr, sport, - daddr, hnum, dif, + daddr, hnum, dif, exact_dif, hslot2, skb); if (!result) { unsigned int old_slot2 = slot2; @@ -239,7 +250,8 @@ struct sock *__udp6_lib_lookup(struct net *net, result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, - hslot2, skb); + exact_dif, hslot2, + skb); } return result; } @@ -247,7 +259,8 @@ begin: result = NULL; badness = -1; sk_for_each_rcu(sk, &hslot->head) { - score = compute_score(sk, net, saddr, sport, daddr, hnum, dif); + score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, + exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { -- cgit v1.2.3 From 6ad20165d376fa07919a70e4f43dfae564601829 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 30 Jan 2017 08:22:01 -0800 Subject: drivers: net: generalize napi_complete_done() napi_complete_done() allows to opt-in for gro_flush_timeout, added back in linux-3.19, commit 
3b47d30396ba ("net: gro: add a per device gro flush timer") This allows for more efficient GRO aggregation without sacrificing latencies. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/can/at91_can.c | 2 +- drivers/net/can/c_can/c_can.c | 2 +- drivers/net/can/flexcan.c | 2 +- drivers/net/can/ifi_canfd/ifi_canfd.c | 2 +- drivers/net/can/janz-ican3.c | 2 +- drivers/net/can/m_can/m_can.c | 2 +- drivers/net/can/rcar/rcar_can.c | 2 +- drivers/net/can/rcar/rcar_canfd.c | 2 +- drivers/net/can/xilinx_can.c | 2 +- drivers/net/ethernet/3com/typhoon.c | 2 +- drivers/net/ethernet/adi/bfin_mac.c | 2 +- drivers/net/ethernet/agere/et131x.c | 2 +- drivers/net/ethernet/altera/altera_tse_main.c | 2 +- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2 +- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 2 +- drivers/net/ethernet/arc/emac_main.c | 2 +- drivers/net/ethernet/atheros/alx/main.c | 2 +- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2 +- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2 +- drivers/net/ethernet/atheros/atlx/atl1.c | 2 +- drivers/net/ethernet/broadcom/b44.c | 2 +- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2 +- drivers/net/ethernet/broadcom/bgmac.c | 2 +- drivers/net/ethernet/broadcom/bnx2.c | 4 ++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/sb1250-mac.c | 2 +- drivers/net/ethernet/brocade/bna/bnad.c | 2 +- drivers/net/ethernet/cadence/macb.c | 2 +- drivers/net/ethernet/calxeda/xgmac.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2 +- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/sge.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +- drivers/net/ethernet/cisco/enic/enic_main.c | 4 ++-- drivers/net/ethernet/dec/tulip/interrupt.c | 6 +++--- drivers/net/ethernet/dnet.c | 2 +- drivers/net/ethernet/emulex/benet/be_main.c | 2 +- drivers/net/ethernet/ethoc.c | 2 +- drivers/net/ethernet/ezchip/nps_enet.c | 2 +- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- drivers/net/ethernet/freescale/fec_main.c | 2 +- .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 2 +- drivers/net/ethernet/freescale/gianfar.c | 4 ++-- drivers/net/ethernet/freescale/ucc_geth.c | 2 +- drivers/net/ethernet/hisilicon/hip04_eth.c | 2 +- drivers/net/ethernet/hisilicon/hisi_femac.c | 2 +- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2 +- drivers/net/ethernet/ibm/ibmveth.c | 2 +- drivers/net/ethernet/ibm/ibmvnic.c | 2 +- drivers/net/ethernet/intel/e100.c | 2 +- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2 +- drivers/net/ethernet/korina.c | 2 +- drivers/net/ethernet/lantiq_etop.c | 21 +++++++++------------ drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +- drivers/net/ethernet/marvell/mvneta.c | 6 ++---- drivers/net/ethernet/marvell/mvpp2.c | 2 +- drivers/net/ethernet/marvell/pxa168_eth.c | 2 +- drivers/net/ethernet/moxa/moxart_ether.c | 2 +- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 +- drivers/net/ethernet/natsemi/natsemi.c | 2 +- drivers/net/ethernet/neterion/s2io.c | 4 ++-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 6 +++--- drivers/net/ethernet/nvidia/forcedeth.c | 2 +- drivers/net/ethernet/nxp/lpc_eth.c | 2 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2 +- drivers/net/ethernet/pasemi/pasemi_mac.c | 2 +- 
.../net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 +- drivers/net/ethernet/qlogic/qede/qede_fp.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 10 +++++----- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 2 +- drivers/net/ethernet/qualcomm/emac/emac.c | 2 +- drivers/net/ethernet/realtek/r8169.c | 2 +- drivers/net/ethernet/rocker/rocker_main.c | 2 +- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2 +- drivers/net/ethernet/sfc/efx.c | 2 +- drivers/net/ethernet/sfc/falcon/efx.c | 2 +- drivers/net/ethernet/smsc/smsc9420.c | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- drivers/net/ethernet/sun/niu.c | 2 +- drivers/net/ethernet/sun/sungem.c | 2 +- drivers/net/ethernet/sun/sunvnet_common.c | 2 +- drivers/net/ethernet/tehuti/tehuti.c | 2 +- drivers/net/ethernet/ti/cpsw.c | 2 +- drivers/net/ethernet/ti/davinci_emac.c | 2 +- drivers/net/ethernet/ti/netcp_core.c | 2 +- drivers/net/ethernet/tile/tilegx.c | 2 +- drivers/net/ethernet/tile/tilepro.c | 2 +- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2 +- drivers/net/ethernet/toshiba/spider_net.c | 2 +- drivers/net/ethernet/toshiba/tc35815.c | 2 +- drivers/net/ethernet/tundra/tsi108_eth.c | 2 +- drivers/net/ethernet/via/via-rhine.c | 2 +- drivers/net/ethernet/via/via-velocity.c | 2 +- drivers/net/ethernet/wiznet/w5100.c | 2 +- drivers/net/ethernet/wiznet/w5300.c | 2 +- drivers/net/fjes/fjes_main.c | 2 +- drivers/net/vmxnet3/vmxnet3_drv.c | 4 ++-- drivers/net/wan/fsl_ucc_hdlc.c | 2 +- drivers/net/wan/hd64572.c | 2 +- drivers/net/wireless/ath/ath10k/pci.c | 2 +- drivers/net/wireless/ath/wil6210/netdev.c | 2 +- drivers/net/xen-netback/interface.c | 2 +- drivers/net/xen-netfront.c | 2 +- drivers/staging/octeon/ethernet-rx.c | 2 +- drivers/staging/unisys/visornic/visornic_main.c | 2 +- 109 files changed, 132 insertions(+), 137 deletions(-) diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 8f5e93cb7975..0e0df0ba288c 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota) u32 reg_ier = AT91_IRQ_ERR_FRAME; reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); - napi_complete(napi); + napi_complete_done(napi, work_done); at91_write(priv, AT91_IER, reg_ier); } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index e3dccd3200d5..606b7d8ffe13 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct *napi, int quota) end: if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* enable all IRQs if we are not in bus off state */ if (priv->can.state != CAN_STATE_BUS_OFF) c_can_irq_control(priv, true); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 16f7cadda5c3..43cfce8b076b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -703,7 +703,7 @@ static int flexcan_poll(struct napi_struct *napi, int quota) work_done += flexcan_poll_bus_err(dev, reg_esr); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* enable IRQs */ flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); flexcan_write(priv->reg_ctrl_default, ®s->ctrl); diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 368bb0710d8f..138f5ae75c0b 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct 
napi_struct *napi, int quota) work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); ifi_canfd_irq_enable(ndev, 1); } diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index f13bb8d9bb84..2ba1a81500c1 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct *napi, int budget) /* We have processed all packets that the adapter had, but it * was less than our budget, stop polling */ if (received < budget) - napi_complete(napi); + napi_complete_done(napi, received); spin_lock_irqsave(&mod->lock, flags); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 195f15edb32e..7a6554efd42b 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct *napi, int quota) work_done += m_can_do_rx_poll(dev, (quota - work_done)); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); m_can_enable_all_interrupts(priv); } diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 788459f6bf5c..caed4e6960f8 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_struct *napi, int quota) } /* All packets processed */ if (num_pkts < quota) { - napi_complete(napi); + napi_complete_done(napi, num_pkts); priv->ier |= RCAR_CAN_IER_RXFIE; writeb(priv->ier, &priv->regs->ier); } diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 43cdd5544b0c..4ef07d97156d 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) /* All packets processed */ if (num_pkts < quota) { - napi_complete(napi); + napi_complete_done(napi, num_pkts); /* Enable Rx FIFO interrupts */ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFIE); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index c71a03593595..89aec07c225f 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) can_led_event(ndev, CAN_LED_EVENT_RX); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); priv->write_reg(priv, XCAN_IER_OFFSET, ier); diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 1986ad17950a..084a6d58543a 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1753,7 +1753,7 @@ typhoon_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); iowrite32(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(tp->ioaddr); diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 88164529b52a..a81731303730 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_struct *napi, int budget) } if (i < budget) { - napi_complete(napi); + napi_complete_done(napi, i); if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, 
&lp->flags)) enable_irq(IRQ_MAC_RX); } diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 831bab352f8e..87a11b9f0ea5 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3575,7 +3575,7 @@ static int et131x_poll(struct napi_struct *napi, int budget) et131x_handle_send_pkts(adapter); if (work_done < budget) { - napi_complete(&adapter->napi); + napi_complete_done(&adapter->napi, work_done); et131x_enable_interrupts(adapter); } diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 25864bff25ee..527908c7e384 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -513,7 +513,7 @@ static int tse_poll(struct napi_struct *napi, int budget) if (rxcomplete < budget) { - napi_complete(napi); + napi_complete_done(napi, rxcomplete); netdev_dbg(priv->dev, "NAPI Complete, did %d packets with budget %d\n", diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index de59db6c5841..ab43c52723df 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -840,7 +840,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget) processed = xgene_enet_process_ring(ring, budget); if (processed != budget) { - napi_complete(napi); + napi_complete_done(napi, processed); enable_irq(ring->irq); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 140962f23e61..cb30a6396a70 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -97,7 +97,7 @@ __acquires(&self->lock) work_done = budget; if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U << self->aq_ring_param.vec_idx); } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index abc9f2a59054..23873395f100 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) work_done = arc_emac_rx(ndev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); } diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 391bb5c09a6a..6a27c2662675 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -311,7 +311,7 @@ static int alx_poll(struct napi_struct *napi, int budget) if (!tx_complete || work == budget) return budget; - napi_complete(&np->napi); + napi_complete_done(&np->napi, work); /* enable interrupt */ if (alx->flags & ALX_FLAG_USING_MSIX) { diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 773d3b7d8dd5..7e913d8331c3 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1892,7 +1892,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget) if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); adapter->hw.intr_mask |= ISR_RX_PKT; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); } diff --git 
a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index e96091b652a7..ef003c522ba2 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1526,7 +1526,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget) /* If no Tx and not enough Rx work done, exit the polling mode */ if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); imr_data = AT_READ_REG(&adapter->hw, REG_IMR); AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); /* test debug */ diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 7dad8e4b9d2a..022772e1e249 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget) if (work_done >= budget) return work_done; - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable Interrupt */ if (likely(adapter->int_enabled)) atlx_imr_set(adapter, IMR_NORMAL_MASK); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 7aef70f7d8ef..5b95bb48ce97 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); b44_enable_ints(bp); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index c483618b57bd..0ee6e208aa07 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) /* no more packet in rx/tx queue, remove device from poll * queue */ - napi_complete(napi); + napi_complete_done(napi, rx_work_done); /* restore rx/tx interrupt */ enet_dmac_writel(priv, priv->dma_chan_int_mask, diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 0e066dc6b8cc..9122608df16e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1148,7 +1148,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight) return weight; if (handled < weight) { - napi_complete(napi); + napi_complete_done(napi, handled); bgmac_chip_intrs_on(bgmac); } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index de1d07c08495..e3af1f3cb61f 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_fast_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx); @@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index c0dac0e5696d..9e8c06130c09 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3224,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) * has been updated when NAPI was scheduled. */ if (IS_FCOE_FP(fp)) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); } else { bnx2x_update_fpsb_idx(fp); /* bnx2x_has_rx_work() reads the status block, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 3d83b9028014..aff3dc114a5b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1759,7 +1759,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) } if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_pkts); BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); } return rx_pkts; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 435a2e4739d1..89d4feba1a9a 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget) sbdma_tx_process(sc, &(sc->sbm_txdma), 1); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 73a94113db1f..6e13c937d715 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget) return rcvd; poll_exit: - napi_complete(napi); + napi_complete_done(napi, rcvd); rx_ctrl->rx_complete++; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ff1e648e74c9..d7d135c95509 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1090,7 +1090,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* Packets received while interrupts were disabled */ status = macb_readl(bp, RSR); diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index b0540658afad..2bd7c638b178 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget) work_done = xgmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } return work_done; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 9261ddc06852..fbe1986b87f6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2446,7 +2446,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) } if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 
b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index a6587d7019ed..78cfa8ba1b57 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1631,7 +1631,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) } if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 21f80f5744ba..a2138686c605 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* We stopped because no more packets were available. */ - napi_complete(napi); + napi_complete_done(napi, work_done); octeon_mgmt_enable_rx_irq(p); } octeon_mgmt_update_rx_stats(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a25bb6ea2ff4..6feaa24bcfd4 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* Slow packet rate, exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); /* Re-enable interrupts */ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->cq_idx); diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 86f467a2c485..d56142b98534 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, int budget) int work_done = process_responses(adapter, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e4b5b057f417..1b9d154f1149 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct *napi, int budget) __skb_queue_head_init(&queue); skb_queue_splice_init(&q->rx_queue, &queue); if (skb_queue_empty(&queue)) { - napi_complete(napi); + napi_complete_done(napi, work_done); spin_unlock_irq(&q->lock); return work_done; } @@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) int work_done = process_responses(adap, qs, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* * Because we don't atomically flush the following diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f3ed9ce99e5e..e37dde2ba97f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) u32 val; if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); intr_params = rspq->next_intr_params; rspq->next_intr_params = rspq->intr_params; } else diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c 
b/drivers/net/ethernet/cisco/enic/enic_main.c index c5842c525eed..91e42bea151c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1343,7 +1343,7 @@ static int enic_poll(struct napi_struct *napi, int budget) * exit polling */ - napi_complete(napi); + napi_complete_done(napi, rq_work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); @@ -1500,7 +1500,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) * exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c index 92306b320840..ba6ae24acf62 100644 --- a/drivers/net/ethernet/dec/tulip/interrupt.c +++ b/drivers/net/ethernet/dec/tulip/interrupt.c @@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, int budget) /* Remove us from polling list and enable RX intr. */ - napi_complete(napi); - iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); + napi_complete_done(napi, work_done); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: * 1. it can race with disabling irqs in irq handler @@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, int budget) * before we did napi_complete(). See? We would lose it. */ /* remove ourselves from the polling list */ - napi_complete(napi); + napi_complete_done(napi, work_done); return work_done; } diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 2a17c59f69f9..3e77dd863175 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct *napi, int budget) /* We processed all packets available. Tell NAPI it can * stop polling then re-enable rx interrupts. 
*/ - napi_complete(napi); + napi_complete_done(napi, npackets); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0690b57466ab..b3a054026e71 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3324,7 +3324,7 @@ int be_poll(struct napi_struct *napi, int budget) be_process_mcc(adapter); if (max_work < budget) { - napi_complete(napi); + napi_complete_done(napi, max_work); /* Skyhawk EQ_DB has a provision to set the rearm to interrupt * delay via a delay multiplier encoding value diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index c45757af9ade..f18aba05f1c2 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -614,7 +614,7 @@ static int ethoc_poll(struct napi_struct *napi, int budget) tx_work_done = ethoc_tx(priv->netdev, budget); if (rx_work_done < budget && tx_work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); } diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 223f35cc034c..992ebe973d25 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) if (work_done < budget) { u32 buf_int_enable_value = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); /* set tx_done and rx_rdy bits */ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index b7cbc26a0911..25a14a3fe784 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2001,7 +2001,7 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget) int cleaned = qman_p_poll_dqrr(np->p, budget); if (cleaned < budget) { - napi_complete(napi); + napi_complete_done(napi, cleaned); qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); } else if (np->down) { diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 38160c2bebcb..2cc552ddd8f0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) fec_enet_tx(ndev); if (pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, pkts); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } return pkts; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 1f98838f32b7..54e3ce9bd94c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (received < budget && tx_left) { /* done */ - napi_complete(napi); + napi_complete_done(napi, received); (*fep->ops->napi_enable)(dev); return received; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index c1b671667920..d0ebab705225 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3183,7 +3183,7 @@ static int gfar_poll_rx_sq(struct 
napi_struct *napi, int budget) if (work_done < budget) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(®s->rstat, gfargrp->rstat); @@ -3272,7 +3272,7 @@ static int gfar_poll_rx(struct napi_struct *napi, int budget) if (!num_act_queues) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(®s->rstat, gfargrp->rstat); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 9d660888510f..3f7ae9f64cd8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) howmany += ucc_geth_rx(ugeth, i, budget - howmany); if (howmany < budget) { - napi_complete(napi); + napi_complete_done(napi, howmany); setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 97b184774784..6e50ec82b3d8 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -555,7 +555,7 @@ refill: priv->reg_inten |= RCV_INT; writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); } - napi_complete(napi); + napi_complete_done(napi, rx); done: /* clean up tx descriptors and start a new timer if necessary */ tx_remaining = hip04_tx_reclaim(ndev, false); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 979852d56f31..2c2808830e95 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hisi_femac_irq_enable(priv, DEF_INT_MASK & (~IRQ_INT_TX_PER_PACKET)); } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 418ca1f3774a..25a6c8722eca 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -662,7 +662,7 @@ static int hix5hd2_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hix5hd2_irq_enable(priv); } diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index b618be6d14cd..72ab7b6bf20b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1326,7 +1326,7 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { - napi_complete(napi); + napi_complete_done(napi, frames_processed); /* We think we are done - reenable interrupts, * then check once more to make sure we are done. 
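All of the per-driver conversions in this patch follow the same NAPI poll shape. The sketch below is for reference only and uses hypothetical foo_* names, not code from any driver touched here: the number of packets actually processed is passed to napi_complete_done(), which lets the core apply the optional gro_flush_timeout before flushing held GRO packets, and interrupts are re-enabled only when the budget was not exhausted.

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* device state omitted */
};

/* Stand-ins for a driver's own RX processing and IRQ re-enable helpers. */
static int foo_process_rx(struct foo_priv *priv, int budget);
static void foo_enable_rx_irq(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_process_rx(priv, budget);

	if (work_done < budget) {
		/* Report the real work done so the core can honour an
		 * optional gro_flush_timeout before flushing GRO.
		 */
		napi_complete_done(napi, work_done);
		foo_enable_rx_irq(priv);
	}

	/* If work_done == budget, NAPI stays scheduled and runs again. */
	return work_done;
}

The timeout itself is configured per device via /sys/class/net/<device>/gro_flush_timeout (in nanoseconds, 0 by default, meaning no deferral).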
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3c2526bde7a4..c46935d4a3fe 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -987,7 +987,7 @@ restart_poll: if (frames_processed < budget) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); - napi_complete(napi); + napi_complete_done(napi, frames_processed); if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && napi_reschedule(napi)) { disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 04e939226e08..2b7323d392dc 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); e100_enable_irq(nic); } diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5826b1ddedcf..fbd220d137b3 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__IXGB_DOWN, &adapter->flags)) ixgb_irq_enable(adapter); } diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 3e415b895d95..9fae98caf83a 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -464,7 +464,7 @@ static int korina_poll(struct napi_struct *napi, int budget) work_done = korina_rx(dev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(readl(&lp->rx_dma_regs->dmasm) & ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index faea52da8dae..afc810069440 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget) { struct ltq_etop_chan *ch = container_of(napi, struct ltq_etop_chan, napi); - int rx = 0; - int complete = 0; + int work_done = 0; - while ((rx < budget) && !complete) { + while (work_done < budget) { struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { - ltq_etop_hw_receive(ch); - rx++; - } else { - complete = 1; - } + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) + break; + ltq_etop_hw_receive(ch); + work_done++; } - if (complete || !rx) { - napi_complete(&ch->napi); + if (work_done < budget) { + napi_complete_done(&ch->napi, work_done); ltq_dma_ack_irq(&ch->dma); } - return rx; + return work_done; } static int diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1fa7c03edec2..20cb7f0de601 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2319,7 +2319,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) if (work_done < budget) { if (mp->oom) mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); - napi_complete(napi); + napi_complete_done(napi, work_done); wrlp(mp, INT_MASK, mp->int_mask); } diff --git a/drivers/net/ethernet/marvell/mvneta.c 
b/drivers/net/ethernet/marvell/mvneta.c index 6dcc951af0ff..de6c47744b8e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2767,11 +2767,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget) rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); } - budget -= rx_done; - - if (budget > 0) { + if (rx_done < budget) { cause_rx_tx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); if (pp->neta_armada3700) { unsigned long flags; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 69db40e1a4e1..c2fd7c36f927 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5405,7 +5405,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) if (budget > 0) { cause_rx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); mvpp2_interrupts_enable(port); } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3af2814ada23..3376a19f1e19 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1261,7 +1261,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) } work_done = rxq_process(dev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); wrl(pep, INT_MASK, ALL_INTS); } diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 6a6525f7d2ea..06c9f4100cb9 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -269,7 +269,7 @@ rx_next: } if (rx < budget) { - napi_complete(napi); + napi_complete_done(napi, rx); } priv->reg_imr |= RPKT_FINISH_M; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index db297cfce6f4..3d881176a353 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1678,7 +1678,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) myri10ge_ss_unlock_napi(ss); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); put_be32(htonl(3), ss->irq_claim); } return work_done; diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 90eac63f9606..8e72679c015f 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -2265,7 +2265,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget) np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); - napi_complete(napi); + napi_complete_done(napi, work_done); /* Reenable interrupts providing nothing is trying to shut * the chip down. 
*/ diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 564f682fa4dc..203abcb0c252 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) s2io_chk_rx_buffers(nic, ring); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /*Re Enable MSI-Rx Vector*/ addr = (u8 __iomem *)&bar0->xmsi_mask_reg; addr += 7 - ring->ring_no; @@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) break; } if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ writeq(0, &bar0->rx_traffic_mask); readl(&bar0->rx_traffic_mask); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index f364502229db..6a4310af5d97 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget) vxge_hw_vpath_poll_rx(ring->handle); pkts_processed = ring->pkts_processed; - if (ring->pkts_processed < budget_org) { - napi_complete(napi); + if (pkts_processed < budget_org) { + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the vpath */ vxge_hw_channel_msix_unmask( @@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) VXGE_COMPLETE_ALL_TX(vdev); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ vxge_hw_device_unmask_all(hldev); vxge_hw_device_flush_io(hldev); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index dfc2c8149d22..58ba5d3f9e5f 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3749,7 +3749,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) if (rx_work < budget) { /* re-enable interrupts (msix not enabled in napi) */ - napi_complete(napi); + napi_complete_done(napi, rx_work); writel(np->irqmask, base + NvRegIrqMask); } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index dd6b0d0f7fa5..9c7ffd649e9a 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_struct *napi, int budget) rx_done = __lpc_handle_recv(ndev, budget); if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); lpc_eth_enable_int(pldat->net_base); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index d461f419948e..f9e4e8eca665 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2385,7 +2385,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) poll_end_flag = true; if (poll_end_flag) { - napi_complete(napi); + napi_complete_done(napi, work_done); pch_gbe_irq_enable(adapter); } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index badfa1d562a4..49591d9c2e1b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct 
napi_struct *napi, int budget) pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); if (pkts < budget) { /* all done, no more packets present */ - napi_complete(napi); + napi_complete_done(napi, pkts); pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 86fb9d3df700..0cf8a3703275 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2396,7 +2396,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 445d4d2492c3..26848eed3bc1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1372,7 +1372,7 @@ int qede_poll(struct napi_struct *napi, int budget) qede_rx_int(fp, budget) : 0; if (rx_work_done < budget) { if (!qede_poll_is_more_work(fp)) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); /* Update and reenable interrupts */ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index fedd7366713c..84dd83031a1b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_sds_intr(adapter, sds_ring); qlcnic_enable_tx_intr(adapter, tx_ring); @@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget) work_done = qlcnic_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget) adapter = sds_ring->adapter; work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 1409412ab39d..e9e647072596 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2334,7 +2334,7 @@ static int 
ql_napi_poll_msix(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); ql_enable_completion_interrupt(qdev, rx_ring->irq); } return work_done; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index f519351ef3a2..3387c0a88746 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget) emac_mac_rx_process(adpt, rx_q, &work_done, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); irq->mask |= rx_q->intr; writel(irq->mask, adpt->base + EMAC_INT_MASK); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 6073f466f6e8..81f18a833527 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7583,7 +7583,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); rtl_irq_enable(tp, enable_mask); mmiowb(); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 7c450b5a1138..0f63a44a955d 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2517,7 +2517,7 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget) } if (credits < budget) - napi_complete(napi); + napi_complete_done(napi, credits); rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 07074d9bc45d..d54490d3f7ad 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1563,7 +1563,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget) work_done = sxgbe_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index f2ec853e356d..466c02805f72 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -331,7 +331,7 @@ static int efx_poll(struct napi_struct *napi, int budget) * since efx_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. */ - napi_complete(napi); + napi_complete_done(napi, spent); efx_nic_eventq_read_ack(channel); } diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 8cfbe01e1ddf..c4ff3bb1a1ef 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -327,7 +327,7 @@ static int ef4_poll(struct napi_struct *napi, int budget) * since ef4_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. 
*/ - napi_complete(napi); + napi_complete_done(napi, spent); ef4_nic_eventq_read_ack(channel); } diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 3174aebb322f..2fa3c1d03abc 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -861,7 +861,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget) smsc9420_pci_flush_write(pd); if (work_done < budget) { - napi_complete(&pd->napi); + napi_complete_done(&pd->napi, work_done); /* re-enable RX DMA interrupts */ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 38e1fb75490e..26a2185fc8a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2684,7 +2684,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget) work_done = stmmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); stmmac_enable_dma_irq(priv); } return work_done; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index e557a3290a25..57978056b336 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *napi, int budget) work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); niu_ldg_rearm(np, lp, 1); } return work_done; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index d277e4107976..5c5952e782cd 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *napi, int budget) gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); - napi_complete(napi); + napi_complete_done(napi, work_done); gem_enable_ints(gp); return work_done; diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 8878b75d68b4..191c8ade6155 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -860,7 +860,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget) int processed = vnet_event_napi(port, budget); if (processed < budget) { - napi_complete(napi); + napi_complete_done(napi, processed); port->rx_event &= ~LDC_EVENT_DATA_READY; vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); } diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index baa3e4a5731c..f864fd0663db 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct *napi, int budget) * device lock and allow waiting tasks (eg rmmod) to advance) */ priv->napi_stop = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); bdx_enable_interrupts(priv); } return work_done; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 897ebbe50225..67b7323b6907 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -942,7 +942,7 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) } if (num_rx < budget) { - napi_complete(napi_rx); + napi_complete_done(napi_rx, num_rx); writel(0xff, &cpsw->wr_regs->rx_en); if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { cpsw->rx_irq_disabled 
= false; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 481c7bf0395b..64d5527feb2a 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct *napi, int budget) &emac_rxhost_errcodes[cause][0], ch); } } else if (num_rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, num_rx_pkts); emac_int_enable(priv); } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 2b56eed249f7..ebab1473f366 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -969,7 +969,7 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) netcp_rxpool_refill(netcp); if (packets < budget) { - napi_complete(&netcp->rx_napi); + napi_complete_done(&netcp->rx_napi, packets); knav_queue_enable_notify(netcp->rx_queue); } diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 2255f9a6f3bc..7c634bc75615 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -681,7 +681,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } /* There are no packets left. */ - napi_complete(&info_mpipe->napi); + napi_complete_done(&info_mpipe->napi, work); md = &mpipe_data[instance]; /* Re-enable hypervisor interrupts. */ diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 44f153713791..49ccee4b9aec 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } } - napi_complete(&info->napi); + napi_complete_done(&info->napi, work); if (!priv->active) goto done; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 345316c749e7..72013314bba8 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget) } if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); gelic_card_rx_irq_on(card); } return packets_done; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index cb341dfe65ad..cec9e70ab995 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget) /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); spider_net_rx_irq_on(card); card->ignore_rx_ramfull = 0; } diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 3be61ed28741..a45f98fa4aa7 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1638,7 +1638,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget) spin_unlock(&lp->rx_lock); if (received < budget) { - napi_complete(napi); + napi_complete_done(napi, received); /* enable interrupts */ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index f153ad729ce5..c5583991da4a 100644 --- 
a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget) if (num_received < budget) { data->rxpending = 0; - napi_complete(napi); + napi_complete_done(napi, num_received); TSI_WRITE(TSI108_EC_INTMASK, TSI_READ(TSI108_EC_INTMASK) diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 453a1fad560c..c068c58428f7 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); iowrite16(enable_mask, ioaddr + IntrEnable); mmiowb(); } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 4716e60e2ccb..d088788b27a7 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_struct *napi, int budget) velocity_tx_srv(vptr); /* If budget not fully consumed, exit the polling mode */ if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); mac_enable_int(vptr->mac_regs); } spin_unlock_irqrestore(&vptr->lock, flags); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index e1296ef2cf66..f90267f0519f 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5100_enable_intr(priv); } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 724fabd38a23..56ae573001e8 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5300_write(priv, W5300_IMR, IR_S0); mmiowb(); } diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 5028001429c7..b75d9cdcfb0c 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1155,7 +1155,7 @@ static int fjes_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->unset_rx_last) { adapter->rx_last_jiffies = jiffies; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e34b1297c96a..25bc764ae7dc 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, int budget) rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); if (rxd_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rxd_done); vmxnet3_enable_all_intrs(rx_queue->adapter); } return rxd_done; @@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget) rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); if (rxd_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rxd_done); vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); } return rxd_done; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index e38ce4da3efb..d869533d2e79 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ 
b/drivers/net/wan/fsl_ucc_hdlc.c @@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget) howmany += hdlc_rx_done(priv, budget - howmany); if (howmany < budget) { - napi_complete(napi); + napi_complete_done(napi, howmany); qe_setbits32(priv->uccf->p_uccm, (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); } diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index 7ef49dab6855..cff0cfadd650 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget) received = sca_rx_done(port, budget); if (received < budget) { - napi_complete(napi); + napi_complete_done(napi, received); enable_intr(port); } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 93b9790cfe8d..e757c514e2b7 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2799,7 +2799,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) done = ath10k_htt_txrx_compl_task(ar, budget); if (done < budget) { - napi_complete(ctx); + napi_complete_done(ctx, done); /* In case of MSI, it is possible that interrupts are received * while NAPI poll is inprogress. So pending interrupts that are * received after processing all copy engine pipes by NAPI poll diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 6676001dcbca..cab5334e4f9d 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -68,7 +68,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) done = budget - quota; if (done < budget) { - napi_complete(napi); + napi_complete_done(napi, done); wil6210_unmask_irq_rx(wil); wil_dbg_txrx(wil, "NAPI RX complete\n"); } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 579521327b03..1073b27e54aa 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget) work_done = xenvif_tx_action(queue, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); xenvif_napi_schedule_or_enable_events(queue); } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 2c7c29fa268d..cf82b5b42056 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1051,7 +1051,7 @@ err: if (work_done < budget) { int more_to_do = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); if (more_to_do) diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index f0900d1c4d7b..fc849d4a1b5d 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) if (rx_count < budget) { /* No more work */ - napi_complete(napi); + napi_complete_done(napi, rx_count); enable_irq(rx_group->irq); } return rx_count; diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index c1f674f5268c..ca3743d273e0 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_struct *napi, int budget) /* If there aren't any more packets to receive stop 
the poll */ if (rx_count < budget) - napi_complete(napi); + napi_complete_done(napi, rx_count); return rx_count; } -- cgit v1.2.3 From 30357d7d8aaf2a980ab17c2ce054b2b87e60af88 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 30 Jan 2017 12:07:37 -0800 Subject: lwtunnel: remove device arg to lwtunnel_build_state Nothing about lwt state requires a device reference, so remove the input argument. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/net/lwtunnel.h | 6 +++--- net/core/lwt_bpf.c | 2 +- net/core/lwtunnel.c | 4 ++-- net/ipv4/fib_semantics.c | 27 ++++++++------------------- net/ipv4/ip_tunnel_core.c | 4 ++-- net/ipv6/ila/ila_lwt.c | 2 +- net/ipv6/route.c | 2 +- net/ipv6/seg6_iptunnel.c | 2 +- net/mpls/mpls_iptunnel.c | 2 +- 9 files changed, 20 insertions(+), 31 deletions(-) diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 73dd87647460..45399ed132bf 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -33,7 +33,7 @@ struct lwtunnel_state { }; struct lwtunnel_encap_ops { - int (*build_state)(struct net_device *dev, struct nlattr *encap, + int (*build_state)(struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **ts); void (*destroy_state)(struct lwtunnel_state *lws); @@ -109,7 +109,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, unsigned int num); int lwtunnel_valid_encap_type(u16 encap_type); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len); -int lwtunnel_build_state(struct net_device *dev, u16 encap_type, +int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws); @@ -181,7 +181,7 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) return -EOPNOTSUPP; } -static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, +static inline int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws) diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 03600459bcfd..0cfe7b0216c3 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -237,7 +237,7 @@ static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = { [LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 }, }; -static int bpf_build_state(struct net_device *dev, struct nlattr *nla, +static int bpf_build_state(struct nlattr *nla, unsigned int family, const void *cfg, struct lwtunnel_state **ts) { diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index c23465005f2f..6df9f8fabf0c 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -101,7 +101,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops, } EXPORT_SYMBOL(lwtunnel_encap_del_ops); -int lwtunnel_build_state(struct net_device *dev, u16 encap_type, +int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws) { @@ -116,7 +116,7 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type, rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[encap_type]); if (likely(ops && ops->build_state && try_module_get(ops->owner))) { - ret = ops->build_state(dev, encap, family, cfg, lws); + ret = ops->build_state(encap, family, cfg, lws); if (ret) module_put(ops->owner); } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 319c66de92eb..6306a67880e8 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -471,7 +471,6 @@ static int 
fib_count_nexthops(struct rtnexthop *rtnh, int remaining) static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, int remaining, struct fib_config *cfg) { - struct net *net = cfg->fc_nlinfo.nl_net; int ret; change_nexthops(fi) { @@ -503,16 +502,14 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, nla = nla_find(attrs, attrlen, RTA_ENCAP); if (nla) { struct lwtunnel_state *lwtstate; - struct net_device *dev = NULL; struct nlattr *nla_entype; nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); if (!nla_entype) goto err_inval; - if (cfg->fc_oif) - dev = __dev_get_by_index(net, cfg->fc_oif); - ret = lwtunnel_build_state(dev, nla_get_u16( + + ret = lwtunnel_build_state(nla_get_u16( nla_entype), nla, AF_INET, cfg, &lwtstate); @@ -597,21 +594,18 @@ static inline void fib_add_weight(struct fib_info *fi, #endif /* CONFIG_IP_ROUTE_MULTIPATH */ -static int fib_encap_match(struct net *net, u16 encap_type, +static int fib_encap_match(u16 encap_type, struct nlattr *encap, - int oif, const struct fib_nh *nh, + const struct fib_nh *nh, const struct fib_config *cfg) { struct lwtunnel_state *lwtstate; - struct net_device *dev = NULL; int ret, result = 0; if (encap_type == LWTUNNEL_ENCAP_NONE) return 0; - if (oif) - dev = __dev_get_by_index(net, oif); - ret = lwtunnel_build_state(dev, encap_type, encap, + ret = lwtunnel_build_state(encap_type, encap, AF_INET, cfg, &lwtstate); if (!ret) { result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate); @@ -623,7 +617,6 @@ static int fib_encap_match(struct net *net, u16 encap_type, int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) { - struct net *net = cfg->fc_nlinfo.nl_net; #ifdef CONFIG_IP_ROUTE_MULTIPATH struct rtnexthop *rtnh; int remaining; @@ -634,9 +627,8 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) if (cfg->fc_oif || cfg->fc_gw) { if (cfg->fc_encap) { - if (fib_encap_match(net, cfg->fc_encap_type, - cfg->fc_encap, cfg->fc_oif, - fi->fib_nh, cfg)) + if (fib_encap_match(cfg->fc_encap_type, + cfg->fc_encap, fi->fib_nh, cfg)) return 1; } if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && @@ -1093,13 +1085,10 @@ struct fib_info *fib_create_info(struct fib_config *cfg) if (cfg->fc_encap) { struct lwtunnel_state *lwtstate; - struct net_device *dev = NULL; if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE) goto err_inval; - if (cfg->fc_oif) - dev = __dev_get_by_index(net, cfg->fc_oif); - err = lwtunnel_build_state(dev, cfg->fc_encap_type, + err = lwtunnel_build_state(cfg->fc_encap_type, cfg->fc_encap, AF_INET, cfg, &lwtstate); if (err) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 9d6c10096d44..a31f47ccaad9 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -226,7 +226,7 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = { [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, }; -static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr, +static int ip_tun_build_state(struct nlattr *attr, unsigned int family, const void *cfg, struct lwtunnel_state **ts) { @@ -323,7 +323,7 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, }; -static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr, +static int ip6_tun_build_state(struct nlattr *attr, unsigned int family, const void *cfg, struct lwtunnel_state **ts) { diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 13b5e85fe0d5..ce1aae4a7fc8 100644 --- a/net/ipv6/ila/ila_lwt.c +++ 
b/net/ipv6/ila/ila_lwt.c @@ -115,7 +115,7 @@ static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, }, }; -static int ila_build_state(struct net_device *dev, struct nlattr *nla, +static int ila_build_state(struct nlattr *nla, unsigned int family, const void *cfg, struct lwtunnel_state **ts) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 61d7006324ed..2563331b0532 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1897,7 +1897,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) if (cfg->fc_encap) { struct lwtunnel_state *lwtstate; - err = lwtunnel_build_state(dev, cfg->fc_encap_type, + err = lwtunnel_build_state(cfg->fc_encap_type, cfg->fc_encap, AF_INET6, cfg, &lwtstate); if (err) diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index c46f8cbf5ab5..6124e159c882 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -303,7 +303,7 @@ drop: return err; } -static int seg6_build_state(struct net_device *dev, struct nlattr *nla, +static int seg6_build_state(struct nlattr *nla, unsigned int family, const void *cfg, struct lwtunnel_state **ts) { diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 67b7a955de65..e4e4424f9eb1 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -133,7 +133,7 @@ drop: return -EINVAL; } -static int mpls_build_state(struct net_device *dev, struct nlattr *nla, +static int mpls_build_state(struct nlattr *nla, unsigned int family, const void *cfg, struct lwtunnel_state **ts) { -- cgit v1.2.3 From 72b8eaab245e66e5789378eb8812f72c8354dda6 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 29 Jan 2017 18:56:13 +0200 Subject: net/mlx4: Replace ENOSYS with better fitting error codes Conform the following warning: WARNING: ENOSYS means 'invalid syscall nr' and nothing else. Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx4/fw.c | 2 +- drivers/net/ethernet/mellanox/mlx4/main.c | 6 +++--- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index d5a9372ed84d..17a600b9f81b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1980,7 +1980,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; default: - return -ENOSYS; + return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 84bab9f0732e..25b5b32e958f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -672,7 +672,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); func_cap->physical_port = field; if (func_cap->physical_port != gen_or_port) { - err = -ENOSYS; + err = -EINVAL; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bffa6f345f2f..15ef787e71ba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -838,7 +838,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) */ if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); - return -ENOSYS; + return -EINVAL; } mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; @@ -896,7 +896,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) PF_CONTEXT_BEHAVIOUR_MASK) { mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); - return -ENOSYS; + return -EINVAL; } dev->caps.num_ports = func_cap.num_ports; @@ -3492,7 +3492,7 @@ slave_start: mlx4_enable_msi_x(dev); if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { - err = -ENOSYS; + err = -EOPNOTSUPP; mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 6da6e01d07bd..84c74a79752a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1396,7 +1396,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) case RES_MTT: return remove_mtt_ok((struct res_mtt *)res, extra); case RES_MAC: - return -ENOSYS; + return -EOPNOTSUPP; case RES_EQ: return remove_eq_ok((struct res_eq *)res); case RES_COUNTER: -- cgit v1.2.3 From 38353364015c64cde0ba30564069eb88874aa8ec Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 29 Jan 2017 18:56:14 +0200 Subject: net/mlx4_core: Device revision support The device revision field returned by the NodeInfo MAD is incorrect on ConnectX3 devices. This patch is driver side handling to complete a FW fix added at 2.11.1172. INIT_HCA - bit at offset 0x0C.12 is set to 1 so that FW will report correct device revision. Older FW versions won't be affected from turning on that bit, no capability bit is needed. Signed-off-by: Yishai Hadas Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 25b5b32e958f..de39a5db4a6f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1875,7 +1875,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = - (ilog2(cache_line_size()) - 4) << 5; + ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); #if defined(__LITTLE_ENDIAN) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); -- cgit v1.2.3 From ae5a2e29d11eb9c6f0ed0676dc801996a933605b Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Sun, 29 Jan 2017 18:56:15 +0200 Subject: net/mlx4_core: Add resource alloc/dealloc debugging In order to aid debugging of functions that take a resource but don't put it, add the last function name that successfully grabbed this resource. Signed-off-by: Matan Barak Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx4/resource_tracker.c | 49 ++++++++++++++++++++-- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 84c74a79752a..6fe9f76ae656 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -77,6 +77,7 @@ struct res_common { int from_state; int to_state; int removing; + const char *func_name; }; enum { @@ -837,6 +838,36 @@ static int mpt_mask(struct mlx4_dev *dev) return dev->caps.num_mpts - 1; } +static const char *mlx4_resource_type_to_str(enum mlx4_resource t) +{ + switch (t) { + case RES_QP: + return "QP"; + case RES_CQ: + return "CQ"; + case RES_SRQ: + return "SRQ"; + case RES_XRCD: + return "XRCD"; + case RES_MPT: + return "MPT"; + case RES_MTT: + return "MTT"; + case RES_MAC: + return "MAC"; + case RES_VLAN: + return "VLAN"; + case RES_COUNTER: + return "COUNTER"; + case RES_FS_RULE: + return "FS_RULE"; + case RES_EQ: + return "EQ"; + default: + return "INVALID RESOURCE"; + } +} + static void *find_res(struct mlx4_dev *dev, u64 res_id, enum mlx4_resource type) { @@ -846,9 +877,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id, res_id); } -static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, - enum mlx4_resource type, - void *res) +static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type, + void *res, const char *func_name) { struct res_common *r; int err = 0; @@ -861,6 +892,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, } if (r->state == RES_ANY_BUSY) { + mlx4_warn(dev, + "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n", + func_name, slave, res_id, mlx4_resource_type_to_str(type), + r->func_name); err = -EBUSY; goto exit; } @@ -872,6 +907,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, r->from_state = r->state; r->state = RES_ANY_BUSY; + r->func_name = func_name; if (res) *((struct res_common **)res) = r; @@ -881,6 +917,9 @@ exit: return err; } +#define get_res(dev, slave, res_id, type, res) \ + _get_res((dev), (slave), (res_id), (type), (res), __func__) + int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource type, u64 res_id, int *slave) @@ -911,8 +950,10 @@ 
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, res_id, type); - if (r) + if (r) { r->state = r->from_state; + r->func_name = ""; + } spin_unlock_irq(mlx4_tlock(dev)); } -- cgit v1.2.3 From 4b5e5b7ececc3265092712a4fef140cc6ef0d028 Mon Sep 17 00:00:00 2001 From: Alaa Hleihel Date: Sun, 29 Jan 2017 18:56:16 +0200 Subject: net/mlx4_core: Get num_tc using netdev_get_num_tc Avoid reading num_tc directly from struct net_device, but use the helper function netdev_get_num_tc. Fixes: bc6a4744b827 ("net/mlx4_en: num cores tx rings for every UP") Fixes: f5b6345ba8da ("net/mlx4_en: User prio mapping gets corrupted when changing number of channels") Signed-off-by: Alaa Hleihel Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 17a600b9f81b..785757f17687 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1788,7 +1788,7 @@ static int mlx4_en_set_channels(struct net_device *dev, netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); - if (dev->num_tc) + if (netdev_get_num_tc(dev)) mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 5886ad78058f..3ed42199d3f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -710,7 +710,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, u16 rings_p_up = priv->num_tx_rings_p_up; u8 up = 0; - if (dev->num_tc) + if (netdev_get_num_tc(dev)) return skb_tx_hash(dev, skb); if (skb_vlan_tag_present(skb)) -- cgit v1.2.3 From 297e1cf29eac13d3e5bb896d18c64a50b6bb48eb Mon Sep 17 00:00:00 2001 From: Ariel Levkovich Date: Sun, 29 Jan 2017 18:56:17 +0200 Subject: net/mlx4_en: Adding support of turning off link autonegotiation via ethtool This feature will allow the user to disable auto negotiation on the port for mlx4 devices while setting the speed is limited to 1GbE speeds. Other speeds will not be accepted in autoneg off mode. This functionality is permitted providing that the firmware is compatible with this feature. The above is determined by querying a new dedicated capability bit in the device. Signed-off-by: Ariel Levkovich Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 24 +++++++++++++++++++----- include/linux/mlx4/device.h | 7 ++++++- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 785757f17687..ca730d4abbb4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -902,6 +902,7 @@ mlx4_en_set_link_ksettings(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; __be32 proto_admin; + u8 cur_autoneg; int ret; u32 ptys_adv = ethtool2ptys_link_modes( @@ -931,10 +932,21 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return 0; } - proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 
- cpu_to_be32(ptys_adv) : - speed_set_ptys_admin(priv, speed, - ptys_reg.eth_proto_cap); + cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ? + AUTONEG_DISABLE : AUTONEG_ENABLE; + + if (link_ksettings->base.autoneg == AUTONEG_DISABLE) { + proto_admin = speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + if ((be32_to_cpu(proto_admin) & + (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) | + MLX4_PROT_MASK(MLX4_1000BASE_KX))) && + (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP)) + ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN; + } else { + proto_admin = cpu_to_be32(ptys_adv); + ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN; + } proto_admin &= ptys_reg.eth_proto_cap; if (!proto_admin) { @@ -942,7 +954,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return -EINVAL; /* nothing to change due to bad input */ } - if (proto_admin == ptys_reg.eth_proto_admin) + if ((proto_admin == ptys_reg.eth_proto_admin) && + ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) && + (link_ksettings->base.autoneg == cur_autoneg))) return 0; /* Nothing to change */ en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6533c16e27ad..c3ac945b2759 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1539,8 +1539,13 @@ enum mlx4_ptys_proto { MLX4_PTYS_EN = 1<<2, }; +enum mlx4_ptys_flags { + MLX4_PTYS_AN_DISABLE_CAP = 1 << 5, + MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6, +}; + struct mlx4_ptys_reg { - u8 resrvd1; + u8 flags; u8 local_port; u8 resrvd2; u8 proto_mask; -- cgit v1.2.3 From 40fb4fc1e18bc641a0d0887ac21943fd194c1fa9 Mon Sep 17 00:00:00 2001 From: Shaker Daibes Date: Sun, 29 Jan 2017 18:56:18 +0200 Subject: net/mlx4_en: Pass user MTU value to Firmware at set port command When starting the port, driver will inform Firmware about the actual MTU which does not include implicit headers, such as FCS or VLAN tags. Signed-off-by: Shaker Daibes Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8 +++ drivers/net/ethernet/mellanox/mlx4/fw.c | 2 +- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 7 ++- drivers/net/ethernet/mellanox/mlx4/port.c | 71 ++++++++++++++++++++++++-- include/linux/mlx4/device.h | 1 + 5 files changed, 82 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index cfb4a9d67b45..60a021c34881 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1695,6 +1695,14 @@ int mlx4_en_start_port(struct net_device *dev) priv->port, err); goto tx_err; } + + err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu); + if (err) { + en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n", + dev->mtu, priv->port, err); + goto tx_err; + } + /* Set default qp number */ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index de39a5db4a6f..3fe885ce1902 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -2983,7 +2983,7 @@ static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; + context->flags2 |= SET_PORT_GEN_PHV_VALID; if (phv_bit) context->phv_en |= SET_PORT_GEN_PHV_EN; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 88ee7d8a5923..1132d76ddfdf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -487,6 +487,7 @@ struct mlx4_slave_state { bool vst_qinq_supported; u8 function; dma_addr_t vhcr_dma; + u16 user_mtu[MLX4_MAX_PORTS + 1]; u16 mtu[MLX4_MAX_PORTS + 1]; __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; @@ -590,6 +591,7 @@ struct mlx4_mfunc_master_ctx { struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; int init_port_ref[MLX4_MAX_PORTS + 1]; u16 max_mtu[MLX4_MAX_PORTS + 1]; + u16 max_user_mtu[MLX4_MAX_PORTS + 1]; u8 pptx; u8 pprx; int disable_mcast_ref[MLX4_MAX_PORTS + 1]; @@ -787,7 +789,7 @@ enum { struct mlx4_set_port_general_context { u16 reserved1; - u8 v_ignore_fcs; + u8 flags2; u8 flags; union { u8 ignore_fcs; @@ -803,7 +805,8 @@ struct mlx4_set_port_general_context { u16 reserved4; u32 reserved5; u8 phv_en; - u8 reserved6[3]; + u8 reserved6[5]; + __be16 user_mtu; }; struct mlx4_set_port_rqp_calc_context { diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index b656dd5772e5..a7b0cdcb358a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -50,7 +50,8 @@ #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL -#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 +#define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1) +#define MLX4_FLAG2_V_USER_MTU_MASK BIT(5) #define MLX4_IGNORE_FCS_MASK 0x1 #define MLX4_TC_MAX_NUMBER 8 @@ -1239,6 +1240,38 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) return; } +static void +mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct 
mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 user_mtu, prev_user_mtu; + + /* User Mtu is configured as the max USER_MTU among all + * the functions on the port. + */ + user_mtu = be16_to_cpu(gen_context->user_mtu); + user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]); + prev_user_mtu = slave_st->user_mtu[port]; + slave_st->user_mtu[port] = user_mtu; + if (user_mtu > master->max_user_mtu[port]) + master->max_user_mtu[port] = user_mtu; + if (user_mtu < prev_user_mtu && + prev_user_mtu == master->max_user_mtu[port]) { + int i; + + slave_st->user_mtu[port] = user_mtu; + master->max_user_mtu[port] = user_mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_user_mtu[port] = + max_t(u16, master->max_user_mtu[port], + master->slave_state[i].user_mtu[port]); + } + gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]); +} + static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { @@ -1269,7 +1302,9 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, is_eth = op_mod; port_info = &priv->port[port]; - /* Slaves cannot perform SET_PORT operations except changing MTU */ + /* Slaves cannot perform SET_PORT operations, + * except for changing MTU and USER_MTU. + */ if (is_eth) { if (slave != dev->caps.function && in_modifier != MLX4_SET_PORT_GENERAL && @@ -1316,8 +1351,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, master->slave_state[i].mtu[port]); } } - gen_context->mtu = cpu_to_be16(master->max_mtu[port]); + + if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK) + mlx4_en_set_port_user_mtu(dev, slave, port, + gen_context); + /* Slave cannot change Global Pause configuration */ if (slave != mlx4_master_func_num(dev) && ((gen_context->pptx != master->pptx) || @@ -1608,6 +1647,30 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, } EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); +int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK; + context->user_mtu = cpu_to_be16(user_mtu); + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu); + int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) { struct mlx4_cmd_mailbox *mailbox; @@ -1619,7 +1682,7 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; + context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK; if (ignore_fcs_value) context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; else diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index c3ac945b2759..7e66e4f62858 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1374,6 +1374,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 
pprx, u8 pfcrx); +int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu); int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u8 promisc); int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time); -- cgit v1.2.3 From bf1f9396831ca7a4f527a326f1438f1bf85e2239 Mon Sep 17 00:00:00 2001 From: Shaker Daibes Date: Sun, 29 Jan 2017 18:56:19 +0200 Subject: net/mlx4_en: Check the enabling mtu flag in SET_PORT wrapper flow Make sure MTU mask flag is set using new field upon set port request. In addition, move this code into a helper function for better code readability. Signed-off-by: Shaker Daibes Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/port.c | 59 +++++++++++++++++++------------ 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index a7b0cdcb358a..57a10575a7aa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -52,6 +52,7 @@ #define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1) #define MLX4_FLAG2_V_USER_MTU_MASK BIT(5) +#define MLX4_FLAG_V_MTU_MASK BIT(0) #define MLX4_IGNORE_FCS_MASK 0x1 #define MLX4_TC_MAX_NUMBER 8 @@ -1240,6 +1241,38 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) return; } +static void +mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 mtu, prev_mtu; + + /* Mtu is configured as the max USER_MTU among all + * the functions on the port. + */ + mtu = be16_to_cpu(gen_context->mtu); + mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + prev_mtu = slave_st->mtu[port]; + slave_st->mtu[port] = mtu; + if (mtu > master->max_mtu[port]) + master->max_mtu[port] = mtu; + if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) { + int i; + + slave_st->mtu[port] = mtu; + master->max_mtu[port] = mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_mtu[port] = + max_t(u16, master->max_mtu[port], + master->slave_state[i].mtu[port]); + } + gen_context->mtu = cpu_to_be16(master->max_mtu[port]); +} + static void mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port, struct mlx4_set_port_general_context *gen_context) @@ -1278,7 +1311,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_port_info *port_info; struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; - struct mlx4_slave_state *slave_st = &master->slave_state[slave]; struct mlx4_set_port_rqp_calc_context *qpn_context; struct mlx4_set_port_general_context *gen_context; struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; @@ -1289,7 +1321,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, int base; u32 in_modifier; u32 promisc; - u16 mtu, prev_mtu; int err; int i, j; int offset; @@ -1332,26 +1363,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, break; case MLX4_SET_PORT_GENERAL: gen_context = inbox->buf; - /* Mtu is configured as the max MTU among all the - * the functions on the port. 
*/ - mtu = be16_to_cpu(gen_context->mtu); - mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + - ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); - prev_mtu = slave_st->mtu[port]; - slave_st->mtu[port] = mtu; - if (mtu > master->max_mtu[port]) - master->max_mtu[port] = mtu; - if (mtu < prev_mtu && prev_mtu == - master->max_mtu[port]) { - slave_st->mtu[port] = mtu; - master->max_mtu[port] = mtu; - for (i = 0; i < dev->num_slaves; i++) { - master->max_mtu[port] = - max(master->max_mtu[port], - master->slave_state[i].mtu[port]); - } - } - gen_context->mtu = cpu_to_be16(master->max_mtu[port]); + + if (gen_context->flags & MLX4_FLAG_V_MTU_MASK) + mlx4_en_set_port_mtu(dev, slave, port, + gen_context); if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK) mlx4_en_set_port_user_mtu(dev, slave, port, -- cgit v1.2.3 From 1f8176f7352abfe1a424ac7eb845b4c2e7c31e5f Mon Sep 17 00:00:00 2001 From: Shaker Daibes Date: Sun, 29 Jan 2017 18:56:20 +0200 Subject: net/mlx4_en: Check the enabling pptx/pprx flags in SET_PORT wrapper flow Make sure pptx/pprx mask flag is set using new fields upon set port request. In addition, move this code into a helper function for better code readability. Signed-off-by: Shaker Daibes Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_port.h | 1 - drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4 ++- drivers/net/ethernet/mellanox/mlx4/port.c | 42 ++++++++++++++++++---------- 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 040da4b16b1c..930f961fee42 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -35,7 +35,6 @@ #define _MLX4_EN_PORT_H_ -#define SET_PORT_GEN_ALL_VALID 0x7 #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 1132d76ddfdf..7a495090b0bc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -776,7 +776,9 @@ struct mlx4_vlan_table { int max; }; -#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK | \ + MLX4_FLAG_V_PPRX_MASK | \ + MLX4_FLAG_V_PPTX_MASK) #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 57a10575a7aa..5053c949148f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -53,6 +53,8 @@ #define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1) #define MLX4_FLAG2_V_USER_MTU_MASK BIT(5) #define MLX4_FLAG_V_MTU_MASK BIT(0) +#define MLX4_FLAG_V_PPRX_MASK BIT(1) +#define MLX4_FLAG_V_PPTX_MASK BIT(2) #define MLX4_IGNORE_FCS_MASK 0x1 #define MLX4_TC_MAX_NUMBER 8 @@ -1305,12 +1307,32 @@ mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port, gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]); } +static void +mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + + /* Slave cannot change Global Pause configuration */ + if (slave != mlx4_master_func_num(dev) && + (gen_context->pptx != master->pptx || + gen_context->pprx != master->pprx)) { + gen_context->pptx = master->pptx; + gen_context->pprx = 
master->pprx; + mlx4_warn(dev, "denying Global Pause change for slave:%d\n", + slave); + } else { + master->pptx = gen_context->pptx; + master->pprx = gen_context->pprx; + } +} + static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_port_info *port_info; - struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; struct mlx4_set_port_rqp_calc_context *qpn_context; struct mlx4_set_port_general_context *gen_context; struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; @@ -1372,19 +1394,11 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, mlx4_en_set_port_user_mtu(dev, slave, port, gen_context); - /* Slave cannot change Global Pause configuration */ - if (slave != mlx4_master_func_num(dev) && - ((gen_context->pptx != master->pptx) || - (gen_context->pprx != master->pprx))) { - gen_context->pptx = master->pptx; - gen_context->pprx = master->pprx; - mlx4_warn(dev, - "denying Global Pause change for slave:%d\n", - slave); - } else { - master->pptx = gen_context->pptx; - master->pprx = gen_context->pprx; - } + if (gen_context->flags & + (MLX4_FLAG_V_PPRX_MASK || MLX4_FLAG_V_PPTX_MASK)) + mlx4_en_set_port_global_pause(dev, slave, + gen_context); + break; case MLX4_SET_PORT_GID_TABLE: /* change to MULTIPLE entries: number of guest's gids -- cgit v1.2.3 From f50f212749e8a28803af3628acbeb85ee0458ed5 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 12:41:40 -0800 Subject: net: dsa: Add plumbing for port mirroring Add necessary plumbing at the slave network device level to have switch drivers implement ndo_setup_tc() and most particularly the cls_matchall classifier. We add support for two switch operations: port_add_mirror and port_del_mirror() which configure, on a per-port basis the mirror parameters requested from the cls_matchall classifier. Code is largely borrowed from the Mellanox Spectrum switch driver. Reviewed-by: Jiri Pirko Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/net/dsa.h | 33 +++++++++++++ net/dsa/dsa_priv.h | 3 ++ net/dsa/slave.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 172 insertions(+), 1 deletion(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index d5d618c3de64..2cb77e64d648 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -20,6 +20,8 @@ #include #include +struct tc_action; + enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA, @@ -139,6 +141,28 @@ struct dsa_switch_tree { const struct dsa_device_ops *tag_ops; }; +/* TC matchall action types, only mirroring for now */ +enum dsa_port_mall_action_type { + DSA_PORT_MALL_MIRROR, +}; + +/* TC mirroring entry */ +struct dsa_mall_mirror_tc_entry { + u8 to_local_port; + bool ingress; +}; + +/* TC matchall entry */ +struct dsa_mall_tc_entry { + struct list_head list; + unsigned long cookie; + enum dsa_port_mall_action_type type; + union { + struct dsa_mall_mirror_tc_entry mirror; + }; +}; + + struct dsa_port { struct dsa_switch *ds; unsigned int index; @@ -385,6 +409,15 @@ struct dsa_switch_ops { struct ethtool_rxnfc *nfc, u32 *rule_locs); int (*set_rxnfc)(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc); + + /* + * TC integration + */ + int (*port_mirror_add)(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress); + void (*port_mirror_del)(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror); }; struct dsa_switch_driver { diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 3022f2e42cdc..a5509b765fc0 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -41,6 +41,9 @@ struct dsa_slave_priv { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + + /* TC context */ + struct list_head mall_tc_list; }; /* dsa.c */ diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6881889e1a9b..09fc3e9462c1 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -16,12 +16,17 @@ #include #include #include +#include #include #include +#include +#include #include #include #include "dsa_priv.h" +static bool dsa_slave_dev_check(struct net_device *dev); + /* slave mii_bus handling ***************************************************/ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg) { @@ -995,6 +1000,133 @@ static int dsa_slave_get_phys_port_name(struct net_device *dev, return 0; } +static struct dsa_mall_tc_entry * +dsa_slave_mall_tc_entry_find(struct dsa_slave_priv *p, + unsigned long cookie) +{ + struct dsa_mall_tc_entry *mall_tc_entry; + + list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) + if (mall_tc_entry->cookie == cookie) + return mall_tc_entry; + + return NULL; +} + +static int dsa_slave_add_cls_matchall(struct net_device *dev, + __be16 protocol, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_mall_tc_entry *mall_tc_entry; + struct dsa_switch *ds = p->dp->ds; + struct net *net = dev_net(dev); + struct dsa_slave_priv *to_p; + struct net_device *to_dev; + const struct tc_action *a; + int err = -EOPNOTSUPP; + LIST_HEAD(actions); + int ifindex; + + if (!ds->ops->port_mirror_add) + return err; + + if (!tc_single_action(cls->exts)) + return err; + + tcf_exts_to_list(cls->exts, &actions); + a = list_first_entry(&actions, struct tc_action, list); + + if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { + struct dsa_mall_mirror_tc_entry *mirror; + + ifindex = tcf_mirred_ifindex(a); + to_dev = __dev_get_by_index(net, ifindex); + if 
(!to_dev) + return -EINVAL; + + if (!dsa_slave_dev_check(to_dev)) + return -EOPNOTSUPP; + + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = DSA_PORT_MALL_MIRROR; + mirror = &mall_tc_entry->mirror; + + to_p = netdev_priv(to_dev); + + mirror->to_local_port = to_p->dp->index; + mirror->ingress = ingress; + + err = ds->ops->port_mirror_add(ds, p->dp->index, mirror, + ingress); + if (err) { + kfree(mall_tc_entry); + return err; + } + + list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); + } + + return 0; +} + +static void dsa_slave_del_cls_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_mall_tc_entry *mall_tc_entry; + struct dsa_switch *ds = p->dp->ds; + + if (!ds->ops->port_mirror_del) + return; + + mall_tc_entry = dsa_slave_mall_tc_entry_find(p, cls->cookie); + if (!mall_tc_entry) + return; + + list_del(&mall_tc_entry->list); + + switch (mall_tc_entry->type) { + case DSA_PORT_MALL_MIRROR: + ds->ops->port_mirror_del(ds, p->dp->index, + &mall_tc_entry->mirror); + break; + default: + WARN_ON(1); + } + + kfree(mall_tc_entry); +} + +static int dsa_slave_setup_tc(struct net_device *dev, u32 handle, + __be16 protocol, struct tc_to_netdev *tc) +{ + bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + int ret = -EOPNOTSUPP; + + switch (tc->type) { + case TC_SETUP_MATCHALL: + switch (tc->cls_mall->command) { + case TC_CLSMATCHALL_REPLACE: + return dsa_slave_add_cls_matchall(dev, protocol, + tc->cls_mall, + ingress); + case TC_CLSMATCHALL_DESTROY: + dsa_slave_del_cls_matchall(dev, tc->cls_mall); + return 0; + } + default: + break; + } + + return ret; +} + void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops) { ops->get_sset_count = dsa_cpu_port_get_sset_count; @@ -1069,6 +1201,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, .ndo_get_phys_port_name = dsa_slave_get_phys_port_name, + .ndo_setup_tc = dsa_slave_setup_tc, }; static const struct switchdev_ops dsa_slave_switchdev_ops = { @@ -1285,7 +1418,8 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, if (slave_dev == NULL) return -ENOMEM; - slave_dev->features = master->vlan_features; + slave_dev->features = master->vlan_features | NETIF_F_HW_TC; + slave_dev->hw_features |= NETIF_F_HW_TC; slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; eth_hw_addr_inherit(slave_dev, master); slave_dev->priv_flags |= IFF_NO_QUEUE; @@ -1304,6 +1438,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, p = netdev_priv(slave_dev); p->dp = &ds->ports[port]; + INIT_LIST_HEAD(&p->mall_tc_list); p->xmit = dst->tag_ops->xmit; p->old_pause = -1; -- cgit v1.2.3 From 151da0171e12607b987d9a028a046104358b7f0d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 12:41:41 -0800 Subject: net: dsa: b53: Add mirror capture register definitions Add definitions for the different Roboswitch registers relevant for ingress and egress mirroring. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_regs.h | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index dac0af4e2cd0..9fd24c418fa4 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -206,6 +206,38 @@ #define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */ #define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */ +/* Mirror capture control register (16 bit) */ +#define B53_MIR_CAP_CTL 0x10 +#define CAP_PORT_MASK 0xf +#define BLK_NOT_MIR BIT(14) +#define MIRROR_EN BIT(15) + +/* Ingress mirror control register (16 bit) */ +#define B53_IG_MIR_CTL 0x12 +#define MIRROR_MASK 0x1ff +#define DIV_EN BIT(13) +#define MIRROR_FILTER_MASK 0x3 +#define MIRROR_FILTER_SHIFT 14 +#define MIRROR_ALL 0 +#define MIRROR_DA 1 +#define MIRROR_SA 2 + +/* Ingress mirror divider register (16 bit) */ +#define B53_IG_MIR_DIV 0x14 +#define IN_MIRROR_DIV_MASK 0x3ff + +/* Ingress mirror MAC address register (48 bit) */ +#define B53_IG_MIR_MAC 0x16 + +/* Egress mirror control register (16 bit) */ +#define B53_EG_MIR_CTL 0x1C + +/* Egress mirror divider register (16 bit) */ +#define B53_EG_MIR_DIV 0x1E + +/* Egress mirror MAC address register (48 bit) */ +#define B53_EG_MIR_MAC 0x20 + /* Device ID register (8 or 32 bit) */ #define B53_DEVICE_ID 0x30 -- cgit v1.2.3 From ed3af5fd08ebe3b39cdd37ec2a473ef98e05ce39 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 12:41:42 -0800 Subject: net: dsa: b53: Add support for port mirroring Add support for configuring port mirroring through the cls_matchall classifier. We do a full ingress or egress capture towards the capture port. Future improvements could include leveraging the divider to allow less frames to be captured, as well as matching specific MAC DA/SA. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 67 ++++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/b53/b53_priv.h | 4 +++ 2 files changed, 71 insertions(+) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 3a7d16b6c3eb..8cf4801994e8 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1450,6 +1450,71 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) return DSA_TAG_PROTO_NONE; } +int b53_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, bool ingress) +{ + struct b53_device *dev = ds->priv; + u16 reg, loc; + + if (ingress) + loc = B53_IG_MIR_CTL; + else + loc = B53_EG_MIR_CTL; + + b53_read16(dev, B53_MGMT_PAGE, loc, ®); + reg &= ~MIRROR_MASK; + reg |= BIT(port); + b53_write16(dev, B53_MGMT_PAGE, loc, reg); + + b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); + reg &= ~CAP_PORT_MASK; + reg |= mirror->to_local_port; + reg |= MIRROR_EN; + b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); + + return 0; +} +EXPORT_SYMBOL(b53_mirror_add); + +void b53_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct b53_device *dev = ds->priv; + bool loc_disable = false, other_loc_disable = false; + u16 reg, loc; + + if (mirror->ingress) + loc = B53_IG_MIR_CTL; + else + loc = B53_EG_MIR_CTL; + + /* Update the desired ingress/egress register */ + b53_read16(dev, B53_MGMT_PAGE, loc, ®); + reg &= ~BIT(port); + if (!(reg & MIRROR_MASK)) + loc_disable = true; + b53_write16(dev, B53_MGMT_PAGE, loc, reg); + + /* Now look at the other one to know if we can disable mirroring + * entirely + */ + if (mirror->ingress) + b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, ®); + else + b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, ®); + if (!(reg & MIRROR_MASK)) + other_loc_disable = true; + + b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); + /* Both no longer have ports, let's disable mirroring */ + if (loc_disable && other_loc_disable) { + reg &= ~MIRROR_EN; + reg &= ~mirror->to_local_port; + } + b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); +} +EXPORT_SYMBOL(b53_mirror_del); + static const struct dsa_switch_ops b53_switch_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, @@ -1474,6 +1539,8 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, + .port_mirror_add = b53_mirror_add, + .port_mirror_del = b53_mirror_del, }; struct b53_chip_data { diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 9d87889728ac..a9dc90a01438 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -407,5 +407,9 @@ int b53_fdb_del(struct dsa_switch *ds, int port, int b53_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, int (*cb)(struct switchdev_obj *obj)); +int b53_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, bool ingress); +void b53_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror); #endif -- cgit v1.2.3 From ec960de61503ef349588dccfa3ae02efabcc2762 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 12:41:43 -0800 Subject: net: dsa: bcm_sf2: Add support for port mirroring We can use b53_mirror_add and b53_mirror_del because the Starfighter 2 is register compatible in that specific case. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index be282b430c50..2be963252ca5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1047,6 +1047,8 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_fdb_del = b53_fdb_del, .get_rxnfc = bcm_sf2_get_rxnfc, .set_rxnfc = bcm_sf2_set_rxnfc, + .port_mirror_add = b53_mirror_add, + .port_mirror_del = b53_mirror_del, }; struct bcm_sf2_of_data { -- cgit v1.2.3 From cdaf25dfc058ee6f7a7b2e2353de00fa288c0cd4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 30 Jan 2017 10:55:04 +0100 Subject: smc: some potential use after free bugs Say we got really unlucky and these failed on the last iteration, then it could lead to a use after free bug. Fixes: cd6851f30386 ("smc: remote memory buffers (RMBs)") Signed-off-by: Dan Carpenter Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_core.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 8b1d34378829..0eac633fb354 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -532,6 +532,7 @@ int smc_sndbuf_create(struct smc_sock *smc) __GFP_NORETRY); if (!sndbuf_desc->cpu_addr) { kfree(sndbuf_desc); + sndbuf_desc = NULL; /* if send buffer allocation has failed, * try a smaller one */ @@ -543,6 +544,7 @@ int smc_sndbuf_create(struct smc_sock *smc) if (rc) { kfree(sndbuf_desc->cpu_addr); kfree(sndbuf_desc); + sndbuf_desc = NULL; continue; /* if mapping failed, try smaller one */ } sndbuf_desc->used = 1; @@ -596,6 +598,7 @@ int smc_rmb_create(struct smc_sock *smc) __GFP_NORETRY); if (!rmb_desc->cpu_addr) { kfree(rmb_desc); + rmb_desc = NULL; /* if RMB allocation has failed, * try a smaller one */ @@ -607,6 +610,7 @@ int smc_rmb_create(struct smc_sock *smc) if (rc) { kfree(rmb_desc->cpu_addr); kfree(rmb_desc); + rmb_desc = NULL; continue; /* if mapping failed, try smaller one */ } rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, @@ -619,6 +623,7 @@ int smc_rmb_create(struct smc_sock *smc) DMA_FROM_DEVICE); kfree(rmb_desc->cpu_addr); kfree(rmb_desc); + rmb_desc = NULL; continue; } rmb_desc->used = 1; -- cgit v1.2.3 From 891daf49b0f6e900af6bde51e2dc2cbc72b83f4e Mon Sep 17 00:00:00 2001 From: Romain Perier Date: Mon, 30 Jan 2017 20:29:33 +0100 Subject: net: dsa: mv88e6xxx: Don't forbid MDIO I/Os for PHY addr >= num_of_ports Some Marvell ethernet switches have internal ethernet transceivers with hardcoded phy addresses. These addresses can be greater than the number of ports or its value might be different than the associated port number. This is for example the case for MV88E6341 that has 6 ports and internal Port 1 to Port4 PHYs mapped at SMI addresses from 0x11 to 0x14. This commits fixes the issue by removing the condition in MDIO callbacks. Signed-off-by: Romain Perier Reviewed-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 84cba32443de..1344dad21f46 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2917,9 +2917,6 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) u16 val; int err; - if (phy >= mv88e6xxx_num_ports(chip)) - return 0xffff; - if (!chip->info->ops->phy_read) return -EOPNOTSUPP; @@ -2936,9 +2933,6 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) struct mv88e6xxx_chip *chip = mdio_bus->chip; int err; - if (phy >= mv88e6xxx_num_ports(chip)) - return 0xffff; - if (!chip->info->ops->phy_write) return -EOPNOTSUPP; -- cgit v1.2.3 From a75961d0ebfd8866e03855bfdbb5733a268b0a44 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Mon, 30 Jan 2017 20:29:34 +0100 Subject: net: dsa: mv88e6xxx: Add support for ethernet switch 88E6341 The Marvell 88E6341 device is single-chip, 6-port Ethernet switch with four integrated 10/100/1000Mbps Ethernet transceivers and one high speed SerDes interfaces. It is partially compatible with switches of family 88E6352 and switches of family 88E6390. This commit adds an initial support for this switch by describing its capabilities to the driver and introducing a new family. Signed-off-by: Gregory CLEMENT Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 54 +++++++++++++++++++++++++++++++++-- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 19 +++++++++++- 2 files changed, 69 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 1344dad21f46..6a583527917d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -702,6 +702,11 @@ static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) return chip->info->family == MV88E6XXX_FAMILY_6320; } +static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6341; +} + static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip) { return chip->info->family == MV88E6XXX_FAMILY_6351; @@ -1726,7 +1731,8 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || - mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) { + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) || + mv88e6xxx_6341_family(chip)) { struct mv88e6xxx_vtu_entry vstp; /* Adding a VTU entry requires a valid STU entry. 
As VSTP is not @@ -2577,7 +2583,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) || - mv88e6xxx_6185_family(chip)) + mv88e6xxx_6185_family(chip) || mv88e6xxx_6341_family(chip)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) { @@ -2631,7 +2637,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || - mv88e6xxx_6320_family(chip)) { + mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) { /* Port ATU control: disable limiting the number of * address database entries that this port is allowed * to use. @@ -3650,6 +3656,34 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .reset = mv88e6352_g1_reset, }; +static const struct mv88e6xxx_ops mv88e6341_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, +}; + static const struct mv88e6xxx_ops mv88e6390_ops = { /* MV88E6XXX_FAMILY_6390 */ .get_eeprom = mv88e6xxx_g2_get_eeprom8, @@ -4037,6 +4071,20 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ops = &mv88e6321_ops, }, + [MV88E6341] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6341, + .family = MV88E6XXX_FAMILY_6341, + .name = "Marvell 88E6341", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .global1_addr = 0x1b, + .age_time_coeff = 3750, + .tag_protocol = DSA_TAG_PROTO_EDSA, + .flags = MV88E6XXX_FLAGS_FAMILY_6341, + .ops = &mv88e6341_ops, + }, + [MV88E6350] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, .family = MV88E6XXX_FAMILY_6351, diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index e126ed00937b..76837d2022bb 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -100,6 +100,7 @@ #define PORT_SWITCH_ID_PROD_NUM_6240 0x240 #define PORT_SWITCH_ID_PROD_NUM_6290 0x290 #define PORT_SWITCH_ID_PROD_NUM_6321 0x310 +#define PORT_SWITCH_ID_PROD_NUM_6341 0x341 #define PORT_SWITCH_ID_PROD_NUM_6352 0x352 #define PORT_SWITCH_ID_PROD_NUM_6350 0x371 #define PORT_SWITCH_ID_PROD_NUM_6351 0x375 @@ -382,7 +383,7 @@ #define 
GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10) #define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff #define GLOBAL2_EEPROM_DATA 0x15 -#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390 */ +#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390, 6341 */ #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 0x17 #define GLOBAL2_SMI_PHY_CMD 0x18 @@ -434,6 +435,7 @@ enum mv88e6xxx_model { MV88E6290, MV88E6320, MV88E6321, + MV88E6341, MV88E6350, MV88E6351, MV88E6352, @@ -449,6 +451,7 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ MV88E6XXX_FAMILY_6320, /* 6320 6321 */ + MV88E6XXX_FAMILY_6341, /* 6141 6341 */ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ @@ -602,6 +605,20 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) +#define MV88E6XXX_FLAGS_FAMILY_6341 \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_G1_ATU_FID | \ + MV88E6XXX_FLAG_G1_VTU_FID | \ + MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_INT | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SERDES) + #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_G1_ATU_FID | \ MV88E6XXX_FLAG_G1_VTU_FID | \ -- cgit v1.2.3 From 1558727a1c1b65459cb9ae31f36ace45d56c6767 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Mon, 30 Jan 2017 20:29:35 +0100 Subject: net: dsa: mv88e6xxx: Add support for ethernet switch 88E6141 The Marvell 88E6341 device is single-chip, 6-port Ethernet switch with four integrated 10/100/1000Mbps Ethernet transceivers and one high speed SerDes interfaces. It belongs to the Topaz family and unlike the 88E6341 it does not have a TCAM. Signed-off-by: Gregory CLEMENT Reviewed-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 42 +++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 2 ++ 2 files changed, 44 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 6a583527917d..bf385377a461 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3656,6 +3656,34 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .reset = mv88e6352_g1_reset, }; +static const struct mv88e6xxx_ops mv88e6141_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, +}; + static const struct mv88e6xxx_ops mv88e6341_ops = { /* MV88E6XXX_FAMILY_6341 */ .get_eeprom = mv88e6xxx_g2_get_eeprom8, @@ -4071,6 +4099,20 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ops = &mv88e6321_ops, }, + [MV88E6141] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6141, + .family = MV88E6XXX_FAMILY_6341, + .name = "Marvell 88E6341", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .global1_addr = 0x1b, + .age_time_coeff = 3750, + .tag_protocol = DSA_TAG_PROTO_EDSA, + .flags = MV88E6XXX_FLAGS_FAMILY_6341, + .ops = &mv88e6141_ops, + }, + [MV88E6341] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6341, .family = MV88E6XXX_FAMILY_6341, diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 76837d2022bb..9c5c0472b211 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -87,6 +87,7 @@ #define PORT_SWITCH_ID_PROD_NUM_6131 0x106 #define PORT_SWITCH_ID_PROD_NUM_6320 0x115 #define PORT_SWITCH_ID_PROD_NUM_6123 0x121 +#define PORT_SWITCH_ID_PROD_NUM_6141 0x340 #define PORT_SWITCH_ID_PROD_NUM_6161 0x161 #define PORT_SWITCH_ID_PROD_NUM_6165 0x165 #define PORT_SWITCH_ID_PROD_NUM_6171 0x171 @@ -421,6 +422,7 @@ enum mv88e6xxx_model { MV88E6097, MV88E6123, MV88E6131, + MV88E6141, MV88E6161, MV88E6165, MV88E6171, -- cgit v1.2.3 From 1a0bee6c1e788218fd1d141db320db970aace7f0 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 29 Jan 2017 15:07:34 +0300 Subject: sh_eth: rename EESIPR bits Since the commit b0ca2a21f769 ("sh_eth: Add support of SH7763 to sh_eth") the *enum* declaring the EESIPR bits (interrupt mask) went out of sync with the *enum* declaring the EESR bits (interrupt status) WRT bit naming and formatting. 
I'd like to restore the consistency by using EESIPR as the bit name prefix, renaming the *enum* to EESIPR_BIT, and (finally) renaming the bits according to the available Renesas SH77{34|63} manuals; additionally, reconstruct couple names using the EESR bit declaration above... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 22 ++++++++++----------- drivers/net/ethernet/renesas/sh_eth.h | 36 ++++++++++++++++++++++------------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 45a7a6ba7644..34c5df827bf4 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -556,7 +556,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -702,7 +702,7 @@ static struct sh_eth_cpu_data sh7757_data = { .register_type = SH_ETH_REG_FAST_SH4, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -769,7 +769,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -800,7 +800,7 @@ static struct sh_eth_cpu_data sh7734_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003f07ff, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -830,7 +830,7 @@ static struct sh_eth_cpu_data sh7763_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003f07ff, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -851,7 +851,7 @@ static struct sh_eth_cpu_data sh7763_data = { static struct sh_eth_cpu_data sh7619_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, .apr = 1, .mpr = 1, @@ -862,7 +862,7 @@ static struct sh_eth_cpu_data sh7619_data = { static struct sh_eth_cpu_data sh771x_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, .tsu = 1, }; @@ -1547,10 +1547,10 @@ static void sh_eth_emac_interrupt(struct net_device *ndev) sh_eth_rcv_snd_disable(ndev); } else { /* Link Up */ - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0); /* clear int */ sh_eth_modify(ndev, ECSR, 0, 0); - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 
DMAC_M_ECI); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP); /* enable tx and rx */ sh_eth_rcv_snd_enable(ndev); } @@ -1652,7 +1652,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) * bit... */ intr_enable = sh_eth_read(ndev, EESIPR); - intr_status &= intr_enable | DMAC_M_ECI; + intr_status &= intr_enable | EESIPR_ECIIP; if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | cd->eesr_err_check)) ret = IRQ_HANDLED; @@ -3199,7 +3199,7 @@ static int sh_eth_wol_setup(struct net_device *ndev) /* Only allow ECI interrupts */ synchronize_irq(ndev->irq); napi_disable(&mdp->napi); - sh_eth_write(ndev, DMAC_M_ECI, EESIPR); + sh_eth_write(ndev, EESIPR_ECIIP, EESIPR); /* Enable MagicPacket */ sh_eth_modify(ndev, ECMR, 0, ECMR_MPDE); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a1bb8cc413dc..82b24162c18d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -268,19 +268,29 @@ enum EESR_BIT { EESR_TFE | EESR_TDE) /* EESIPR */ -enum DMAC_IM_BIT { - DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000, - DMAC_M_RABT = 0x02000000, - DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000, - DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000, - DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000, - DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000, - DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800, - DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200, - DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080, - DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008, - DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002, - DMAC_M_RINT1 = 0x00000001, +enum EESIPR_BIT { + EESIPR_TWBIP = 0x40000000, + EESIPR_TABTIP = 0x04000000, + EESIPR_RABTIP = 0x02000000, + EESIPR_RFCOFIP = 0x01000000, + EESIPR_ADEIP = 0x00800000, + EESIPR_ECIIP = 0x00400000, + EESIPR_FTCIP = 0x00200000, + EESIPR_TDEIP = 0x00100000, + EESIPR_TFUFIP = 0x00080000, + EESIPR_FRIP = 0x00040000, + EESIPR_RDEIP = 0x00020000, + EESIPR_RFOFIP = 0x00010000, + EESIPR_CNDIP = 0x00000800, + EESIPR_DLCIP = 0x00000400, + EESIPR_CDIP = 0x00000200, + EESIPR_TROIP = 0x00000100, + EESIPR_RMAFIP = 0x00000080, + EESIPR_RRFIP = 0x00000010, + EESIPR_RTLFIP = 0x00000008, + EESIPR_RTSFIP = 0x00000004, + EESIPR_PREIP = 0x00000002, + EESIPR_CERFIP = 0x00000001, }; /* Receive descriptor 0 bits */ -- cgit v1.2.3 From 00300b2aac27556e2829cfd047b787af0f13b081 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 29 Jan 2017 15:08:09 +0300 Subject: sh_eth: add missing EESIPR bits Renesas SH77{34|63} manuals describe more EESIPR bits than the current driver. Declare the new bits with the end goal of using the bit names instead of the bare numbers for the 'sh_eth_cpu_data::eesipr_value' initializers... Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 82b24162c18d..a6753ccba711 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -269,13 +269,17 @@ enum EESR_BIT { /* EESIPR */ enum EESIPR_BIT { - EESIPR_TWBIP = 0x40000000, + EESIPR_TWB1IP = 0x80000000, + EESIPR_TWBIP = 0x40000000, /* same as TWB0IP */ + EESIPR_TC1IP = 0x20000000, + EESIPR_TUCIP = 0x10000000, + EESIPR_ROCIP = 0x08000000, EESIPR_TABTIP = 0x04000000, EESIPR_RABTIP = 0x02000000, EESIPR_RFCOFIP = 0x01000000, EESIPR_ADEIP = 0x00800000, EESIPR_ECIIP = 0x00400000, - EESIPR_FTCIP = 0x00200000, + EESIPR_FTCIP = 0x00200000, /* same as TC0IP */ EESIPR_TDEIP = 0x00100000, EESIPR_TFUFIP = 0x00080000, EESIPR_FRIP = 0x00040000, @@ -286,6 +290,8 @@ enum EESIPR_BIT { EESIPR_CDIP = 0x00000200, EESIPR_TROIP = 0x00000100, EESIPR_RMAFIP = 0x00000080, + EESIPR_CEEFIP = 0x00000040, + EESIPR_CELFIP = 0x00000020, EESIPR_RRFIP = 0x00000010, EESIPR_RTLFIP = 0x00000008, EESIPR_RTSFIP = 0x00000004, -- cgit v1.2.3 From 2b2d3eb41c920b47df2fcedd1489cf748bd09466 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 29 Jan 2017 15:13:48 +0300 Subject: sh_eth: stop using bare numbers for EESIPR values Now that we have almost all EESIPR bits declared (and those that are still not are most probably reserved anyway) we can at last replace the bare numbers used for 'sh_eth_cpu_data::eesipr_value' initializers with the bit names ORed together... Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 91 ++++++++++++++++++++++++++++++----- 1 file changed, 80 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 34c5df827bf4..2f08d2735fe2 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -518,7 +518,14 @@ static struct sh_eth_cpu_data r7s72100_data = { .ecsr_value = ECSR_ICD, .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xe77f009f, + .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP | + EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP | + EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -556,7 +563,14 @@ static struct sh_eth_cpu_data r8a7740_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -603,7 +617,12 @@ static struct sh_eth_cpu_data r8a777x_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | 
EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -626,7 +645,12 @@ static struct sh_eth_cpu_data r8a779x_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -667,7 +691,12 @@ static struct sh_eth_cpu_data sh7724_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -702,7 +731,14 @@ static struct sh_eth_cpu_data sh7757_data = { .register_type = SH_ETH_REG_FAST_SH4, - .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -769,7 +805,14 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -800,7 +843,13 @@ static struct sh_eth_cpu_data sh7734_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 
@@ -830,7 +879,13 @@ static struct sh_eth_cpu_data sh7763_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -851,7 +906,14 @@ static struct sh_eth_cpu_data sh7763_data = { static struct sh_eth_cpu_data sh7619_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .apr = 1, .mpr = 1, @@ -862,7 +924,14 @@ static struct sh_eth_cpu_data sh7619_data = { static struct sh_eth_cpu_data sh771x_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tsu = 1, }; -- cgit v1.2.3 From 33e962c8871f015f5c8978384553dddcf5b81b22 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Sun, 29 Jan 2017 12:40:52 +0100 Subject: rt2x00: fix clk_get call clk_get() takes two arguments and might return ERR_PTR(), so we have to nullify pointer on that case, to do not break further call to clk_get_rate(). Reported-by: Felix Fietkau Fixes: 34db70b92fae ("rt2x00: add copy of clk for soc devices") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00soc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index 362f9d3b98fc..29250f79c4a4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c @@ -92,7 +92,10 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops) rt2x00dev->hw = hw; rt2x00dev->irq = platform_get_irq(pdev, 0); rt2x00dev->name = pdev->dev.driver->name; - rt2x00dev->clk = clk_get(&pdev->dev); + + rt2x00dev->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(rt2x00dev->clk)) + rt2x00dev->clk = NULL; rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC); -- cgit v1.2.3 From d546530e569463a7f0a4ead482d277b5ef42a3aa Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 30 Jan 2017 12:34:32 +0200 Subject: iwlwifi: alloc memory dynamically also for DVM For old firmwares the memory wasn't allocated, resulting in panic. Make it dynamically allocated as well. Allow any order of functions call. 
Fixes: eef187a7b8a1 ("iwlwifi: enlarge number of ucode sections") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho Tested-by: Kalle Valo Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 1d1af4bc1530..d22821501676 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -291,11 +291,33 @@ static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces, return &pieces->img[type].sec[sec]; } +static void alloc_sec_data(struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type, + int sec) +{ + struct fw_img_parsing *img = &pieces->img[type]; + struct fw_sec *sec_memory; + int size = sec + 1; + size_t alloc_size = sizeof(*img->sec) * size; + + if (img->sec && img->sec_counter >= size) + return; + + sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL); + if (!sec_memory) + return; + + img->sec = sec_memory; + img->sec_counter = size; +} + static void set_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, const void *data) { + alloc_sec_data(pieces, type, sec); + pieces->img[type].sec[sec].data = data; } @@ -304,6 +326,8 @@ static void set_sec_size(struct iwl_firmware_pieces *pieces, int sec, size_t size) { + alloc_sec_data(pieces, type, sec); + pieces->img[type].sec[sec].size = size; } @@ -319,6 +343,8 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces, int sec, u32 offset) { + alloc_sec_data(pieces, type, sec); + pieces->img[type].sec[sec].offset = offset; } -- cgit v1.2.3 From 2ef0359031b9ed891ca381b2687186fb52b277f8 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 27 Jan 2017 12:27:45 +0000 Subject: brcmfmac: provide a value for struct wowlan_support::max_nd_match_sets The driver advertises support for WOWLAN_NETDETECT but did not specify maximum amount of netdetect match sets. This was no issue due to a bug in nl80211. As that has been fixed, brcmfmac also needs fixing. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index ec1171c523ab..302a78deeff1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6358,6 +6358,8 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp) if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) { if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ND)) { brcmf_wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + brcmf_wowlan_support.max_nd_match_sets = + BRCMF_PNO_MAX_PFN_COUNT; init_waitqueue_head(&cfg->wowl.nd_data_wait); } } -- cgit v1.2.3 From d29afe91af5995306d940b3dfee2419e0bb24a51 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 27 Jan 2017 12:27:46 +0000 Subject: brcmfmac: fix handling firmware results for wowl netdetect For wowl netdetect the event data changed for newer chips. This was recently fixed for scheduled scan, but same change is needed for wowl netdetect. Removing now pointles += operation from both result handlers. 
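The underlying issue is generic: once newer firmware can prepend a larger, versioned result header, any handler that advances the payload pointer by a hard-coded sizeof() starts reading from the wrong offset. A deliberately simplified illustration of the version-aware alternative (hypothetical structures, not the actual brcmfmac layout):

#include <linux/types.h>
#include <asm/byteorder.h>

struct evt_hdr_v1 { __le32 version; __le32 count; };
struct evt_hdr_v2 { __le32 version; __le32 count; __le32 flags; };

/* Derive the payload start from the advertised version instead of
 * assuming the v1 header size. */
static void *evt_payload(void *buf)
{
        const struct evt_hdr_v1 *hdr = buf;

        if (le32_to_cpu(hdr->version) >= 2)
                return (u8 *)buf + sizeof(struct evt_hdr_v2);

        return (u8 *)buf + sizeof(struct evt_hdr_v1);
}

In the patch below, brcmf_get_netinfo_array() plays that role for both the scheduled-scan and the wowl netdetect result handlers.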
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 302a78deeff1..c81d78ff0113 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3330,7 +3330,6 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, goto out_err; } - data += sizeof(struct brcmf_pno_scanresults_le); netinfo_start = brcmf_get_netinfo_array(pfn_result); for (i = 0; i < result_count; i++) { @@ -3478,8 +3477,7 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, return -EINVAL; } - data += sizeof(struct brcmf_pno_scanresults_le); - netinfo = (struct brcmf_pno_net_info_le *)data; + netinfo = brcmf_get_netinfo_array(pfn_result); memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len); cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len; cfg->wowl.nd->n_channels = 1; -- cgit v1.2.3 From 0b57010fc18e12c19d14379cd739d4eb7c3898f3 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 27 Jan 2017 12:27:47 +0000 Subject: brcmfmac: allow wowlan support to be per device The wowlan support is (partially) determined dynamic by checking the device/firmware capabilities. So they can differ per device. So it is not possible to use a static global. Instead use the global as a template and use kmemdup(). When kmemdup() fails the template is used unmodified. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 26 ++++++++++++++++------ 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index c81d78ff0113..ab1f9f2395ad 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6339,7 +6339,7 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy) } #ifdef CONFIG_PM -static struct wiphy_wowlan_support brcmf_wowlan_support = { +static const struct wiphy_wowlan_support brcmf_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, .n_patterns = BRCMF_WOWL_MAXPATTERNS, .pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE, @@ -6352,21 +6352,29 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp) { #ifdef CONFIG_PM struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct wiphy_wowlan_support *wowl; + + wowl = kmemdup(&brcmf_wowlan_support, sizeof(brcmf_wowlan_support), + GFP_KERNEL); + if (!wowl) { + brcmf_err("only support basic wowlan features\n"); + wiphy->wowlan = &brcmf_wowlan_support; + return; + } if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) { if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ND)) { - brcmf_wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; - brcmf_wowlan_support.max_nd_match_sets = - BRCMF_PNO_MAX_PFN_COUNT; + wowl->flags |= WIPHY_WOWLAN_NET_DETECT; + wowl->max_nd_match_sets = BRCMF_PNO_MAX_PFN_COUNT; init_waitqueue_head(&cfg->wowl.nd_data_wait); } } if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) { - brcmf_wowlan_support.flags |= 
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY; - brcmf_wowlan_support.flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE; + wowl->flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY; + wowl->flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE; } - wiphy->wowlan = &brcmf_wowlan_support; + wiphy->wowlan = wowl; #endif } @@ -6747,6 +6755,10 @@ static void brcmf_free_wiphy(struct wiphy *wiphy) kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels); kfree(wiphy->bands[NL80211_BAND_5GHZ]); } +#if IS_ENABLED(CONFIG_PM) + if (wiphy->wowlan != &brcmf_wowlan_support) + kfree(wiphy->wowlan); +#endif wiphy_free(wiphy); } -- cgit v1.2.3 From 2a2a5d1835b6f0baa0e207426d3c79eefd32e253 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 27 Jan 2017 12:27:48 +0000 Subject: brcmfmac: add .update_connect_params() callback Add support for the .update_connect_params() callback for roaming or subsequent (re)association. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index ab1f9f2395ad..0e28d0710af5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5067,6 +5067,29 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, return ret; } +static int +brcmf_cfg80211_update_conn_params(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_connect_params *sme, + u32 changed) +{ + struct brcmf_if *ifp; + int err; + + if (!(changed & UPDATE_ASSOC_IES)) + return 0; + + ifp = netdev_priv(ndev); + err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG, + sme->ie, sme->ie_len); + if (err) + brcmf_err("Set Assoc REQ IE Failed\n"); + else + brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n"); + + return err; +} + #ifdef CONFIG_PM static int brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev, @@ -5134,6 +5157,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .crit_proto_start = brcmf_cfg80211_crit_proto_start, .crit_proto_stop = brcmf_cfg80211_crit_proto_stop, .tdls_oper = brcmf_cfg80211_tdls_oper, + .update_connect_params = brcmf_cfg80211_update_conn_params, }; struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, -- cgit v1.2.3 From a971df0b9d04674e325346c17de9a895425ca5e1 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 28 Jan 2017 14:31:22 +0100 Subject: bcma: use (get|put)_device when probing/removing device driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows tracking device state and e.g. makes devm work as expected. 
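The change follows the standard bus-driver pattern: hold a reference on the struct device for as long as a driver is bound to it, and drop it again on probe failure or at remove time. A minimal sketch (struct example_driver and to_example_driver() are illustrative stand-ins, not bcma code):

#include <linux/device.h>

static int example_bus_probe(struct device *dev)
{
        struct example_driver *edrv = to_example_driver(dev->driver);
        int err = 0;

        get_device(dev);                /* pin the device while bound */
        if (edrv->probe)
                err = edrv->probe(dev);
        if (err)
                put_device(dev);        /* balance the reference on failure */
        return err;
}

static int example_bus_remove(struct device *dev)
{
        struct example_driver *edrv = to_example_driver(dev->driver);

        if (edrv->remove)
                edrv->remove(dev);
        put_device(dev);                /* drop the probe-time reference */
        return 0;
}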
Signed-off-by: Rafał Miłecki Cc: Stable Signed-off-by: Kalle Valo --- drivers/bcma/main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 2c1798e38abd..38688236b3cd 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -633,8 +633,11 @@ static int bcma_device_probe(struct device *dev) drv); int err = 0; + get_device(dev); if (adrv->probe) err = adrv->probe(core); + if (err) + put_device(dev); return err; } @@ -647,6 +650,7 @@ static int bcma_device_remove(struct device *dev) if (adrv->remove) adrv->remove(core); + put_device(dev); return 0; } -- cgit v1.2.3 From 6715208d0a95ae417203f8e4a7937c1b4c4947f2 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 30 Jan 2017 12:12:47 +0100 Subject: rt2800: enable rt3290 unconditionally on pci probe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we restart system using sysrq RT3290 device do not initalize properly, hance always enable it via WLAN_FUN_CTRL register on probe. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=85461 Reported-and-tested-by: Giedrius Statkevičius Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 755c09364b19..572cdea4ca25 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -373,9 +373,6 @@ static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) int i, count; rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, ®); - if (rt2x00_get_field32(reg, WLAN_EN)) - return 0; - rt2x00_set_field32(®, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); rt2x00_set_field32(®, FRC_WL_ANT_SET, 1); rt2x00_set_field32(®, WLAN_CLK_EN, 0); -- cgit v1.2.3 From b1a4c9a19659465d63924568b7b68a2fa65e159c Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 28 Jan 2017 23:11:34 +0100 Subject: bcma: make OF code more generic (not platform_device specific) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit OF allows not only specifying platform devices but also describing devices on standard buses like PCI or USB. This change will allow reading info from DT for bcma buses hosted on PCI cards. 
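The conversion boils down to passing a plain struct device * around: any struct device carries an of_node, so the same DT lookup works whether the bcma bus hangs off a platform device or a PCI card. Roughly (find_child_by_addr() is an illustrative stand-in for the bcma helper):

#include <linux/of.h>
#include <linux/of_address.h>

/* Caller is expected to of_node_put() a non-NULL result. */
static struct device_node *find_child_by_addr(struct device *parent, u64 addr)
{
        struct device_node *node;
        const __be32 *reg;
        u64 size;

        if (!parent->of_node)           /* no DT description for this bus */
                return NULL;

        for_each_child_of_node(parent->of_node, node) {
                reg = of_get_address(node, 0, &size, NULL);
                if (!reg)
                        continue;
                if (of_translate_address(node, reg) == addr)
                        return node;
        }
        return NULL;
}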
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/main.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 38688236b3cd..12da68ec48ba 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -136,17 +136,17 @@ static bool bcma_is_core_needed_early(u16 core_id) return false; } -static struct device_node *bcma_of_find_child_device(struct platform_device *parent, +static struct device_node *bcma_of_find_child_device(struct device *parent, struct bcma_device *core) { struct device_node *node; u64 size; const __be32 *reg; - if (!parent || !parent->dev.of_node) + if (!parent->of_node) return NULL; - for_each_child_of_node(parent->dev.of_node, node) { + for_each_child_of_node(parent->of_node, node) { reg = of_get_address(node, 0, &size, NULL); if (!reg) continue; @@ -156,7 +156,7 @@ static struct device_node *bcma_of_find_child_device(struct platform_device *par return NULL; } -static int bcma_of_irq_parse(struct platform_device *parent, +static int bcma_of_irq_parse(struct device *parent, struct bcma_device *core, struct of_phandle_args *out_irq, int num) { @@ -169,7 +169,7 @@ static int bcma_of_irq_parse(struct platform_device *parent, return rc; } - out_irq->np = parent->dev.of_node; + out_irq->np = parent->of_node; out_irq->args_count = 1; out_irq->args[0] = num; @@ -177,13 +177,13 @@ static int bcma_of_irq_parse(struct platform_device *parent, return of_irq_parse_raw(laddr, out_irq); } -static unsigned int bcma_of_get_irq(struct platform_device *parent, +static unsigned int bcma_of_get_irq(struct device *parent, struct bcma_device *core, int num) { struct of_phandle_args out_irq; int ret; - if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node) + if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node) return 0; ret = bcma_of_irq_parse(parent, core, &out_irq, num); @@ -196,7 +196,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent, return irq_create_of_mapping(&out_irq); } -static void bcma_of_fill_device(struct platform_device *parent, +static void bcma_of_fill_device(struct device *parent, struct bcma_device *core) { struct device_node *node; @@ -227,7 +227,7 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num) return mips_irq <= 4 ? mips_irq + 2 : 0; } if (bus->host_pdev) - return bcma_of_get_irq(bus->host_pdev, core, num); + return bcma_of_get_irq(&bus->host_pdev->dev, core, num); return 0; case BCMA_HOSTTYPE_SDIO: return 0; @@ -253,7 +253,8 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) { core->dma_dev = &bus->host_pdev->dev; core->dev.parent = &bus->host_pdev->dev; - bcma_of_fill_device(bus->host_pdev, core); + if (core->dev.parent) + bcma_of_fill_device(core->dev.parent, core); } else { core->dev.dma_mask = &core->dev.coherent_dma_mask; core->dma_dev = &core->dev; -- cgit v1.2.3 From e9904990e8e70a51574e6ec6b872f3c705ec75f0 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 31 Jan 2017 16:30:02 +0000 Subject: sfc: fix an off-by-one compare on an array size encap_type should be checked to see if it is greater or equal to the size of array map to fix an off-by-one array size check. This fixes an array overrun read as detected by static analysis by CoverityScan, CID#1398883 ("Out-of-bounds-read") Fixes: 9b41080125176841e ("sfc: insert catch-all filters for encapsulated traffic") Signed-off-by: Colin Ian King Acked-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 8bec9383d754..0475f1831b92 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5080,7 +5080,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, /* quick bounds check (BCAST result impossible) */ BUILD_BUG_ON(EFX_EF10_BCAST != 0); - if (encap_type > ARRAY_SIZE(map) || map[encap_type] == 0) { + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { WARN_ON(1); return -EINVAL; } -- cgit v1.2.3 From f8244ab55200dbe074380c3388804adff03a8a19 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 30 Jan 2017 23:04:42 +0100 Subject: net: aquantia: atlantic: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Tested-by: Pavel Belous Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_ethtool.c | 23 +++++----- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 51 ++++++++++++++-------- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 6 ++- 3 files changed, 49 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index c5b025e0dc10..a761e91471df 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -35,24 +35,25 @@ static u32 aq_ethtool_get_link(struct net_device *ndev) return ethtool_op_get_link(ndev); } -static int aq_ethtool_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int aq_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - aq_nic_get_link_settings(aq_nic, cmd); - ethtool_cmd_speed_set(cmd, netif_carrier_ok(ndev) ? - aq_nic_get_link_speed(aq_nic) : 0U); + aq_nic_get_link_ksettings(aq_nic, cmd); + cmd->base.speed = netif_carrier_ok(ndev) ? 
+ aq_nic_get_link_speed(aq_nic) : 0U; return 0; } -static int aq_ethtool_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int +aq_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - return aq_nic_set_link_settings(aq_nic, cmd); + return aq_nic_set_link_ksettings(aq_nic, cmd); } /* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */ @@ -248,8 +249,6 @@ const struct ethtool_ops aq_ethtool_ops = { .get_link = aq_ethtool_get_link, .get_regs_len = aq_ethtool_get_regs_len, .get_regs = aq_ethtool_get_regs, - .get_settings = aq_ethtool_get_settings, - .set_settings = aq_ethtool_set_settings, .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, @@ -257,5 +256,7 @@ const struct ethtool_ops aq_ethtool_ops = { .get_rxfh = aq_ethtool_get_rss, .get_rxnfc = aq_ethtool_get_rxnfc, .get_sset_count = aq_ethtool_get_sset_count, - .get_ethtool_stats = aq_ethtool_stats + .get_ethtool_stats = aq_ethtool_stats, + .get_link_ksettings = aq_ethtool_get_link_ksettings, + .set_link_ksettings = aq_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 84bb44186750..bed25abd2889 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -734,50 +734,65 @@ err_exit:; (void)err; } -void aq_nic_get_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd) +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd) { - cmd->port = PORT_TP; - cmd->transceiver = XCVR_EXTERNAL; + u32 supported, advertising; + + cmd->base.port = PORT_TP; /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = self->aq_nic_cfg.is_autoneg; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); - cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ? + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ? ADVERTISED_10000baseT_Full : 0U; - cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ? + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ? ADVERTISED_1000baseT_Full : 0U; - cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ? + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ? ADVERTISED_100baseT_Full : 0U; - cmd->supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0; - cmd->supported |= SUPPORTED_Autoneg; + supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0; + supported |= SUPPORTED_Autoneg; + supported |= SUPPORTED_TP; - cmd->advertising = (self->aq_nic_cfg.is_autoneg) ? + advertising = (self->aq_nic_cfg.is_autoneg) ? ADVERTISED_Autoneg : 0U; - cmd->advertising |= + advertising |= (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ? ADVERTISED_10000baseT_Full : 0U; - cmd->advertising |= + advertising |= (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ? ADVERTISED_1000baseT_Full : 0U; - cmd->advertising |= + advertising |= (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ? 
ADVERTISED_100baseT_Full : 0U; - cmd->advertising |= (self->aq_nic_cfg.flow_control) ? + advertising |= (self->aq_nic_cfg.flow_control) ? ADVERTISED_Pause : 0U; + advertising |= ADVERTISED_TP; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); } -int aq_nic_set_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd) +int aq_nic_set_link_ksettings(struct aq_nic_s *self, + const struct ethtool_link_ksettings *cmd) { u32 speed = 0U; u32 rate = 0U; int err = 0; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { rate = self->aq_hw_caps.link_speed_msk; self->aq_nic_cfg.is_autoneg = true; } else { - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; switch (speed) { case SPEED_100: diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 055e2cdb0f6f..7fc2a5ecb2b7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -99,8 +99,10 @@ int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev); int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags); int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev); unsigned int aq_nic_get_link_speed(struct aq_nic_s *self); -void aq_nic_get_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd); -int aq_nic_set_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd); +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd); +int aq_nic_set_link_ksettings(struct aq_nic_s *self, + const struct ethtool_link_ksettings *cmd); struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); u32 aq_nic_get_fw_version(struct aq_nic_s *self); int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); -- cgit v1.2.3 From 4d1ceea8516cd6adf21f6b75995e2a7d4f376f9b Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 30 Jan 2017 18:25:18 -0800 Subject: net: ethtool: convert large order kmalloc allocations to vzalloc under memory pressure 'ethtool -S' command may warn: [ 2374.385195] ethtool: page allocation failure: order:4, mode:0x242c0c0 [ 2374.405573] CPU: 12 PID: 40211 Comm: ethtool Not tainted [ 2374.423071] Call Trace: [ 2374.423076] [] dump_stack+0x4d/0x64 [ 2374.423080] [] warn_alloc_failed+0xeb/0x150 [ 2374.423082] [] ? __alloc_pages_direct_compact+0x43/0xf0 [ 2374.423084] [] __alloc_pages_nodemask+0x4dc/0xbf0 [ 2374.423091] [] ? cmd_exec+0x722/0xcd0 [mlx5_core] [ 2374.423095] [] alloc_pages_current+0x8c/0x110 [ 2374.423097] [] alloc_kmem_pages+0x19/0x90 [ 2374.423099] [] kmalloc_order_trace+0x2e/0xe0 [ 2374.423101] [] __kmalloc+0x204/0x220 [ 2374.423105] [] dev_ethtool+0xe4e/0x1f80 [ 2374.423106] [] ? dev_get_by_name_rcu+0x5e/0x80 [ 2374.423108] [] dev_ioctl+0x156/0x560 [ 2374.423111] [] ? mem_cgroup_commit_charge+0x78/0x3c0 [ 2374.423117] [] sock_do_ioctl+0x42/0x50 [ 2374.423119] [] sock_ioctl+0x1b3/0x250 [ 2374.423121] [] do_vfs_ioctl+0x92/0x580 [ 2374.423123] [] ? do_audit_syscall_entry+0x4b/0x70 [ 2374.423124] [] ? syscall_trace_enter_phase1+0xfc/0x120 [ 2374.423126] [] SyS_ioctl+0x79/0x90 [ 2374.423127] [] do_syscall_64+0x50/0xa0 [ 2374.423129] [] entry_SYSCALL64_slow_path+0x25/0x25 ~1160 mlx5 counters ~= order 4 allocation which is unlikely to succeed under memory pressure. Convert them to vzalloc() as ethtool_get_regs() does. 
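To put numbers on the failure: roughly 1160 stat names at ETH_GSTRING_LEN (32) bytes each is about 37 KB, which kmalloc() must satisfy with a 64 KB, order-4 physically contiguous block (the order reported in the trace above), whereas vzalloc() assembles the buffer from individual 4 KB pages. The conversion pattern, reduced to its core (fragment of the hunks below):

        /* before: one high-order, physically contiguous chunk */
        data = kmalloc(n_stats * sizeof(u64), GFP_USER);

        /* after: bounded count, page-backed buffer, zero-length tolerated */
        if (n_stats > S32_MAX / sizeof(u64))
                return -ENOMEM;
        data = vzalloc(n_stats * sizeof(u64));
        if (n_stats && !data)
                return -ENOMEM;
        /* ... fill the buffer and copy_to_user() ... */
        vfree(data);                    /* vfree(NULL) is a no-op */
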
Also take care of drivers without counters similar to commit 67ae7cf1eeda ("ethtool: Allow zero-length register dumps again") and reduce warn_on to warn_on_once. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/core/ethtool.c | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 236a21e3c878..6b3eee0834a0 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1817,11 +1817,13 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) ret = __ethtool_get_sset_count(dev, gstrings.string_set); if (ret < 0) return ret; + if (ret > S32_MAX / ETH_GSTRING_LEN) + return -ENOMEM; + WARN_ON_ONCE(!ret); gstrings.len = ret; - - data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER); - if (!data) + data = vzalloc(gstrings.len * ETH_GSTRING_LEN); + if (gstrings.len && !data) return -ENOMEM; __ethtool_get_strings(dev, gstrings.string_set, data); @@ -1830,12 +1832,13 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) goto out; useraddr += sizeof(gstrings); - if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + if (gstrings.len && + copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) goto out; ret = 0; out: - kfree(data); + vfree(data); return ret; } @@ -1912,14 +1915,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) n_stats = ops->get_sset_count(dev, ETH_SS_STATS); if (n_stats < 0) return n_stats; - WARN_ON(n_stats == 0); - + if (n_stats > S32_MAX / sizeof(u64)) + return -ENOMEM; + WARN_ON_ONCE(!n_stats); if (copy_from_user(&stats, useraddr, sizeof(stats))) return -EFAULT; stats.n_stats = n_stats; - data = kmalloc(n_stats * sizeof(u64), GFP_USER); - if (!data) + data = vzalloc(n_stats * sizeof(u64)); + if (n_stats && !data) return -ENOMEM; ops->get_ethtool_stats(dev, &stats, data); @@ -1928,12 +1932,12 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) if (copy_to_user(useraddr, &stats, sizeof(stats))) goto out; useraddr += sizeof(stats); - if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) + if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64))) goto out; ret = 0; out: - kfree(data); + vfree(data); return ret; } @@ -1948,17 +1952,18 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) return -EOPNOTSUPP; n_stats = phy_get_sset_count(phydev); - if (n_stats < 0) return n_stats; - WARN_ON(n_stats == 0); + if (n_stats > S32_MAX / sizeof(u64)) + return -ENOMEM; + WARN_ON_ONCE(!n_stats); if (copy_from_user(&stats, useraddr, sizeof(stats))) return -EFAULT; stats.n_stats = n_stats; - data = kmalloc_array(n_stats, sizeof(u64), GFP_USER); - if (!data) + data = vzalloc(n_stats * sizeof(u64)); + if (n_stats && !data) return -ENOMEM; mutex_lock(&phydev->lock); @@ -1969,12 +1974,12 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (copy_to_user(useraddr, &stats, sizeof(stats))) goto out; useraddr += sizeof(stats); - if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) + if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64))) goto out; ret = 0; out: - kfree(data); + vfree(data); return ret; } -- cgit v1.2.3 From 0d909aa7c276584cc17413a7bec64d4fb5da2968 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 31 Jan 2017 13:21:50 +0530 Subject: cxgb4: update latest firmware version supported 
Change t4fw_version.h to update latest firmware version number 1.16.26.0. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 2accab386323..5fdaa16426c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0F -#define T4FW_VERSION_MICRO 0x25 +#define T4FW_VERSION_MINOR 0x10 +#define T4FW_VERSION_MICRO 0x1A #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0F -#define T5FW_VERSION_MICRO 0x25 +#define T5FW_VERSION_MINOR 0x10 +#define T5FW_VERSION_MICRO 0x1A #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x0F -#define T6FW_VERSION_MICRO 0x25 +#define T6FW_VERSION_MINOR 0x10 +#define T6FW_VERSION_MICRO 0x1A #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 -- cgit v1.2.3 From 953046e1af0f188ffe7061ee05160318e7f9d0dd Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 30 Jan 2017 19:17:16 -0800 Subject: net: dsa: bcm_sf2: Fix build module Commit 7318166cacad ("net: dsa: bcm_sf2: Add support for ethtool::rxnfc") added a new object to build: bcm_sf2_cfp.o, but in doing so, we essentially just built this object and no longer bcm_sf2.o. Fix this by creating a module named bcm-sf2.ko which links in bcm_sf2.o and bcm_sf2_cfp.o. Fixes: 7318166cacad ("net: dsa: bcm_sf2: Add support for ethtool::rxnfc") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index da9893478e21..a3c941632217 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o -bcm_sf2-objs += bcm_sf2_cfp.o +obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o +bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o obj-y += b53/ -- cgit v1.2.3 From 34a5102c3235c470a6c77fba16cb971964d9c136 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 31 Jan 2017 19:37:54 +0100 Subject: net: bgmac: allocate struct bgmac just once & don't copy it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So far were were allocating struct bgmac in 3 places: platform code, bcma code and shared bgmac_enet_probe function. The reason for this was bgmac_enet_probe: 1) Requiring early-filled struct bgmac 2) Calling alloc_etherdev on its own in order to use netdev_priv later This solution got few drawbacks: 1) Was duplicating allocating code 2) Required copying early-filled struct 3) Resulted in platform/bcma code having access only to unused struct Solve this situation by simply extracting some probe code into the new bgmac_alloc function. Signed-off-by: Rafał Miłecki Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bgmac-bcma.c | 4 +--- drivers/net/ethernet/broadcom/bgmac-platform.c | 2 +- drivers/net/ethernet/broadcom/bgmac.c | 25 +++++++++++++++++-------- drivers/net/ethernet/broadcom/bgmac.h | 3 ++- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 4a4ffc0c4c65..9281abda4026 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core) u8 *mac; int err; - bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&core->dev); if (!bgmac) return -ENOMEM; bgmac->bcma.core = core; - bgmac->dev = &core->dev; bgmac->dma_dev = core->dma_dev; bgmac->irq = core->irq; @@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core) err1: bcma_mdio_mii_unregister(bgmac->mii_bus); err: - kfree(bgmac); bcma_set_drvdata(core, NULL); return err; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6f736c19872f..805e6ed6c390 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -151,7 +151,7 @@ static int bgmac_probe(struct platform_device *pdev) struct resource *regs; const u8 *mac_addr; - bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&pdev->dev); if (!bgmac) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 9122608df16e..fe88126b1e0c 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1446,22 +1446,32 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) } EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct); -int bgmac_enet_probe(struct bgmac *info) +struct bgmac *bgmac_alloc(struct device *dev) { struct net_device *net_dev; struct bgmac *bgmac; - int err; /* Allocation and references */ - net_dev = alloc_etherdev(sizeof(*bgmac)); + net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac)); if (!net_dev) - return -ENOMEM; + return NULL; net_dev->netdev_ops = &bgmac_netdev_ops; net_dev->ethtool_ops = &bgmac_ethtool_ops; + bgmac = netdev_priv(net_dev); - memcpy(bgmac, info, sizeof(*bgmac)); + bgmac->dev = dev; bgmac->net_dev = net_dev; + + return bgmac; +} +EXPORT_SYMBOL_GPL(bgmac_alloc); + +int bgmac_enet_probe(struct bgmac *bgmac) +{ + struct net_device *net_dev = bgmac->net_dev; + int err; + net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); @@ -1488,7 +1498,7 @@ int bgmac_enet_probe(struct bgmac *info) err = bgmac_dma_alloc(bgmac); if (err) { dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); - goto err_netdev_free; + goto err_out; } bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; @@ -1521,8 +1531,7 @@ err_phy_disconnect: phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); -err_netdev_free: - free_netdev(net_dev); +err_out: return err; } diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 71f493f2451f..dfebaded3b52 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -517,7 +517,8 @@ struct bgmac { int (*phy_connect)(struct bgmac *bgmac); }; -int bgmac_enet_probe(struct bgmac *info); +struct bgmac *bgmac_alloc(struct device *dev); +int bgmac_enet_probe(struct bgmac *bgmac); void bgmac_enet_remove(struct bgmac *bgmac); void 
bgmac_adjust_link(struct net_device *net_dev); int bgmac_phy_connect_direct(struct bgmac *bgmac); -- cgit v1.2.3 From aa8863e5d49417094b9457a0d53e8505e95a1863 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 31 Jan 2017 19:37:55 +0100 Subject: net: bgmac: drop struct bcma_mdio we don't need anymore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adding struct bcma_mdio was a workaround for bcma code not having access to the struct bgmac used in the core code. Now we don't duplicate this struct we can just use it internally in bcma code. This simplifies code & allows access to all bgmac driver details from all places in bcma code. Signed-off-by: Rafał Miłecki Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 98 ++++++++++--------------- drivers/net/ethernet/broadcom/bgmac-bcma.c | 2 +- drivers/net/ethernet/broadcom/bgmac.h | 2 +- 3 files changed, 42 insertions(+), 60 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 7c19c8e2bf91..9d9984999dce 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -12,11 +12,6 @@ #include #include "bgmac.h" -struct bcma_mdio { - struct bcma_device *core; - u8 phyaddr; -}; - static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { @@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, * PHY ops **************************************************/ -static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) +static u16 bcma_mdio_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg) { struct bcma_device *core; u16 phy_access_addr; @@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ -static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, +static int bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) { struct bcma_device *core; @@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, u16 phy_ctl_addr; u32 tmp; - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, tmp |= phyaddr; bcma_write32(core, phy_ctl_addr, 
tmp); - bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); - if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) dev_warn(&core->dev, "Error setting MDIO int\n"); tmp = BGMAC_PA_START; @@ -132,39 +127,39 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ -static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio) +static void bcma_mdio_phy_init(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo; + struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo; u8 i; if (ci->id == BCMA_CHIP_ID_BCM5356) { for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b); - bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b); + bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } } if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { - struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc; + struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc; bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } } } @@ -172,17 +167,17 @@ static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ static int bcma_mdio_phy_reset(struct mii_bus *bus) { - struct bcma_mdio *bcma_mdio = bus->priv; - u8 phyaddr = bcma_mdio->phyaddr; + struct bgmac *bgmac = bus->priv; + u8 phyaddr = bgmac->phyaddr; - if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS) + if (phyaddr == BGMAC_PHY_NOREGS) return 0; - bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET); + bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET); udelay(100); - if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET) - dev_err(&bcma_mdio->core->dev, "PHY reset failed\n"); - bcma_mdio_phy_init(bcma_mdio); + if 
(bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET) + dev_err(bgmac->dev, "PHY reset failed\n"); + bcma_mdio_phy_init(bgmac); return 0; } @@ -202,16 +197,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum, return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value); } -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) { - struct bcma_mdio *bcma_mdio; + struct bcma_device *core = bgmac->bcma.core; struct mii_bus *mii_bus; int err; - bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL); - if (!bcma_mdio) - return ERR_PTR(-ENOMEM); - mii_bus = mdiobus_alloc(); if (!mii_bus) { err = -ENOMEM; @@ -221,15 +212,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) mii_bus->name = "bcma_mdio mii bus"; sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num, core->core_unit); - mii_bus->priv = bcma_mdio; + mii_bus->priv = bgmac; mii_bus->read = bcma_mdio_mii_read; mii_bus->write = bcma_mdio_mii_write; mii_bus->reset = bcma_mdio_phy_reset; mii_bus->parent = &core->dev; - mii_bus->phy_mask = ~(1 << phyaddr); - - bcma_mdio->core = core; - bcma_mdio->phyaddr = phyaddr; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); err = mdiobus_register(mii_bus); if (err) { @@ -242,23 +230,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) err_free_bus: mdiobus_free(mii_bus); err: - kfree(bcma_mdio); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_register); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus) { - struct bcma_mdio *bcma_mdio; - if (!mii_bus) return; - bcma_mdio = mii_bus->priv; - mdiobus_unregister(mii_bus); mdiobus_free(mii_bus); - kfree(bcma_mdio); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 9281abda4026..5ef60d4f12b4 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -177,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core) if (!bgmac_is_bcm4707_family(core) && !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { - mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); + mii_bus = bcma_mdio_mii_register(bgmac); if (IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); goto err; diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index dfebaded3b52..ab2db76e4fb8 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -523,7 +523,7 @@ void bgmac_enet_remove(struct bgmac *bgmac); void bgmac_adjust_link(struct net_device *net_dev); int bgmac_phy_connect_direct(struct bgmac *bgmac); -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr); +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) -- cgit v1.2.3 From 8e6f31baba7e2c13ab7e954fe6179420a7545a8b Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 31 Jan 2017 19:37:56 +0100 Subject: net: bgmac: use PHY subsystem for initializing PHY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for using bgmac with PHYs supported by standalone PHY drivers. Having any PHY initialization in bgmac is hacky and shouldn't be extended but rather removed if anyone has hardware to test it. 
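For chips that fall outside those legacy tables, the fallback added here simply defers to the PHY subsystem. A minimal sketch of that fallback (it mirrors the last hunk of the diff below; the chipset-specific paths above it stay untouched):

        /* A PHY driver is bound: let it run its own init sequence
         * instead of poking PHY registers from the MAC driver.
         */
        if (bgmac->net_dev && bgmac->net_dev->phydev)
                phy_init_hw(bgmac->net_dev->phydev);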
Signed-off-by: Rafał Miłecki Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 9d9984999dce..6ce80cbcb48e 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -132,6 +132,10 @@ static void bcma_mdio_phy_init(struct bgmac *bgmac) struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo; u8 i; + /* For some legacy hardware we do chipset-based PHY initialization here + * without even detecting PHY ID. It's hacky and should be cleaned as + * soon as someone can test it. + */ if (ci->id == BCMA_CHIP_ID_BCM5356) { for (i = 0; i < 5; i++) { bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b); @@ -140,6 +144,7 @@ static void bcma_mdio_phy_init(struct bgmac *bgmac) bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa); bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || @@ -161,7 +166,12 @@ static void bcma_mdio_phy_init(struct bgmac *bgmac) bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273); bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } + + /* For all other hw do initialization using PHY subsystem. */ + if (bgmac->net_dev && bgmac->net_dev->phydev) + phy_init_hw(bgmac->net_dev->phydev); } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ -- cgit v1.2.3 From 160ca0142431c19386db976302fd4b07c587f651 Mon Sep 17 00:00:00 2001 From: Theuns Verwoerd Date: Tue, 31 Jan 2017 12:23:46 +1300 Subject: rtnetlink: Handle IFLA_MASTER parameter when processing rtnl_newlink Allow a master interface to be specified as one of the parameters when creating a new interface via rtnl_newlink. Previously this would require invoking interface creation, waiting for it to complete, and then separately binding that new interface to a master. In particular, this is used when creating a macvlan child interface for VRRP in a VRF configuration, allowing the interface creator to specify directly what master interface should be inherited by the child, without having to deal with asynchronous complications and potential race conditions. Signed-off-by: Theuns Verwoerd Acked-by: David Ahern Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 152744643074..adfb54b896da 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2570,7 +2570,7 @@ replay: return -ENODEV; } - if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) + if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) return -EOPNOTSUPP; if (!ops) { @@ -2652,6 +2652,11 @@ replay: if (err < 0) goto out_unregister; } + if (tb[IFLA_MASTER]) { + err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER])); + if (err) + goto out_unregister; + } out: if (link_net) put_net(link_net); -- cgit v1.2.3 From b2504a5dbef3305ef41988ad270b0e8ec289331c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 31 Jan 2017 10:20:32 -0800 Subject: net: reduce skb_warn_bad_offload() noise Dmitry reported warnings occurring in __skb_gso_segment() [1] All SKB_GSO_DODGY producers can allow user space to feed packets that trigger the current check. We could prevent them from doing so, rejecting packets, but this might add regressions to existing programs. 
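(In sketch form, the approach taken in the __skb_gso_segment() hunk further down is to keep accepting such packets and only warn if the checksum fields are still not set up once segmentation has run:

        segs = skb_mac_gso_segment(skb, features);

        /* By now well-behaved DODGY paths have initialized ->check,
         * so a remaining failure really is a bad offload setup.
         */
        if (unlikely(skb_needs_check(skb, tx_path)))
                skb_warn_bad_offload(skb);

        return segs;

All identifiers used here are the ones already present in that function.)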
It turns out our SKB_GSO_DODGY handlers properly set up checksum information that is needed anyway when packets needs to be segmented. By checking again skb_needs_check() after skb_mac_gso_segment(), we should remove these pesky warnings, at a very minor cost. With help from Willem de Bruijn [1] WARNING: CPU: 1 PID: 6768 at net/core/dev.c:2439 skb_warn_bad_offload+0x2af/0x390 net/core/dev.c:2434 lo: caps=(0x000000a2803b7c69, 0x0000000000000000) len=138 data_len=0 gso_size=15883 gso_type=4 ip_summed=0 Kernel panic - not syncing: panic_on_warn set ... CPU: 1 PID: 6768 Comm: syz-executor1 Not tainted 4.9.0 #5 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 ffff8801c063ecd8 ffffffff82346bdf ffffffff00000001 1ffff100380c7d2e ffffed00380c7d26 0000000041b58ab3 ffffffff84b37e38 ffffffff823468f1 ffffffff84820740 ffffffff84f289c0 dffffc0000000000 ffff8801c063ee20 Call Trace: [] __dump_stack lib/dump_stack.c:15 [inline] [] dump_stack+0x2ee/0x3ef lib/dump_stack.c:51 [] panic+0x1fb/0x412 kernel/panic.c:179 [] __warn+0x1c4/0x1e0 kernel/panic.c:542 [] warn_slowpath_fmt+0xc5/0x100 kernel/panic.c:565 [] skb_warn_bad_offload+0x2af/0x390 net/core/dev.c:2434 [] __skb_gso_segment+0x482/0x780 net/core/dev.c:2706 [] skb_gso_segment include/linux/netdevice.h:3985 [inline] [] validate_xmit_skb+0x5c9/0xc20 net/core/dev.c:2969 [] __dev_queue_xmit+0xe6b/0x1e70 net/core/dev.c:3383 [] dev_queue_xmit+0x17/0x20 net/core/dev.c:3424 [] packet_snd net/packet/af_packet.c:2930 [inline] [] packet_sendmsg+0x32ed/0x4d30 net/packet/af_packet.c:2955 [] sock_sendmsg_nosec net/socket.c:621 [inline] [] sock_sendmsg+0xca/0x110 net/socket.c:631 [] ___sys_sendmsg+0x8fa/0x9f0 net/socket.c:1954 [] __sys_sendmsg+0x138/0x300 net/socket.c:1988 [] SYSC_sendmsg net/socket.c:1999 [inline] [] SyS_sendmsg+0x2d/0x50 net/socket.c:1995 [] entry_SYSCALL_64_fastpath+0x1f/0xc2 Signed-off-by: Eric Dumazet Reported-by: Dmitry Vyukov Cc: Willem de Bruijn Signed-off-by: David S. Miller --- net/core/dev.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index e61528c50209..727b6fda0e8c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2658,11 +2658,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path) { + struct sk_buff *segs; + if (unlikely(skb_needs_check(skb, tx_path))) { int err; - skb_warn_bad_offload(skb); - + /* We're going to init ->check field in TCP or UDP header */ err = skb_cow_head(skb, 0); if (err < 0) return ERR_PTR(err); @@ -2690,7 +2691,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, skb_reset_mac_header(skb); skb_reset_mac_len(skb); - return skb_mac_gso_segment(skb, features); + segs = skb_mac_gso_segment(skb, features); + + if (unlikely(skb_needs_check(skb, tx_path))) + skb_warn_bad_offload(skb); + + return segs; } EXPORT_SYMBOL(__skb_gso_segment); -- cgit v1.2.3 From f696186203076770f51eb5890712e696e6bb72d7 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 31 Jan 2017 14:04:04 -0600 Subject: net: ethernet: ti: cpsw: fix NULL pointer dereference in switch mode In switch mode on struct cpsw_slave->ndev field will be initialized with proper value only for the one cpsw slave port, as result cpsw_get_usage_count() will generate "Unable to handle kernel NULL pointer dereference" exception when first ethernet interface is opening cpsw_ndo_open(). 
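(In sketch form, the fix described below boils down to skipping slave ports that never got a netdev before dereferencing them:

        for (i = 0; i < cpsw->data.slaves; i++)
                if (cpsw->slaves[i].ndev &&
                    netif_running(cpsw->slaves[i].ndev))
                        usage_count++;

The actual change is the one-line hunk at the end of this log.)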
This issue causes boot regression on AM335x EVM and reproducible on am57xx-evm (switch mode). Fix it by adding additional check for !cpsw->slaves[i].ndev in cpsw_get_usage_count(). Cc: Ivan Khoronzhuk Fixes: 03fd01ad0eea ("net: ethernet: ti: cpsw: don't duplicate ndev_running") Signed-off-by: Grygorii Strashko Reviewed-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 67b7323b6907..35a95dcc755b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -677,7 +677,7 @@ static int cpsw_get_usage_count(struct cpsw_common *cpsw) u32 usage_count = 0; for (i = 0; i < cpsw->data.slaves; i++) - if (netif_running(cpsw->slaves[i].ndev)) + if (cpsw->slaves[i].ndev && netif_running(cpsw->slaves[i].ndev)) usage_count++; return usage_count; -- cgit v1.2.3 From 1f5e29ce7989eb75652774004b962ee1eb6e56ca Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 31 Jan 2017 16:51:37 -0800 Subject: net: ipv6: add NLM_F_APPEND in notifications when applicable IPv6 does not set the NLM_F_APPEND flag in notifications to signal that a NEWROUTE is an append versus a new route or a replaced one. Add the flag if the request has it. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/ip6_fib.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index ef5485204522..febde6c112bf 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -746,6 +746,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, u16 nlflags = NLM_F_EXCL; int err; + if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND)) + nlflags |= NLM_F_APPEND; + ins = &fn->leaf; for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) { -- cgit v1.2.3 From cdb478e5e413864df73096ef4f0e5d518395a92b Mon Sep 17 00:00:00 2001 From: Satanand Burla Date: Tue, 31 Jan 2017 13:04:42 -0800 Subject: liquidio: fix for iq and droq cnts going negative Flush the mmio writes before releasing spin locks. if the maintained counts get too high > 2M force writeback of the counts to clear them Signed-off-by: Satanand Burla Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: Derek Chickles Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 6 +++++- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 6 +++++- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 1 + drivers/net/ethernet/cavium/liquidio/octeon_device.c | 4 ++++ 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index fbe1986b87f6..c12cfa4113cc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2445,7 +2445,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 78cfa8ba1b57..631f1c0f9e4d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1630,7 +1630,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index bc0af8af1a0c..294c6f3c6b48 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -99,6 +99,7 @@ enum octeon_tag_type { #define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1) #define BYTES_PER_DHLEN_UNIT 8 +#define MAX_REG_CNT 2000000U static inline u32 incr_index(u32 index, u32 count, u32 max) { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index a8df493a5012..9675ffbf25e6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&droq->lock); writel(droq->pkt_count, droq->pkts_sent_reg); droq->pkt_count = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&droq->lock); oct = droq->oct_dev; } @@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&iq->lock); writel(iq->pkt_in_done, iq->inst_cnt_reg); iq->pkt_in_done = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&iq->lock); oct = iq->oct_dev; } -- cgit v1.2.3 From ff1176f6164f3d151ee64c05d3f7b6662a81b982 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 1 Feb 2017 11:52:15 +0300 Subject: ethernet: aquantia: fix dma_mapping_error test dma_mapping_error() returns 1 if there is an error and 0 if not. 
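As a generic illustration of the intended idiom -- dev, page, size, addr, err and the err_exit label are placeholders rather than code from this driver:

        addr = dma_map_page(dev, page, 0, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                /* non-zero return means the mapping failed */
                err = -ENOMEM;
                goto err_exit;
        }

Because the helper returns 1 on error and 0 otherwise (as stated above), the old "if (err < 0)" test could never fire and the error path was unreachable; the hunk below tests the result directly instead.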
Fixes: 018423e90bee ("net: ethernet: aquantia: Add ring support code") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 817c145520c8..dea9e9bbb8e7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -298,9 +298,10 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff->page, 0, AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); - err = dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa); - if (err < 0) + if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) { + err = -ENOMEM; goto err_exit; + } buff = NULL; } -- cgit v1.2.3 From 5e2ed1329ee074229d5a2f4389035be818120980 Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Wed, 1 Feb 2017 15:41:54 +0100 Subject: sh_eth: align usage of sh_eth_modify() with rest of driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To be consistent with the rest of the driver when setting bits using sh_eth_modify() the same bit should also be cleared. This have no functional change and should have been done from the start. Signed-off-by: Niklas Söderlund Suggested-by: Sergei Shtylyov Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 2f08d2735fe2..f9134c818ac6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3271,7 +3271,7 @@ static int sh_eth_wol_setup(struct net_device *ndev) sh_eth_write(ndev, EESIPR_ECIIP, EESIPR); /* Enable MagicPacket */ - sh_eth_modify(ndev, ECMR, 0, ECMR_MPDE); + sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); /* Increased clock usage so device won't be suspended */ clk_enable(mdp->clk); -- cgit v1.2.3 From 0cf45a3b1e2c47bbcc9e75cbed5c660492e297da Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Wed, 1 Feb 2017 15:41:55 +0100 Subject: sh_eth: fix wakeup event reporting from MagicPacket MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If a link change interrupt happens along side the MagicPacket interrupt and the link change interrupt is ignored the interrupt handler will return and the wakeup event is not registered. Fix this by moving the MagicPacket check before the link change check. Signed-off-by: Niklas Söderlund Reported-by: Sergei Shtylyov Acked-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f9134c818ac6..54248775f227 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1605,6 +1605,8 @@ static void sh_eth_emac_interrupt(struct net_device *ndev) sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ if (felic_stat & ECSR_ICD) ndev->stats.tx_carrier_errors++; + if (felic_stat & ECSR_MPD) + pm_wakeup_event(&mdp->pdev->dev, 0); if (felic_stat & ECSR_LCHNG) { /* Link Changed */ if (mdp->cd->no_psr || mdp->no_ether_link) @@ -1624,8 +1626,6 @@ static void sh_eth_emac_interrupt(struct net_device *ndev) sh_eth_rcv_snd_enable(ndev); } } - if (felic_stat & ECSR_MPD) - pm_wakeup_event(&mdp->pdev->dev, 0); } /* error control function */ -- cgit v1.2.3 From cadb9c9fdbc6a624d57b5ecdfd412b5800848703 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 31 Jan 2017 11:33:53 +0200 Subject: net/sched: act_sample: Fix error path in init Fix error path of in sample init, by releasing the tc hash in case of failure in psample_group creation. Fixes: 5c5670fae430 ("net/sched: Introduce sample tc action") Reported-by: Cong Wang Reviewed-by: Jiri Pirko Signed-off-by: Yotam Gigi Signed-off-by: David S. Miller --- net/sched/act_sample.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 39229756de07..02b67495829c 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -81,8 +81,11 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); psample_group = psample_group_get(net, s->psample_group_num); - if (!psample_group) + if (!psample_group) { + if (ret == ACT_P_CREATED) + tcf_hash_release(*a, bind); return -ENOMEM; + } RCU_INIT_POINTER(s->psample_group, psample_group); if (tb[TCA_SAMPLE_TRUNC_SIZE]) { -- cgit v1.2.3 From f3b20313aef93f9cfda76fec2c05625f88510f18 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 31 Jan 2017 11:33:54 +0200 Subject: net/sched: act_psample: Remove unnecessary ASSERT_RTNL The ASSERT_RTNL is not necessary in the init function, as it does not touch any rtnl protected structures, as opposed to the mirred action which does have to hold a net device. Reported-by: Cong Wang Reviewed-by: Jiri Pirko Signed-off-by: Yotam Gigi Signed-off-by: David S. Miller --- net/sched/act_sample.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 02b67495829c..0b8217b4763f 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -76,7 +76,6 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, } s = to_sample(*a); - ASSERT_RTNL(); s->tcf_action = parm->action; s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); -- cgit v1.2.3 From 62e13097c46c69dbd7544ab2cd585ccf48f360a4 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 31 Jan 2017 22:54:54 +0100 Subject: net: phy: broadcom: rehook BCM54612E specific init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This extra BCM54612E code in PHY driver isn't really aneg specific. Even without it aneg works OK but the problem is no packets pass through PHY. 
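(After the rework the driver entry for this PHY ends up roughly as sketched here -- bcm54612e_sketch is an illustrative name and only a few callbacks are shown; the real hunks follow at the end of this log:

        static struct phy_driver bcm54612e_sketch = {
                .config_init    = bcm54xx_config_init,  /* now dispatches to bcm54612e_config_init() */
                .config_aneg    = genphy_config_aneg,   /* plain autonegotiation, no chip quirks */
                .read_status    = genphy_read_status,
        };
)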
Moreover putting this code inside config_aneg callback didn't allow resuming PHY correctly. When driver called phy_stop and phy_start it was putting PHY machine into RESUMING state. After that machine was switching into AN and NOLINK without ever calling phy_start_aneg. This prevented this extra setup from being called and PHY didn't work. This change has been verified to fix network on BCM47186B0 SoC device with BCM54612E. Signed-off-by: Rafał Miłecki Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/broadcom.c | 67 +++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 794b9ec81ba5..9cd8b27d1292 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -46,6 +46,34 @@ static int bcm54210e_config_init(struct phy_device *phydev) return 0; } +static int bcm54612e_config_init(struct phy_device *phydev) +{ + /* Clear TX internal delay unless requested. */ + if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && + (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) { + /* Disable TXD to GTXCLK clock delay (default set) */ + /* Bit 9 is the only field in shadow register 00011 */ + bcm_phy_write_shadow(phydev, 0x03, 0); + } + + /* Clear RX internal delay unless requested. */ + if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && + (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { + u16 reg; + + reg = bcm54xx_auxctl_read(phydev, + MII_BCM54XX_AUXCTL_SHDWSEL_MISC); + /* Disable RXD to RXC delay (default set) */ + reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; + /* Clear shadow selector field */ + reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK; + bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, + MII_BCM54XX_AUXCTL_MISC_WREN | reg); + } + + return 0; +} + static int bcm54810_config(struct phy_device *phydev) { int rc, val; @@ -250,6 +278,10 @@ static int bcm54xx_config_init(struct phy_device *phydev) err = bcm54210e_config_init(phydev); if (err) return err; + } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54612E) { + err = bcm54612e_config_init(phydev); + if (err) + return err; } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { err = bcm54810_config(phydev); if (err) @@ -395,39 +427,6 @@ static int bcm5481_config_aneg(struct phy_device *phydev) return ret; } -static int bcm54612e_config_aneg(struct phy_device *phydev) -{ - int ret; - - /* First, auto-negotiate. */ - ret = genphy_config_aneg(phydev); - - /* Clear TX internal delay unless requested. */ - if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && - (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) { - /* Disable TXD to GTXCLK clock delay (default set) */ - /* Bit 9 is the only field in shadow register 00011 */ - bcm_phy_write_shadow(phydev, 0x03, 0); - } - - /* Clear RX internal delay unless requested. 
*/ - if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && - (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { - u16 reg; - - reg = bcm54xx_auxctl_read(phydev, - MII_BCM54XX_AUXCTL_SHDWSEL_MISC); - /* Disable RXD to RXC delay (default set) */ - reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; - /* Clear shadow selector field */ - reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK; - bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, - MII_BCM54XX_AUXCTL_MISC_WREN | reg); - } - - return ret; -} - static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set) { int val; @@ -590,7 +589,7 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = bcm54612e_config_aneg, + .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, -- cgit v1.2.3 From f4737a62033d7f3e0db740c449fc62119da7ab8a Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 30 Jan 2017 16:09:51 +0100 Subject: brcmfmac: check brcmf_bus_get_memdump result for error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This method may be unsupported (see: USB bus) or may just fail (see: SDIO bus). While at it rework logic in brcmf_sdio_bus_get_memdump function to avoid too many conditional code nesting levels. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/debug.c | 23 +++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index e64557c35553..6f8a4b074c31 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -32,16 +32,25 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, { void *dump; size_t ramsize; + int err; ramsize = brcmf_bus_get_ramsize(bus); - if (ramsize) { - dump = vzalloc(len + ramsize); - if (!dump) - return -ENOMEM; - memcpy(dump, data, len); - brcmf_bus_get_memdump(bus, dump + len, ramsize); - dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL); + if (!ramsize) + return -ENOTSUPP; + + dump = vzalloc(len + ramsize); + if (!dump) + return -ENOMEM; + + memcpy(dump, data, len); + err = brcmf_bus_get_memdump(bus, dump + len, ramsize); + if (err) { + vfree(dump); + return err; } + + dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL); + return 0; } -- cgit v1.2.3 From 36401cb7ffae731295a6dd1ce2b40d7ad74245f4 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 30 Jan 2017 16:09:52 +0100 Subject: brcmfmac: be more verbose when PSM's watchdog fires MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's important to inform user so he knows things went wrong. He may also want to get memory dump for further debugging purposes. 
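In sketch form, the notify handler after this change reports the event unconditionally and no longer ignores a failed dump (the complete version is in the hunk below):

        brcmf_err("PSM's watchdog has fired!\n");

        err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
                                         evtmsg->datalen);
        if (err)
                brcmf_err("Failed to get memory dump, %d\n", err);

        return err;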
Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 6f8a4b074c31..f4644cf371c7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -58,10 +58,18 @@ static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp, const struct brcmf_event_msg *evtmsg, void *data) { + int err; + brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); - return brcmf_debug_create_memdump(ifp->drvr->bus_if, data, - evtmsg->datalen); + brcmf_err("PSM's watchdog has fired!\n"); + + err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data, + evtmsg->datalen); + if (err) + brcmf_err("Failed to get memory dump, %d\n", err); + + return err; } void brcmf_debugfs_init(void) -- cgit v1.2.3 From 10435c1192d06bdb0bac7666452d8219d7e7c477 Mon Sep 17 00:00:00 2001 From: Feng Date: Fri, 20 Jan 2017 21:40:43 +0800 Subject: netfilter: nf_tables: Eliminate duplicated code in nf_tables_table_enable() If something fails in nf_tables_table_enable(), it unregisters the chains. But the rollback code is the same as nf_tables_table_disable() almostly, except there is one counter check. Now create one wrapper function to eliminate the duplicated codes. Signed-off-by: Feng Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 48 ++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 6e07c214c208..e6741ac4ccc1 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -576,6 +576,28 @@ err: return err; } +static void _nf_tables_table_disable(struct net *net, + const struct nft_af_info *afi, + struct nft_table *table, + u32 cnt) +{ + struct nft_chain *chain; + u32 i = 0; + + list_for_each_entry(chain, &table->chains, list) { + if (!nft_is_active_next(net, chain)) + continue; + if (!(chain->flags & NFT_BASE_CHAIN)) + continue; + + if (cnt && i++ == cnt) + break; + + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, + afi->nops); + } +} + static int nf_tables_table_enable(struct net *net, const struct nft_af_info *afi, struct nft_table *table) @@ -598,18 +620,8 @@ static int nf_tables_table_enable(struct net *net, } return 0; err: - list_for_each_entry(chain, &table->chains, list) { - if (!nft_is_active_next(net, chain)) - continue; - if (!(chain->flags & NFT_BASE_CHAIN)) - continue; - - if (i-- <= 0) - break; - - nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, - afi->nops); - } + if (i) + _nf_tables_table_disable(net, afi, table, i); return err; } @@ -617,17 +629,7 @@ static void nf_tables_table_disable(struct net *net, const struct nft_af_info *afi, struct nft_table *table) { - struct nft_chain *chain; - - list_for_each_entry(chain, &table->chains, list) { - if (!nft_is_active_next(net, chain)) - continue; - if (!(chain->flags & NFT_BASE_CHAIN)) - continue; - - nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, - afi->nops); - } + _nf_tables_table_disable(net, afi, table, 0); } static int nf_tables_updtable(struct nft_ctx *ctx) -- cgit v1.2.3 From 11df4b760f11ca7528c62b1c4b870735d1c62116 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Jan 2017 18:21:53 +0100 Subject: 
netfilter: conntrack: no need to pass ctinfo to error handler It is never accessed for reading and the only places that write to it are the icmp(6) handlers, which also set skb->nfct (and skb->nfctinfo). The conntrack core specifically checks for attached skb->nfct after ->error() invocation and returns early in this case. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l4proto.h | 2 +- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 12 ++++++------ net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 12 ++++++------ net/netfilter/nf_conntrack_core.c | 3 +-- net/netfilter/nf_conntrack_proto_dccp.c | 1 - net/netfilter/nf_conntrack_proto_sctp.c | 2 +- net/netfilter/nf_conntrack_proto_tcp.c | 1 - net/netfilter/nf_conntrack_proto_udp.c | 3 +-- 8 files changed, 16 insertions(+), 20 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index e7b836590f0b..85e993e278d5 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -55,7 +55,7 @@ struct nf_conntrack_l4proto { void (*destroy)(struct nf_conn *ct); int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - unsigned int dataoff, enum ip_conntrack_info *ctinfo, + unsigned int dataoff, u_int8_t pf, unsigned int hooknum); /* Print out the per-protocol part of the tuple. Return like seq_* */ diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index d075b3cf2400..566afac98a88 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -128,13 +128,13 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, /* Returns conntrack if it dealt with ICMP, and filled in skb fields */ static int icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - enum ip_conntrack_info *ctinfo, unsigned int hooknum) { struct nf_conntrack_tuple innertuple, origtuple; const struct nf_conntrack_l4proto *innerproto; const struct nf_conntrack_tuple_hash *h; const struct nf_conntrack_zone *zone; + enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; NF_CT_ASSERT(skb->nfct == NULL); @@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, return -NF_ACCEPT; } - *ctinfo = IP_CT_RELATED; + ctinfo = IP_CT_RELATED; h = nf_conntrack_find_get(net, zone, &innertuple); if (!h) { @@ -169,11 +169,11 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, } if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) - *ctinfo += IP_CT_IS_REPLY; + ctinfo += IP_CT_IS_REPLY; /* Update skb to refer to this connection */ skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; - skb->nfctinfo = *ctinfo; + skb->nfctinfo = ctinfo; return NF_ACCEPT; } @@ -181,7 +181,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, static int icmp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) + u8 pf, unsigned int hooknum) { const struct icmphdr *icmph; struct icmphdr _ih; @@ -225,7 +225,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl, icmph->type != ICMP_REDIRECT) return NF_ACCEPT; - return icmp_error_message(net, tmpl, skb, ctinfo, hooknum); + return icmp_error_message(net, tmpl, skb, hooknum); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git 
a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index f5a61bc3ec2b..44b9af3f813e 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -145,12 +145,12 @@ static int icmpv6_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int icmp6off, - enum ip_conntrack_info *ctinfo, unsigned int hooknum) { struct nf_conntrack_tuple intuple, origtuple; const struct nf_conntrack_tuple_hash *h; const struct nf_conntrack_l4proto *inproto; + enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; NF_CT_ASSERT(skb->nfct == NULL); @@ -176,7 +176,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl, return -NF_ACCEPT; } - *ctinfo = IP_CT_RELATED; + ctinfo = IP_CT_RELATED; h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp), &intuple); @@ -185,19 +185,19 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl, return -NF_ACCEPT; } else { if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) - *ctinfo += IP_CT_IS_REPLY; + ctinfo += IP_CT_IS_REPLY; } /* Update skb to refer to this connection */ skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; - skb->nfctinfo = *ctinfo; + skb->nfctinfo = ctinfo; return NF_ACCEPT; } static int icmpv6_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) + u8 pf, unsigned int hooknum) { const struct icmp6hdr *icmp6h; struct icmp6hdr _ih; @@ -232,7 +232,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, if (icmp6h->icmp6_type >= 128) return NF_ACCEPT; - return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum); + return icmpv6_error_message(net, tmpl, skb, dataoff, hooknum); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3a073cd9fcf4..86186a2e2715 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1326,8 +1326,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, * inverse of the return code tells to the netfilter * core what to do with the packet. 
*/ if (l4proto->error != NULL) { - ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, - pf, hooknum); + ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum); if (ret <= 0) { NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index b68ce6ac13b3..93dd1c5b7bff 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -561,7 +561,6 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, static int dccp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { struct dccp_hdr _dh, *dh; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 44a647418948..33279aab583d 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -508,7 +508,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, } static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb, - unsigned int dataoff, enum ip_conntrack_info *ctinfo, + unsigned int dataoff, u8 pf, unsigned int hooknum) { const struct sctphdr *sh; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 69f687740c76..b122e9dacfed 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -750,7 +750,6 @@ static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK| static int tcp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index ae63944c9dc4..f6ebce6178ca 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -112,7 +112,6 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, static int udplite_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info *ctinfo, u8 pf, unsigned int hooknum) { unsigned int udplen = skb->len - dataoff; @@ -162,7 +161,7 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl, #endif static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - unsigned int dataoff, enum ip_conntrack_info *ctinfo, + unsigned int dataoff, u_int8_t pf, unsigned int hooknum) { -- cgit v1.2.3 From 6e10148c5c85629832d9156f337cbf67e96b69fe Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Jan 2017 18:21:54 +0100 Subject: netfilter: reset netfilter state when duplicating packet We should also toss nf_bridge_info, if any -- packet is leaving via ip_local_out, also, this skb isn't bridged -- it is a locally generated copy. Also this avoids the need to touch this later when skb->nfct is replaced with 'unsigned long _nfct' in followup patch. 
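The duplicated-packet path then becomes, in sketch form (the same lines appear in both the IPv4 and IPv6 hunks below):

        /* Drop conntrack and bridge state inherited from the original
         * skb, then mark the clone as untracked so it is not counted
         * against the original connection.
         */
        nf_reset(skb);
        skb->nfct = &nf_ct_untracked_get()->ct_general;
        skb->nfctinfo = IP_CT_NEW;
        nf_conntrack_get(skb->nfct);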
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/nf_dup_ipv4.c | 2 +- net/ipv6/netfilter/nf_dup_ipv6.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c index cf986e1c7bbd..a981ef7151ca 100644 --- a/net/ipv4/netfilter/nf_dup_ipv4.c +++ b/net/ipv4/netfilter/nf_dup_ipv4.c @@ -68,7 +68,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, #if IS_ENABLED(CONFIG_NF_CONNTRACK) /* Avoid counting cloned packets towards the original connection. */ - nf_conntrack_put(skb->nfct); + nf_reset(skb); skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index 4a84b5ad9ecb..5f52e5f90e7e 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c @@ -57,7 +57,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, return; #if IS_ENABLED(CONFIG_NF_CONNTRACK) - nf_conntrack_put(skb->nfct); + nf_reset(skb); skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); -- cgit v1.2.3 From 97a6ad13decc16c5adbf181283932daba7e17faf Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Jan 2017 18:21:55 +0100 Subject: netfilter: reduce direct skb->nfct usage Next patch makes direct skb->nfct access illegal, reduce noise in next patch by using accessors we already have. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 9 ++++++--- net/netfilter/nf_conntrack_core.c | 15 +++++++++------ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index cd6018a9ee24..2a344ebd7ebe 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1554,10 +1554,13 @@ static inline void ip_vs_notrack(struct sk_buff *skb) struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (!ct || !nf_ct_is_untracked(ct)) { - nf_conntrack_put(skb->nfct); - skb->nfct = &nf_ct_untracked_get()->ct_general; + struct nf_conn *untracked; + + nf_conntrack_put(&ct->ct_general); + untracked = nf_ct_untracked_get(); + nf_conntrack_get(&untracked->ct_general); + skb->nfct = &untracked->ct_general; skb->nfctinfo = IP_CT_NEW; - nf_conntrack_get(skb->nfct); } #endif } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 86186a2e2715..adb7af3a4c4c 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -686,8 +686,11 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, !nfct_nat(ct) && !nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use)) { - nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct); - nf_conntrack_put(skb->nfct); + enum ip_conntrack_info oldinfo; + struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); + + nf_ct_acct_merge(ct, ctinfo, loser_ct); + nf_conntrack_put(&loser_ct->ct_general); /* Assign conntrack already in hashes to this skbuff. Don't * modify skb->nfctinfo to ensure consistent stateful filtering. 
*/ @@ -1288,7 +1291,7 @@ unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, struct sk_buff *skb) { - struct nf_conn *ct, *tmpl = NULL; + struct nf_conn *ct, *tmpl; enum ip_conntrack_info ctinfo; struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; @@ -1298,9 +1301,9 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, int set_reply = 0; int ret; - if (skb->nfct) { + tmpl = nf_ct_get(skb, &ctinfo); + if (tmpl) { /* Previously seen (loopback or untracked)? Ignore. */ - tmpl = (struct nf_conn *)skb->nfct; if (!nf_ct_is_template(tmpl)) { NF_CT_STAT_INC_ATOMIC(net, ignore); return NF_ACCEPT; @@ -1364,7 +1367,7 @@ repeat: /* Invalid: inverse of the return code tells * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); - nf_conntrack_put(skb->nfct); + nf_conntrack_put(&ct->ct_general); skb->nfct = NULL; NF_CT_STAT_INC_ATOMIC(net, invalid); if (ret == -NF_DROP) -- cgit v1.2.3 From cb9c68363efb6d1f950ec55fb06e031ee70db5fc Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Jan 2017 18:21:56 +0100 Subject: skbuff: add and use skb_nfct helper Followup patch renames skb->nfct and changes its type so add a helper to avoid intrusive rename change later. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/skbuff.h | 13 ++++++++++--- include/net/netfilter/nf_conntrack_core.h | 2 +- net/core/skbuff.c | 2 +- net/ipv4/netfilter/ipt_SYNPROXY.c | 8 ++++---- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 2 +- net/ipv4/netfilter/nf_defrag_ipv4.c | 4 ++-- net/ipv4/netfilter/nf_dup_ipv4.c | 2 +- net/ipv6/netfilter/ip6t_SYNPROXY.c | 8 ++++---- net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 4 ++-- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 4 ++-- net/netfilter/nf_conntrack_core.c | 4 ++-- net/netfilter/nf_nat_helper.c | 2 +- net/netfilter/xt_CT.c | 2 +- net/openvswitch/conntrack.c | 6 +++--- net/sched/cls_flow.c | 2 +- 15 files changed, 36 insertions(+), 29 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b53c0cfd417e..276431e047af 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3553,6 +3553,15 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, skb->csum = csum_add(skb->csum, delta); } +static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + return skb->nfct; +#else + return NULL; +#endif +} + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) @@ -3652,9 +3661,7 @@ static inline bool skb_irq_freeable(const struct sk_buff *skb) #if IS_ENABLED(CONFIG_XFRM) !skb->sp && #endif -#if IS_ENABLED(CONFIG_NF_CONNTRACK) - !skb->nfct && -#endif + !skb_nfct(skb) && !skb->_skb_refdst && !skb_has_frag_list(skb); } diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 62e17d1319ff..84ec7ca5f195 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -62,7 +62,7 @@ int __nf_conntrack_confirm(struct sk_buff *skb); /* Confirm a connection: returns NF_DROP if packet must be dropped. 
*/ static inline int nf_conntrack_confirm(struct sk_buff *skb) { - struct nf_conn *ct = (struct nf_conn *)skb->nfct; + struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb); int ret = NF_ACCEPT; if (ct && !nf_ct_is_untracked(ct)) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5a03730fbc1a..cac3ebfb4b45 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -655,7 +655,7 @@ static void skb_release_head_state(struct sk_buff *skb) skb->destructor(skb); } #if IS_ENABLED(CONFIG_NF_CONNTRACK) - nf_conntrack_put(skb->nfct); + nf_conntrack_put(skb_nfct(skb)); #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(skb->nf_bridge); diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 30c0de53e254..a12d4f0aa674 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -107,8 +107,8 @@ synproxy_send_client_synack(struct net *net, synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, - niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } static void @@ -230,8 +230,8 @@ synproxy_send_client_ack(struct net *net, synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, - niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } static bool diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 566afac98a88..478a025909fc 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -137,7 +137,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; - NF_CT_ASSERT(skb->nfct == NULL); + NF_CT_ASSERT(!skb_nfct(skb)); zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); /* Are they talking about one of our connections? */ diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 49bd6a54404f..346bf7ccac08 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -45,7 +45,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, { u16 zone_id = NF_CT_DEFAULT_ZONE_ID; #if IS_ENABLED(CONFIG_NF_CONNTRACK) - if (skb->nfct) { + if (skb_nfct(skb)) { enum ip_conntrack_info ctinfo; const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); @@ -75,7 +75,7 @@ static unsigned int ipv4_conntrack_defrag(void *priv, #if !IS_ENABLED(CONFIG_NF_NAT) /* Previously seen (loopback)? Ignore. Do this before fragment check. 
*/ - if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) + if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb))) return NF_ACCEPT; #endif #endif diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c index a981ef7151ca..1a5e1f53ceaa 100644 --- a/net/ipv4/netfilter/nf_dup_ipv4.c +++ b/net/ipv4/netfilter/nf_dup_ipv4.c @@ -71,7 +71,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, nf_reset(skb); skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; - nf_conntrack_get(skb->nfct); + nf_conntrack_get(skb_nfct(skb)); #endif /* * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 98c8dd38575a..2dc01d2c6ec0 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -121,8 +121,8 @@ synproxy_send_client_synack(struct net *net, synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, - niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } static void @@ -244,8 +244,8 @@ synproxy_send_client_ack(struct net *net, synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, - niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } static bool diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 44b9af3f813e..09f1661a4e88 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -153,7 +153,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl, enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; - NF_CT_ASSERT(skb->nfct == NULL); + NF_CT_ASSERT(!skb_nfct(skb)); /* Are they talking about one of our connections? */ if (!nf_ct_get_tuplepr(skb, @@ -224,7 +224,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, noct_valid_new[type]) { skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; - nf_conntrack_get(skb->nfct); + nf_conntrack_get(skb_nfct(skb)); return NF_ACCEPT; } diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index 8e0bdd058787..ada60d1a991b 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -37,7 +37,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, { u16 zone_id = NF_CT_DEFAULT_ZONE_ID; #if IS_ENABLED(CONFIG_NF_CONNTRACK) - if (skb->nfct) { + if (skb_nfct(skb)) { enum ip_conntrack_info ctinfo; const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); @@ -61,7 +61,7 @@ static unsigned int ipv6_defrag(void *priv, #if IS_ENABLED(CONFIG_NF_CONNTRACK) /* Previously seen (loopback)? */ - if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) + if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb))) return NF_ACCEPT; #endif diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index adb7af3a4c4c..78aebf0ee6e3 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1357,7 +1357,7 @@ repeat: goto out; } - NF_CT_ASSERT(skb->nfct); + NF_CT_ASSERT(skb_nfct(skb)); /* Decide what timeout policy we want to apply to this flow. 
*/ timeouts = nf_ct_timeout_lookup(net, ct, l4proto); @@ -1528,7 +1528,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) /* Attach to new skbuff, and increment count */ nskb->nfct = &ct->ct_general; nskb->nfctinfo = ctinfo; - nf_conntrack_get(nskb->nfct); + nf_conntrack_get(skb_nfct(nskb)); } /* Bring out ya dead! */ diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index 2840abb5bb99..211661cb2c90 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -60,7 +60,7 @@ static void mangle_contents(struct sk_buff *skb, __skb_trim(skb, skb->len + rep_len - match_len); } - if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) { + if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) { /* fix IP hdr checksum information */ ip_hdr(skb)->tot_len = htons(skb->len); ip_send_check(ip_hdr(skb)); diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 26b0bccfa0c5..cd7e29910ae1 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -415,7 +415,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; - nf_conntrack_get(skb->nfct); + nf_conntrack_get(skb_nfct(skb)); return XT_CONTINUE; } diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 6b78bab27755..452557946147 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -721,8 +721,8 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, /* Associate skb with specified zone. */ if (tmpl) { - if (skb->nfct) - nf_conntrack_put(skb->nfct); + if (skb_nfct(skb)) + nf_conntrack_put(skb_nfct(skb)); nf_conntrack_get(&tmpl->ct_general); skb->nfct = &tmpl->ct_general; skb->nfctinfo = IP_CT_NEW; @@ -819,7 +819,7 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key, if (err) return err; - ct = (struct nf_conn *)skb->nfct; + ct = (struct nf_conn *)skb_nfct(skb); if (ct) nf_ct_deliver_cached_events(ct); } diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 6575aba87630..3d6b9286c203 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -129,7 +129,7 @@ static u32 flow_get_mark(const struct sk_buff *skb) static u32 flow_get_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) - return addr_fold(skb->nfct); + return addr_fold(skb_nfct(skb)); #else return 0; #endif -- cgit v1.2.3 From c74454fadd5ea6fc866ffe2c417a0dba56b2bf1c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Jan 2017 18:21:57 +0100 Subject: netfilter: add and use nf_ct_set helper Add a helper to assign a nf_conn entry and the ctinfo bits to an sk_buff. This avoids changing code in followup patch that merges skb->nfct and skb->nfctinfo into skb->_nfct. 
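The conversion is mechanical: each call site replaces the open-coded pair of assignments with the helper, e.g. (illustrative before/after, mirroring the hunks below):

	/* before */
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = IP_CT_NEW;

	/* after */
	nf_ct_set(skb, ct, IP_CT_NEW);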
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 3 +-- include/net/netfilter/nf_conntrack.h | 8 ++++++++ net/ipv4/netfilter/ipt_SYNPROXY.c | 3 +-- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 3 +-- net/ipv4/netfilter/nf_dup_ipv4.c | 3 +-- net/ipv6/netfilter/ip6t_SYNPROXY.c | 3 +-- net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 6 ++---- net/ipv6/netfilter/nf_dup_ipv6.c | 3 +-- net/netfilter/nf_conntrack_core.c | 11 +++-------- net/netfilter/nft_ct.c | 3 +-- net/netfilter/xt_CT.c | 6 ++---- net/openvswitch/conntrack.c | 6 ++---- 12 files changed, 24 insertions(+), 34 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 2a344ebd7ebe..4b46c591b542 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1559,8 +1559,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb) nf_conntrack_put(&ct->ct_general); untracked = nf_ct_untracked_get(); nf_conntrack_get(&untracked->ct_general); - skb->nfct = &untracked->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, untracked, IP_CT_NEW); } #endif } diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 5916aa9ab3f0..d704aed11684 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -34,6 +34,7 @@ union nf_conntrack_proto { struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; struct nf_ct_gre gre; + unsigned int tmpl_padto; }; union nf_conntrack_expect_proto { @@ -341,6 +342,13 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, gfp_t flags); void nf_ct_tmpl_free(struct nf_conn *tmpl); +static inline void +nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) +{ + skb->nfct = &ct->ct_general; + skb->nfctinfo = info; +} + #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v)) diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index a12d4f0aa674..3240a2614e82 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -57,8 +57,7 @@ synproxy_send_tcp(struct net *net, goto free_nskb; if (nfct) { - nskb->nfct = nfct; - nskb->nfctinfo = ctinfo; + nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo); nf_conntrack_get(nfct); } diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 478a025909fc..73c591d8a9a8 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -172,8 +172,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, ctinfo += IP_CT_IS_REPLY; /* Update skb to refer to this connection */ - skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; - skb->nfctinfo = ctinfo; + nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo); return NF_ACCEPT; } diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c index 1a5e1f53ceaa..f0dbff05fc28 100644 --- a/net/ipv4/netfilter/nf_dup_ipv4.c +++ b/net/ipv4/netfilter/nf_dup_ipv4.c @@ -69,8 +69,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, #if IS_ENABLED(CONFIG_NF_CONNTRACK) /* Avoid counting cloned packets towards the original connection. 
*/ nf_reset(skb); - skb->nfct = &nf_ct_untracked_get()->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); nf_conntrack_get(skb_nfct(skb)); #endif /* diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 2dc01d2c6ec0..4ef1ddd4bbbd 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -71,8 +71,7 @@ synproxy_send_tcp(struct net *net, skb_dst_set(nskb, dst); if (nfct) { - nskb->nfct = nfct; - nskb->nfctinfo = ctinfo; + nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo); nf_conntrack_get(nfct); } diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 09f1661a4e88..d2c2ccbfbe72 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -189,8 +189,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl, } /* Update skb to refer to this connection */ - skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; - skb->nfctinfo = ctinfo; + nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo); return NF_ACCEPT; } @@ -222,8 +221,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, type = icmp6h->icmp6_type - 130; if (type >= 0 && type < sizeof(noct_valid_new) && noct_valid_new[type]) { - skb->nfct = &nf_ct_untracked_get()->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); nf_conntrack_get(skb_nfct(skb)); return NF_ACCEPT; } diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index 5f52e5f90e7e..ff04f6a7f45b 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c @@ -58,8 +58,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, #if IS_ENABLED(CONFIG_NF_CONNTRACK) nf_reset(skb); - skb->nfct = &nf_ct_untracked_get()->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); nf_conntrack_get(skb->nfct); #endif if (hooknum == NF_INET_PRE_ROUTING || diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 78aebf0ee6e3..c9bd10747864 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -691,10 +691,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, nf_ct_acct_merge(ct, ctinfo, loser_ct); nf_conntrack_put(&loser_ct->ct_general); - /* Assign conntrack already in hashes to this skbuff. Don't - * modify skb->nfctinfo to ensure consistent stateful filtering. 
- */ - skb->nfct = &ct->ct_general; + nf_ct_set(skb, ct, oldinfo); return NF_ACCEPT; } NF_CT_STAT_INC(net, drop); @@ -1282,8 +1279,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, } *set_reply = 0; } - skb->nfct = &ct->ct_general; - skb->nfctinfo = *ctinfo; + nf_ct_set(skb, ct, *ctinfo); return ct; } @@ -1526,8 +1522,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) ctinfo = IP_CT_RELATED; /* Attach to new skbuff, and increment count */ - nskb->nfct = &ct->ct_general; - nskb->nfctinfo = ctinfo; + nf_ct_set(nskb, ct, ctinfo); nf_conntrack_get(skb_nfct(nskb)); } diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index d774d7823688..66a2377510e1 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -554,8 +554,7 @@ static void nft_notrack_eval(const struct nft_expr *expr, ct = nf_ct_untracked_get(); atomic_inc(&ct->ct_general.use); - skb->nfct = &ct->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, ct, IP_CT_NEW); } static struct nft_expr_type nft_notrack_type; diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index cd7e29910ae1..51f00e1e1208 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -30,8 +30,7 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) if (!ct) ct = nf_ct_untracked_get(); atomic_inc(&ct->ct_general.use); - skb->nfct = &ct->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, ct, IP_CT_NEW); return XT_CONTINUE; } @@ -413,8 +412,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) if (skb->nfct != NULL) return XT_CONTINUE; - skb->nfct = &nf_ct_untracked_get()->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); nf_conntrack_get(skb_nfct(skb)); return XT_CONTINUE; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 452557946147..d1fbfcaa009a 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -460,8 +460,7 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, ct = nf_ct_tuplehash_to_ctrack(h); - skb->nfct = &ct->ct_general; - skb->nfctinfo = ovs_ct_get_info(h); + nf_ct_set(skb, ct, ovs_ct_get_info(h)); return ct; } @@ -724,8 +723,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, if (skb_nfct(skb)) nf_conntrack_put(skb_nfct(skb)); nf_conntrack_get(&tmpl->ct_general); - skb->nfct = &tmpl->ct_general; - skb->nfctinfo = IP_CT_NEW; + nf_ct_set(skb, tmpl, IP_CT_NEW); } err = nf_conntrack_in(net, info->family, -- cgit v1.2.3 From 303223092081963513494b4377fa1ac9e362ed4b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Jan 2017 18:21:58 +0100 Subject: netfilter: guarantee 8 byte minalign for template addresses The next change will merge skb->nfct pointer and skb->nfctinfo status bits into single skb->_nfct (unsigned long) area. For this to work nf_conn addresses must always be aligned at least on an 8 byte boundary since we will need the lower 3bits to store nfctinfo. Conntrack templates are allocated via kmalloc. kbuild test robot reported BUILD_BUG_ON failed: NFCT_INFOMASK >= ARCH_KMALLOC_MINALIGN on v1 of this patchset, so not all platforms meet this requirement. Do manual alignment if needed, the alignment offset is stored in the nf_conn entry protocol area. This works because templates are not handed off to L4 protocol trackers. 
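Illustrative arithmetic for the manual alignment (the pointer value is invented; the real code is in the hunk below): the template is allocated NFCT_INFOMASK bytes larger than needed, NFCT_ALIGN() rounds the kmalloc result up to the next 8-byte boundary, and the padding is remembered so the free path can hand the original pointer back to kfree():

	/* suppose kzalloc() returned p == 0x...0c (only 4-byte aligned) */
	tmpl = (struct nf_conn *)(((unsigned long)p + 7UL) & ~7UL);	/* 0x...10 */
	tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;		/* 4 bytes */
	/* ... */
	kfree((char *)tmpl - tmpl->proto.tmpl_padto);			/* frees p */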
Reported-by: kbuild test robot Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 2 ++ net/netfilter/nf_conntrack_core.c | 29 ++++++++++++++++++++++++----- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index d704aed11684..06d3d2d24fe0 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -163,6 +163,8 @@ void nf_conntrack_alter_reply(struct nf_conn *ct, int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack); +#define NFCT_INFOMASK 7UL + /* Return conntrack_info and tuple hash for given skb. */ static inline struct nf_conn * nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index c9bd10747864..768968fba7f6 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -350,16 +350,31 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct) spin_unlock(&pcpu->lock); } +#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK) + /* Released via destroy_conntrack() */ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, const struct nf_conntrack_zone *zone, gfp_t flags) { - struct nf_conn *tmpl; + struct nf_conn *tmpl, *p; - tmpl = kzalloc(sizeof(*tmpl), flags); - if (tmpl == NULL) - return NULL; + if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) { + tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags); + if (!tmpl) + return NULL; + + p = tmpl; + tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); + if (tmpl != p) { + tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); + tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p; + } + } else { + tmpl = kzalloc(sizeof(*tmpl), flags); + if (!tmpl) + return NULL; + } tmpl->status = IPS_TEMPLATE; write_pnet(&tmpl->ct_net, net); @@ -374,7 +389,11 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl) { nf_ct_ext_destroy(tmpl); nf_ct_ext_free(tmpl); - kfree(tmpl); + + if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) + kfree((char *)tmpl - tmpl->proto.tmpl_padto); + else + kfree(tmpl); } EXPORT_SYMBOL_GPL(nf_ct_tmpl_free); -- cgit v1.2.3 From a9e419dc7be6997409dca6d1b9daf3cc7046902f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Jan 2017 18:21:59 +0100 Subject: netfilter: merge ctinfo into nfct pointer storage area After this change conntrack operations (lookup, creation, matching from ruleset) only access one instead of two sk_buff cache lines. This works for normal conntracks because those are allocated from a slab that guarantees hw cacheline or 8byte alignment (whatever is larger) so the 3 bits needed for ctinfo won't overlap with nf_conn addresses. Template allocation now does manual address alignment (see previous change) on arches that don't have sufficent kmalloc min alignment. Some spots intentionally use skb->_nfct instead of skb_nfct() helpers, this is to avoid undoing the skb_nfct() use when we remove untracked conntrack object in the future. 
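A short walk-through of the packing that follows (illustrative; the pointer value is invented and IP_CT_NEW is assumed to be numerically 2):

	/* nf_ct_set(): pointer and ctinfo share one word */
	skb->_nfct = (unsigned long)ct | IP_CT_NEW;		/* 0x...5678 | 2 */

	/* nf_ct_get(): split them apart again */
	*ctinfo = skb->_nfct & NFCT_INFOMASK;			/* low 3 bits -> 2 */
	ct = (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK);	/* 0x...5678 */

The trick relies on every nf_conn being at least 8-byte aligned, which the previous patch arranges for templates and the kmem_cache alignment in the hunk below arranges for ordinary entries.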
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/skbuff.h | 21 +++++++++------------ include/net/netfilter/nf_conntrack.h | 11 ++++++----- net/ipv6/netfilter/nf_dup_ipv6.c | 2 +- net/netfilter/core.c | 2 +- net/netfilter/nf_conntrack_core.c | 11 ++++++----- net/netfilter/nf_conntrack_standalone.c | 3 +++ net/netfilter/xt_CT.c | 4 ++-- 7 files changed, 28 insertions(+), 26 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 276431e047af..ac0bc085b139 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -585,7 +585,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @cloned: Head may be cloned (check refcnt to be sure) * @ip_summed: Driver fed us an IP checksum * @nohdr: Payload reference only, must not modify header - * @nfctinfo: Relationship of this skb to the connection * @pkt_type: Packet class * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs @@ -594,7 +593,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @nf_trace: netfilter packet trace flag * @protocol: Packet protocol from driver * @destructor: Destruct function - * @nfct: Associated connection, if any + * @_nfct: Associated connection, if any (with nfctinfo bits) * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @skb_iif: ifindex of device we arrived on * @tc_index: Traffic control index @@ -668,7 +667,7 @@ struct sk_buff { struct sec_path *sp; #endif #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - struct nf_conntrack *nfct; + unsigned long _nfct; #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info *nf_bridge; @@ -721,7 +720,6 @@ struct sk_buff { __u8 pkt_type:3; __u8 pfmemalloc:1; __u8 ignore_df:1; - __u8 nfctinfo:3; __u8 nf_trace:1; __u8 ip_summed:2; @@ -836,6 +834,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) #define SKB_DST_NOREF 1UL #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) +#define SKB_NFCT_PTRMASK ~(7UL) /** * skb_dst - returns skb dst_entry * @skb: buffer @@ -3556,7 +3555,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) - return skb->nfct; + return (void *)(skb->_nfct & SKB_NFCT_PTRMASK); #else return NULL; #endif @@ -3590,8 +3589,8 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) static inline void nf_reset(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - nf_conntrack_put(skb->nfct); - skb->nfct = NULL; + nf_conntrack_put(skb_nfct(skb)); + skb->_nfct = 0; #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(skb->nf_bridge); @@ -3611,10 +3610,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - dst->nfct = src->nfct; - nf_conntrack_get(src->nfct); - if (copy) - dst->nfctinfo = src->nfctinfo; + dst->_nfct = src->_nfct; + nf_conntrack_get(skb_nfct(src)); #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) dst->nf_bridge = src->nf_bridge; @@ -3629,7 +3626,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - nf_conntrack_put(dst->nfct); + nf_conntrack_put(skb_nfct(dst)); #endif #if 
IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(dst->nf_bridge); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 06d3d2d24fe0..f540f9ad2af4 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -76,7 +76,7 @@ struct nf_conn { /* Usage count in here is 1 for hash table, 1 per skb, * plus 1 for any connection(s) we are `master' for * - * Hint, SKB address this struct and refcnt via skb->nfct and + * Hint, SKB address this struct and refcnt via skb->_nfct and * helpers nf_conntrack_get() and nf_conntrack_put(). * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt, * beware nf_ct_get() is different and don't inc refcnt. @@ -164,13 +164,15 @@ int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack); #define NFCT_INFOMASK 7UL +#define NFCT_PTRMASK ~(NFCT_INFOMASK) /* Return conntrack_info and tuple hash for given skb. */ static inline struct nf_conn * nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo) { - *ctinfo = skb->nfctinfo; - return (struct nf_conn *)skb->nfct; + *ctinfo = skb->_nfct & NFCT_INFOMASK; + + return (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK); } /* decrement reference count on a conntrack */ @@ -347,8 +349,7 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl); static inline void nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) { - skb->nfct = &ct->ct_general; - skb->nfctinfo = info; + skb->_nfct = (unsigned long)ct | info; } #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index ff04f6a7f45b..888ecd106e5f 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c @@ -59,7 +59,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, #if IS_ENABLED(CONFIG_NF_CONNTRACK) nf_reset(skb); nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); - nf_conntrack_get(skb->nfct); + nf_conntrack_get(skb_nfct(skb)); #endif if (hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_IN) { diff --git a/net/netfilter/core.c b/net/netfilter/core.c index ce6adfae521a..a87a6f8a74d8 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -375,7 +375,7 @@ void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb) { void (*attach)(struct sk_buff *, const struct sk_buff *); - if (skb->nfct) { + if (skb->_nfct) { rcu_read_lock(); attach = rcu_dereference(ip_ct_attach); if (attach) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 768968fba7f6..47c4ea53daa6 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1239,7 +1239,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; } -/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ +/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */ static inline struct nf_conn * resolve_normal_ct(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, @@ -1323,7 +1323,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, NF_CT_STAT_INC_ATOMIC(net, ignore); return NF_ACCEPT; } - skb->nfct = NULL; + skb->_nfct = 0; } /* rcu_read_lock()ed by nf_hook_thresh */ @@ -1352,7 +1352,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, goto out; } /* ICMP[v6] protocol trackers may assign one conntrack. 
*/ - if (skb->nfct) + if (skb->_nfct) goto out; } repeat: @@ -1383,7 +1383,7 @@ repeat: * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(&ct->ct_general); - skb->nfct = NULL; + skb->_nfct = 0; NF_CT_STAT_INC_ATOMIC(net, invalid); if (ret == -NF_DROP) NF_CT_STAT_INC_ATOMIC(net, drop); @@ -1878,7 +1878,8 @@ int nf_conntrack_init_start(void) nf_conntrack_max = max_factor * nf_conntrack_htable_size; nf_conntrack_cachep = kmem_cache_create("nf_conntrack", - sizeof(struct nf_conn), 0, + sizeof(struct nf_conn), + NFCT_INFOMASK + 1, SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL); if (!nf_conntrack_cachep) goto err_cachep; diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index d009ae663453..2256147dcaad 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -642,6 +642,9 @@ static int __init nf_conntrack_standalone_init(void) if (ret < 0) goto out_start; + BUILD_BUG_ON(SKB_NFCT_PTRMASK != NFCT_PTRMASK); + BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER); + #ifdef CONFIG_SYSCTL nf_ct_netfilter_header = register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 51f00e1e1208..b008db0184b8 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -23,7 +23,7 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) { /* Previously seen (loopback)? Ignore. */ - if (skb->nfct != NULL) + if (skb->_nfct != 0) return XT_CONTINUE; /* special case the untracked ct : we want the percpu object */ @@ -409,7 +409,7 @@ static unsigned int notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) { /* Previously seen (loopback)? Ignore. */ - if (skb->nfct != NULL) + if (skb->_nfct != 0) return XT_CONTINUE; nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); -- cgit v1.2.3 From 90c1aff702d449a1a248c4829d51c0bc677f968e Mon Sep 17 00:00:00 2001 From: David Windsor Date: Mon, 23 Jan 2017 22:24:29 -0500 Subject: ipvs: free ip_vs_dest structs when refcnt=0 Currently, the ip_vs_dest cache frees ip_vs_dest objects when their reference count becomes < 0. Aside from not being semantically sound, this is problematic for the new type refcount_t, which will be introduced shortly in a separate patch. refcount_t is the new kernel type for holding reference counts, and provides overflow protection and a constrained interface relative to atomic_t (the type currently being used for kernel reference counts). Per Julian Anastasov: "The problem is that dest_trash currently holds deleted dests (unlinked from RCU lists) with refcnt=0." Changing dest_trash to hold dest with refcnt=1 will allow us to free ip_vs_dest structs when their refcnt=0, in ip_vs_dest_put_and_free(). 
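The resulting put-and-free helper then follows the conventional dec-and-test idiom (this is the post-patch ip_vs_dest_put_and_free() from the hunk below, with an added comment):

	static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
	{
		/* dest_trash now holds its own reference, so the last put
		 * really is the last user and may free the structure
		 */
		if (atomic_dec_and_test(&dest->refcnt))
			kfree(dest);
	}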
Signed-off-by: David Windsor Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 2 +- net/netfilter/ipvs/ip_vs_ctl.c | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 4b46c591b542..7bdfa7d78363 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1421,7 +1421,7 @@ static inline void ip_vs_dest_put(struct ip_vs_dest *dest) static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) { - if (atomic_dec_return(&dest->refcnt) < 0) + if (atomic_dec_and_test(&dest->refcnt)) kfree(dest); } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 55e0169caa4c..5fc4836e7c79 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -711,7 +711,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af, dest->vport == svc->port))) { /* HIT */ list_del(&dest->t_list); - ip_vs_dest_hold(dest); goto out; } } @@ -741,7 +740,7 @@ static void ip_vs_dest_free(struct ip_vs_dest *dest) * When the ip_vs_control_clearup is activated by ipvs module exit, * the service tables must have been flushed and all the connections * are expired, and the refcnt of each destination in the trash must - * be 0, so we simply release them here. + * be 1, so we simply release them here. */ static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs) { @@ -1080,11 +1079,10 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest, if (list_empty(&ipvs->dest_trash) && !cleanup) mod_timer(&ipvs->dest_trash_timer, jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); - /* dest lives in trash without reference */ + /* dest lives in trash with reference */ list_add(&dest->t_list, &ipvs->dest_trash); dest->idle_start = 0; spin_unlock_bh(&ipvs->dest_trash_lock); - ip_vs_dest_put(dest); } @@ -1160,7 +1158,7 @@ static void ip_vs_dest_trash_expire(unsigned long data) spin_lock(&ipvs->dest_trash_lock); list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { - if (atomic_read(&dest->refcnt) > 0) + if (atomic_read(&dest->refcnt) > 1) continue; if (dest->idle_start) { if (time_before(now, dest->idle_start + -- cgit v1.2.3 From 2851940ffee313e0ff12540a8e11a8c54dea9c65 Mon Sep 17 00:00:00 2001 From: Michal Kubeček Date: Tue, 31 Jan 2017 10:30:06 +0100 Subject: netfilter: allow logging from non-init namespaces Commit 69b34fb996b2 ("netfilter: xt_LOG: add net namespace support for xt_LOG") disabled logging packets using the LOG target from non-init namespaces. The motivation was to prevent containers from flooding kernel log of the host. The plan was to keep it that way until syslog namespace implementation allows containers to log in a safe way. However, the work on syslog namespace seems to have hit a dead end somewhere in 2013 and there are users who want to use xt_LOG in all network namespaces. This patch allows to do so by setting /proc/sys/net/netfilter/nf_log_all_netns to a nonzero value. This sysctl is only accessible from init_net so that one cannot switch the behaviour from inside a container. 
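Each logger's packet hook then gates on the new sysctl instead of unconditionally bailing out for foreign namespaces; the pattern repeated across the hunks below is simply:

	/* FIXME: Disabled from containers until syslog ns is supported */
	if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
		return;

so an administrator who accepts the log-flooding risk can enable it at runtime with echo 1 > /proc/sys/net/netfilter/nf_log_all_netns from the initial network namespace.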
Signed-off-by: Michal Kubecek Signed-off-by: Pablo Neira Ayuso --- Documentation/networking/netfilter-sysctl.txt | 10 ++++++++++ include/net/netfilter/nf_log.h | 3 +++ net/bridge/netfilter/ebt_log.c | 2 +- net/ipv4/netfilter/nf_log_arp.c | 2 +- net/ipv4/netfilter/nf_log_ipv4.c | 2 +- net/ipv6/netfilter/nf_log_ipv6.c | 2 +- net/netfilter/nf_log.c | 24 ++++++++++++++++++++++++ 7 files changed, 41 insertions(+), 4 deletions(-) create mode 100644 Documentation/networking/netfilter-sysctl.txt diff --git a/Documentation/networking/netfilter-sysctl.txt b/Documentation/networking/netfilter-sysctl.txt new file mode 100644 index 000000000000..55791e50e169 --- /dev/null +++ b/Documentation/networking/netfilter-sysctl.txt @@ -0,0 +1,10 @@ +/proc/sys/net/netfilter/* Variables: + +nf_log_all_netns - BOOLEAN + 0 - disabled (default) + not 0 - enabled + + By default, only init_net namespace can log packets into kernel log + with LOG target; this aims to prevent containers from flooding host + kernel log. If enabled, this target also works in other network + namespaces. This variable is only accessible from init_net. diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 450f87f95415..42e0696f38d8 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -51,6 +51,9 @@ struct nf_logger { struct module *me; }; +/* sysctl_nf_log_all_netns - allow LOG target in all network namespaces */ +extern int sysctl_nf_log_all_netns; + /* Function to register/unregister log function. */ int nf_log_register(u_int8_t pf, struct nf_logger *logger); void nf_log_unregister(struct nf_logger *logger); diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index e88bd4827ac1..98b9c8e8615e 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c @@ -78,7 +78,7 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, unsigned int bitmask; /* FIXME: Disabled from containers until syslog ns is supported */ - if (!net_eq(net, &init_net)) + if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns) return; spin_lock_bh(&ebt_log_lock); diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c index b24795e2ee6d..f6f713376e6e 100644 --- a/net/ipv4/netfilter/nf_log_arp.c +++ b/net/ipv4/netfilter/nf_log_arp.c @@ -87,7 +87,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf, struct nf_log_buf *m; /* FIXME: Disabled from containers until syslog ns is supported */ - if (!net_eq(net, &init_net)) + if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns) return; m = nf_log_buf_open(); diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c index 856648966f4c..c83a9963269b 100644 --- a/net/ipv4/netfilter/nf_log_ipv4.c +++ b/net/ipv4/netfilter/nf_log_ipv4.c @@ -319,7 +319,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf, struct nf_log_buf *m; /* FIXME: Disabled from containers until syslog ns is supported */ - if (!net_eq(net, &init_net)) + if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns) return; m = nf_log_buf_open(); diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c index 57d86066a13b..055c51b80f5d 100644 --- a/net/ipv6/netfilter/nf_log_ipv6.c +++ b/net/ipv6/netfilter/nf_log_ipv6.c @@ -351,7 +351,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf, struct nf_log_buf *m; /* FIXME: Disabled from containers until syslog ns is supported */ - if (!net_eq(net, &init_net)) + if (!net_eq(net, &init_net) && 
!sysctl_nf_log_all_netns) return; m = nf_log_buf_open(); diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 3dca90dc24ad..0a034f52b912 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -16,6 +16,9 @@ #define NF_LOG_PREFIXLEN 128 #define NFLOGGER_NAME_LEN 64 +int sysctl_nf_log_all_netns __read_mostly; +EXPORT_SYMBOL(sysctl_nf_log_all_netns); + static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; static DEFINE_MUTEX(nf_log_mutex); @@ -414,6 +417,18 @@ static const struct file_operations nflog_file_ops = { #ifdef CONFIG_SYSCTL static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; +static struct ctl_table_header *nf_log_sysctl_fhdr; + +static struct ctl_table nf_log_sysctl_ftable[] = { + { + .procname = "nf_log_all_netns", + .data = &sysctl_nf_log_all_netns, + .maxlen = sizeof(sysctl_nf_log_all_netns), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { } +}; static int nf_log_proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -483,6 +498,10 @@ static int netfilter_log_sysctl_init(struct net *net) nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } + nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter", + nf_log_sysctl_ftable); + if (!nf_log_sysctl_fhdr) + goto err_freg; } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) @@ -499,6 +518,9 @@ static int netfilter_log_sysctl_init(struct net *net) err_reg: if (!net_eq(net, &init_net)) kfree(table); + else + unregister_net_sysctl_table(nf_log_sysctl_fhdr); +err_freg: err_alloc: return -ENOMEM; } @@ -511,6 +533,8 @@ static void netfilter_log_sysctl_exit(struct net *net) unregister_net_sysctl_table(net->nf.nf_log_dir_header); if (!net_eq(net, &init_net)) kfree(table); + else + unregister_net_sysctl_table(nf_log_sysctl_fhdr); } #else static int netfilter_log_sysctl_init(struct net *net) -- cgit v1.2.3 From 1f3d62090d3ba4d0c14e5271be87812fc577b197 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 1 Feb 2017 17:46:02 +0100 Subject: xgene_enet: remove bogus forward declarations The device match tables for both the xgene_enet driver and its phy driver have forward declarations that declare an array without a length, leading to a clang warning when they are not followed by an actual defitinition: drivers/net/ethernet/apm/xgene/../../../phy/mdio-xgene.h:135:34: warning: tentative array definition assumed to have one element drivers/net/ethernet/apm/xgene/xgene_enet_main.c:33:36: warning: tentative array definition assumed to have one element The declarations for the mdio driver are even in a header file, so they cause duplicate definitions of the tables for each file that includes them. This removes all four forward declarations and moves the actual definitions up a little, so they are in front of their first user. For the OF match tables, this means having to remove the #ifdef around them, and passing the actual structure into of_match_device(). This has no effect on the generated object code though, as the of_match_device function has an empty stub that does not evaluate its argument, and the symbol gets dropped either way. Fixes: 43b3cf6634a4 ("drivers: net: phy: xgene: Add MDIO driver") Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 50 ++++++++++++------------ drivers/net/phy/mdio-xgene.c | 50 ++++++++++++------------ drivers/net/phy/mdio-xgene.h | 4 -- 3 files changed, 48 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index ab43c52723df..d0d0d12b531f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1964,6 +1964,30 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) } } +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_enet_acpi_match[] = { + { "APMC0D05", XGENE_ENET1}, + { "APMC0D30", XGENE_ENET1}, + { "APMC0D31", XGENE_ENET1}, + { "APMC0D3F", XGENE_ENET1}, + { "APMC0D26", XGENE_ENET2}, + { "APMC0D25", XGENE_ENET2}, + { } +}; +MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); +#endif + +static const struct of_device_id xgene_enet_of_match[] = { + {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1}, + {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1}, + {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1}, + {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2}, + {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2}, + {}, +}; + +MODULE_DEVICE_TABLE(of, xgene_enet_of_match); + static int xgene_enet_probe(struct platform_device *pdev) { struct net_device *ndev; @@ -2110,32 +2134,6 @@ static void xgene_enet_shutdown(struct platform_device *pdev) xgene_enet_remove(pdev); } -#ifdef CONFIG_ACPI -static const struct acpi_device_id xgene_enet_acpi_match[] = { - { "APMC0D05", XGENE_ENET1}, - { "APMC0D30", XGENE_ENET1}, - { "APMC0D31", XGENE_ENET1}, - { "APMC0D3F", XGENE_ENET1}, - { "APMC0D26", XGENE_ENET2}, - { "APMC0D25", XGENE_ENET2}, - { } -}; -MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); -#endif - -#ifdef CONFIG_OF -static const struct of_device_id xgene_enet_of_match[] = { - {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1}, - {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1}, - {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1}, - {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2}, - {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2}, - {}, -}; - -MODULE_DEVICE_TABLE(of, xgene_enet_of_match); -#endif - static struct platform_driver xgene_enet_driver = { .driver = { .name = "xgene-enet", diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index 92af182951be..f095051beb54 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -311,6 +311,30 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl, } #endif +static const struct of_device_id xgene_mdio_of_match[] = { + { + .compatible = "apm,xgene-mdio-rgmii", + .data = (void *)XGENE_MDIO_RGMII + }, + { + .compatible = "apm,xgene-mdio-xfi", + .data = (void *)XGENE_MDIO_XFI + }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgene_mdio_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_mdio_acpi_match[] = { + { "APMC0D65", XGENE_MDIO_RGMII }, + { "APMC0D66", XGENE_MDIO_XFI }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match); +#endif + + static int xgene_mdio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -430,32 +454,6 @@ static int xgene_mdio_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id xgene_mdio_of_match[] = { - { - 
.compatible = "apm,xgene-mdio-rgmii", - .data = (void *)XGENE_MDIO_RGMII - }, - { - .compatible = "apm,xgene-mdio-xfi", - .data = (void *)XGENE_MDIO_XFI - }, - {}, -}; - -MODULE_DEVICE_TABLE(of, xgene_mdio_of_match); -#endif - -#ifdef CONFIG_ACPI -static const struct acpi_device_id xgene_mdio_acpi_match[] = { - { "APMC0D65", XGENE_MDIO_RGMII }, - { "APMC0D66", XGENE_MDIO_XFI }, - { } -}; - -MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match); -#endif - static struct platform_driver xgene_mdio_driver = { .driver = { .name = "xgene-mdio", diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h index 354241b53c1d..594a11d42401 100644 --- a/drivers/net/phy/mdio-xgene.h +++ b/drivers/net/phy/mdio-xgene.h @@ -132,10 +132,6 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) #define GET_BIT(field, src) \ xgene_enet_get_field_value(field ## _POS, 1, src) -static const struct of_device_id xgene_mdio_of_match[]; -#ifdef CONFIG_ACPI -static const struct acpi_device_id xgene_mdio_acpi_match[]; -#endif int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg); int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data); struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr); -- cgit v1.2.3 From da9f33018e2c0b2f341cb574e3c08363e3b0dd28 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 1 Feb 2017 03:40:05 +0100 Subject: net: dsa: mv88e6xxx: Workaround missing PHY ID on mv88e6390 The internal PHYs of the mv88e6390 do not have a model ID. Trap any calls to the ID register, and if it is zero, return the ID for the mv88e6390. The Marvell PHY driver can then bind to this ID. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index bf385377a461..29190303ace0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2930,6 +2930,14 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); mutex_unlock(&chip->reg_lock); + if (reg == MII_PHYSID2) { + /* Some internal PHYS don't have a model number. Use + * the mv88e6390 family model number instead. + */ + if (!(val & 0x3f0)) + val |= PORT_SWITCH_ID_PROD_NUM_6390; + } + return err ? err : val; } -- cgit v1.2.3 From e4cf8a38fc0d257b4070066e46a678f4777fecaa Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 1 Feb 2017 03:40:06 +0100 Subject: net: phy: Marvell: Add mv88e6390 internal PHY The mv88e6390 Ethernet switch has internal PHYs. These PHYs don't have an model ID in the ID2 register. So the MDIO driver in the switch intercepts reads to this register, and returns the switch family ID. Extend the Marvell PHY driver by including this ID, and treat the PHY as a 88E1540. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 20 ++++++++++++++++++++ include/linux/marvell_phy.h | 6 ++++++ 2 files changed, 26 insertions(+) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a3e3733813a7..1a0ac48cbc50 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2142,6 +2142,25 @@ static struct phy_driver marvell_drivers[] = { .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, }, + { + .phy_id = MARVELL_PHY_ID_88E6390, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6390", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .probe = m88e1510_probe, + .config_init = &marvell_config_init, + .config_aneg = &m88e1510_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .resume = &genphy_resume, + .suspend = &genphy_suspend, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + }, }; module_phy_driver(marvell_drivers); @@ -2160,6 +2179,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index a57f0dfb6db7..3d616d7f65bf 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -19,6 +19,12 @@ #define MARVELL_PHY_ID_88E1540 0x01410eb0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do + * not have a model ID. So the switch driver traps reads to the ID2 + * register and returns the switch family ID + */ +#define MARVELL_PHY_ID_88E6390 0x01410f90 + /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 -- cgit v1.2.3 From ba94f3088b792b16ea576a256a6030feddc87f24 Mon Sep 17 00:00:00 2001 From: Andrey Vagin Date: Wed, 1 Feb 2017 11:00:45 -0800 Subject: unix: add ioctl to open a unix socket file with O_PATH This ioctl opens a file to which a socket is bound and returns a file descriptor. The caller has to have CAP_NET_ADMIN in the socket network namespace. Currently it is impossible to get a path and a mount point for a socket file. socket_diag reports address, device ID and inode number for unix sockets. An address can contain a relative path or a file may be moved somewhere. And these properties say nothing about a mount namespace and a mount point of a socket file. With the introduced ioctl, we can get a path by reading /proc/self/fd/X and get mnt_id from /proc/self/fdinfo/X. In CRIU we are going to use this ioctl to dump and restore unix socket. Here is an example how it can be used: $ strace -e socket,bind,ioctl ./test /tmp/test_sock socket(AF_UNIX, SOCK_STREAM, 0) = 3 bind(3, {sa_family=AF_UNIX, sun_path="test_sock"}, 11) = 0 ioctl(3, SIOCUNIXFILE, 0) = 4 ^Z $ ss -a | grep test_sock u_str LISTEN 0 1 test_sock 17798 * 0 $ ls -l /proc/760/fd/{3,4} lrwx------ 1 root root 64 Feb 1 09:41 3 -> 'socket:[17798]' l--------- 1 root root 64 Feb 1 09:41 4 -> /tmp/test_sock $ cat /proc/760/fdinfo/4 pos: 0 flags: 012000000 mnt_id: 40 $ cat /proc/self/mountinfo | grep "^40\s" 40 19 0:37 / /tmp rw shared:23 - tmpfs tmpfs rw Signed-off-by: Andrei Vagin Signed-off-by: David S. 
Miller --- include/uapi/linux/un.h | 2 ++ net/unix/af_unix.c | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/include/uapi/linux/un.h b/include/uapi/linux/un.h index 3ed3e46c1b1f..4f0ab3a548ad 100644 --- a/include/uapi/linux/un.h +++ b/include/uapi/linux/un.h @@ -10,4 +10,6 @@ struct sockaddr_un { char sun_path[UNIX_PATH_MAX]; /* pathname */ }; +#define SIOCUNIXFILE (SIOCPROTOPRIVATE + 0) /* open a socket file with O_PATH */ + #endif /* _LINUX_UN_H */ diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index cef79873b09d..e2d18b9f910f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -117,6 +117,7 @@ #include #include #include +#include struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; EXPORT_SYMBOL_GPL(unix_socket_table); @@ -2592,6 +2593,43 @@ long unix_outq_len(struct sock *sk) } EXPORT_SYMBOL_GPL(unix_outq_len); +static int unix_open_file(struct sock *sk) +{ + struct path path; + struct file *f; + int fd; + + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + unix_state_lock(sk); + path = unix_sk(sk)->path; + if (!path.dentry) { + unix_state_unlock(sk); + return -ENOENT; + } + + path_get(&path); + unix_state_unlock(sk); + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + goto out; + + f = dentry_open(&path, O_PATH, current_cred()); + if (IS_ERR(f)) { + put_unused_fd(fd); + fd = PTR_ERR(f); + goto out; + } + + fd_install(fd, f); +out: + path_put(&path); + + return fd; +} + static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; @@ -2610,6 +2648,9 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) else err = put_user(amount, (int __user *)arg); break; + case SIOCUNIXFILE: + err = unix_open_file(sk); + break; default: err = -ENOIOCTLCMD; break; -- cgit v1.2.3 From d6db61a40ee056f0b067b924d28b276306474a9c Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 1 Feb 2017 20:19:25 +0100 Subject: net: stmmac: don't set tx delay in RGMII_ID and RGMII_TXID mode As documented in Documentation/devicetree/bindings/net/ethernet.txt, in RGMII_ID and RGMII_TXID mode the MAC should not add a tx delay. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 8840a360a0b7..9685555932ea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -177,12 +177,19 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; unsigned long clk_rate; - u8 tx_dly_val; + u8 tx_dly_val = 0; switch (dwmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: + /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where + * 8ns are exactly one cycle of the 125MHz RGMII TX clock): + * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3 + */ + tx_dly_val = dwmac->tx_delay_ns >> 1; + /* fall through */ + + case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: /* Generate a 25MHz clock for the PHY */ clk_rate = 25 * 1000 * 1000; @@ -195,11 +202,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); - /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where - * 8ns are exactly one cycle of the 125MHz RGMII TX clock): - * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3 - */ - tx_dly_val = dwmac->tx_delay_ns >> 1; meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, tx_dly_val << PRG_ETH0_TXDLY_SHIFT); break; -- cgit v1.2.3 From 722eef28616798dd10f9a2e4254163a5bcd54eea Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 1 Feb 2017 22:02:02 +0100 Subject: net: stmmac: add separate warning for PTP not being supported by HW Chips like Amlogic S905GXBB are supported by this driver but don't have support for PTP. Add a separate warning for missing HW support to differentiate it from other actual failures. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 26a2185fc8a9..bd83bf9ef326 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1726,8 +1726,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (init_ptp) { ret = stmmac_init_ptp(priv); - if (ret) - netdev_warn(priv->dev, "fail to init PTP.\n"); + if (ret == -EOPNOTSUPP) + netdev_warn(priv->dev, "PTP not supported by HW\n"); + else if (ret) + netdev_warn(priv->dev, "PTP init failed\n"); } #ifdef CONFIG_DEBUG_FS -- cgit v1.2.3 From fd3984e6e78a569cd72d94687860501f7e73594c Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 2 Feb 2017 08:20:12 +0100 Subject: net: stmmac: Fix wrong message in stmmac_probe_config_dt Most likely a copy & paste error in referenced commit. Restore the debug message to what it was before. Fixes: f573c0b9c4e0 ("stmmac: move stmmac_clk, pclk, clk_ptp_ref and stmmac_rst to platform structure") Signed-off-by: Heiner Kallweit Acked-By: Joao Pinto Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 460f94f5053a..4963ccdb31e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -371,7 +371,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } else { clk_prepare_enable(plat->clk_ptp_ref); plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); - dev_info(&pdev->dev, "No reset control found\n"); + dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); } plat->stmmac_rst = devm_reset_control_get(&pdev->dev, -- cgit v1.2.3 From 60f06fde4cff6f6134decbf09199b5e047bc3355 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 2 Feb 2017 00:35:03 +0100 Subject: net: phy: marvell: Add support for 88e1545 PHY The 88e1545 PHYs are discrete Marvell PHYs, found in a quad package on the zii-devel-b board. Add support for it to the Marvell PHY driver. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 21 +++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 22 insertions(+) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 1a0ac48cbc50..f9d0fa315a47 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2122,6 +2122,26 @@ static struct phy_driver marvell_drivers[] = { .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, }, + { + .phy_id = MARVELL_PHY_ID_88E1545, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1545", + .probe = m88e1510_probe, + .remove = &marvell_remove, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &marvell_config_init, + .config_aneg = &m88e1510_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .resume = &genphy_resume, + .suspend = &genphy_suspend, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + }, { .phy_id = MARVELL_PHY_ID_88E3016, .phy_id_mask = MARVELL_PHY_ID_MASK, @@ -2178,6 +2198,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, { } diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 3d616d7f65bf..4055cf8cc978 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -17,6 +17,7 @@ #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 +#define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E3016 0x01410e60 /* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do -- cgit v1.2.3 From b91e055c57e45131470d4935e4a726bd65b14580 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 2 Feb 2017 00:46:15 +0100 Subject: net: dsa: mv88e6xxx: Fix ATU age timer for MV88E6390 The MV88E6390 family uses a different ATU age timer coefficient. Fix the info structures. 
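To put numbers on the coefficient change (an illustration, not part of the patch; it assumes the ATU AgeTime register field simply stores the requested milliseconds divided by age_time_coeff, and ignores the driver's range checks and rounding), the standalone C sketch below shows what the default 300 second bridge ageing time becomes on a chip whose AgeTime unit is really 3.75 s if the old 15000 ms coefficient were kept:

#include <stdio.h>

/* Toy model of the conversion: register value = requested ms / coefficient ms. */
static unsigned int atu_age_reg(unsigned int msecs, unsigned int coeff)
{
        return msecs / coeff;
}

int main(void)
{
        unsigned int req = 300000;                       /* 300 s, the bridge default ageing time */
        unsigned int old_reg = atu_age_reg(req, 15000);  /* coefficient used before this fix */
        unsigned int new_reg = atu_age_reg(req, 3750);   /* MV88E6390 coefficient from the patch */

        /* On an MV88E6390 one register unit is 3.75 s, so the old coefficient
         * would age entries out after 20 * 3.75 s = 75 s instead of 300 s,
         * while the corrected one gives 80 * 3.75 s = 300 s. */
        printf("old coeff 15000: reg=%u -> %u ms on 6390\n", old_reg, old_reg * 3750);
        printf("new coeff  3750: reg=%u -> %u ms on 6390\n", new_reg, new_reg * 3750);
        return 0;
}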
Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 29190303ace0..22ce57256d34 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4011,7 +4011,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .port_base_addr = 0x0, .global1_addr = 0x1b, .tag_protocol = DSA_TAG_PROTO_DSA, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190_ops, @@ -4025,7 +4025,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4040,7 +4040,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4070,7 +4070,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4187,7 +4187,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4201,7 +4201,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, -- cgit v1.2.3 From 740117a8e2b0e89c5fd6f1d5bbc12f87c46c80d7 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 2 Feb 2017 00:46:16 +0100 Subject: net: dsa: mv88e6xxx: Fix typ0 when configuring 2.5Gbps In order to enable 2.5Gbps mode, we need the base speed of 10G, plus the Alt bit setting. Fix a typ0 that used 1Gb base speed. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/port.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 0db7fa0373ae..d380a93b092c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -193,7 +193,7 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port, ctrl = PORT_PCS_CTRL_SPEED_1000; break; case 2500: - ctrl = PORT_PCS_CTRL_SPEED_1000 | PORT_PCS_CTRL_ALTSPEED; + ctrl = PORT_PCS_CTRL_SPEED_10000 | PORT_PCS_CTRL_ALTSPEED; break; case 10000: /* all bits set, fall through... */ -- cgit v1.2.3 From b9ea2a7be758f2c07a0c044645edc409a8a388fd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 1 Feb 2017 18:13:23 -0800 Subject: net: remove useless pfmemalloc setting When __alloc_skb() allocates an skb from fast clone cache, setting pfmemalloc on the clone is not needed. 
Clone will be properly initialized later at skb_clone() time, including pfmemalloc field, as it is included in the headers_start/headers_end section which is fully copied. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26c1344cc23e..4f8f2a1a66b5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -271,7 +271,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, atomic_set(&fclones->fclone_ref, 1); fclones->skb2.fclone = SKB_FCLONE_CLONE; - fclones->skb2.pfmemalloc = pfmemalloc; } out: return skb; -- cgit v1.2.3 From 661091093918657ab544fb8ca91a5ab172a986dc Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 1 Feb 2017 18:41:25 -0800 Subject: net: ipv4: remove fib_lookup.h from devinet.c include list nothing in devinet.c relies on fib_lookup.h; remove it from the includes Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/devinet.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 4cd2ee8857d2..5d367b7ff542 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -65,8 +65,6 @@ #include #include -#include "fib_lookup.h" - static struct ipv4_devconf ipv4_devconf = { .data = { [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, -- cgit v1.2.3 From 8fe809a992639b2013c0d8da2ba55cdea28a959a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 1 Feb 2017 20:47:59 -0800 Subject: net: add LINUX_MIB_PFMEMALLOCDROP counter Debugging issues caused by pfmemalloc is often tedious. Add a new SNMP counter to more easily diagnose these problems. Signed-off-by: Eric Dumazet Cc: Josef Bacik Acked-by: Josef Bacik Signed-off-by: David S. Miller --- include/uapi/linux/snmp.h | 1 + net/core/filter.c | 5 +++-- net/ipv4/proc.c | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index e7a31f830690..3b2bed7ca9a4 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -240,6 +240,7 @@ enum LINUX_MIB_SACKMERGED, LINUX_MIB_SACKSHIFTFALLBACK, LINUX_MIB_TCPBACKLOGDROP, + LINUX_MIB_PFMEMALLOCDROP, LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ LINUX_MIB_TCPDEFERACCEPTDROP, LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ diff --git a/net/core/filter.c b/net/core/filter.c index 1e00737e3bc3..0b753cbb2536 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -76,9 +76,10 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) * allow SOCK_MEMALLOC sockets to use it as this socket is * helping free memory */ - if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) + if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP); return -ENOMEM; - + } err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb); if (err) return err; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index a9deeb90dd36..69cf49e8356d 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -262,6 +262,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED), SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), + SNMP_MIB_ITEM("PFMemallocDrop", LINUX_MIB_PFMEMALLOCDROP), SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), -- cgit 
v1.2.3 From f94484b7584765eebded5d7bfdeb0b2c7b1caf51 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 7 Dec 2016 14:05:34 -0800 Subject: i40e: don't allow i40e_vsi_(add|kill)_vlan to operate when VID<1 Now that we have the separate i40e_(add|rm)_vlan_all_mac functions, we should not be using the i40e_vsi_kill_vlan or i40e_vsi_add_vlan functions when PVID is set or when VID is less than 1. This allows us to remove some checks in i40e_vsi_add_vlan and ensures that callers which need to handle VID=0 or VID=-1 don't accidentally invoke the VLAN mode handling used to convert filters when entering VLAN mode. We also update the functions to take u16 instead of s16 as well since they no longer expect to be called with VID=I40E_VLAN_ANY. Change-ID: Ibddf44a8bb840dde8ceef2a4fdb92fd953b05a57 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 4 ++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 14 ++++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 342007df4663..c164d5093d53 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -852,9 +852,9 @@ int i40e_close(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); -int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid); void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); -void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); +void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid); struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, const u8 *macaddr); int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b2f76d24000d..0b4adccd2611 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2568,12 +2568,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) /** * i40e_vsi_add_vlan - Add VSI membership for given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be added (0 = untagged only , -1 = any) + * @vid: VLAN id to be added **/ -int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; + if (!vid || vsi->info.pvid) + return -EINVAL; + /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_hash_lock); err = i40e_add_vlan_all_mac(vsi, vid); @@ -2616,10 +2619,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) /** * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be removed (0 = untagged only , -1 = any) + * @vid: VLAN id to be removed **/ -void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) +void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) { + if (!vid || vsi->info.pvid) + return; + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_rm_vlan_all_mac(vsi, vid); spin_unlock_bh(&vsi->mac_filter_hash_lock); -- cgit v1.2.3 From 7aaf9536c45021772a5bc6cd4b620a11eac750f7 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Nov 2016 12:39:33 -0800 Subject: i40e: fold the i40e_is_vsi_in_vlan check into i40e_put_mac_in_vlan Fold the check for determining 
when to call i40e_put_mac_in_vlan directly into the function so that we don't need to decide which function to use ahead of time. This allows us to just call i40e_put_mac_in_vlan directly without having to check ahead of time. Change-ID: Ifff526940748ac14b8418be5df5a149502eed137 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 11 ++++------- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 8 ++------ 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0b4adccd2611..7a11d2b4d45a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1498,6 +1498,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, return i40e_add_filter(vsi, macaddr, le16_to_cpu(vsi->info.pvid)); + if (!i40e_is_vsi_in_vlan(vsi)) + return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) continue; @@ -1756,14 +1759,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_mac_filter *f; - - if (i40e_is_vsi_in_vlan(vsi)) - f = i40e_put_mac_in_vlan(vsi, addr); - else - f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY); - if (f) + if (i40e_put_mac_in_vlan(vsi, addr)) return 0; else return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a6198b727e24..0cdbdd33d754 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1942,12 +1942,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_mac_filter *f; f = i40e_find_mac(vsi, al->list[i].addr); - if (!f) { - if (i40e_is_vsi_in_vlan(vsi)) - f = i40e_put_mac_in_vlan(vsi, al->list[i].addr); - else - f = i40e_add_filter(vsi, al->list[i].addr, -1); - } + if (!f) + f = i40e_put_mac_in_vlan(vsi, al->list[i].addr); if (!f) { dev_err(&pf->pdev->dev, -- cgit v1.2.3 From d983001f044cf823490af7dcfcde39cfb42be0db Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Nov 2016 12:39:34 -0800 Subject: i40e: no need to check is_vsi_in_vlan before calling i40e_del_mac_all_vlan This function won't be appreciably slower when in VLAN mode, so there is no real reason to not just call it directly. In either case, we still must search the full table for a MAC/VLAN pair. We do get to stop searching a tiny bit early in the case of knowing we are not in VLAN mode, but this is a minor savings and we can avoid the code complexity by not having to worry about the check. 
Change-ID: I533412195b3a42f51cf629e3675dd5145aea8625 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 7a11d2b4d45a..ab0a04bb6a32 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1779,10 +1779,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - if (i40e_is_vsi_in_vlan(vsi)) - i40e_del_mac_all_vlan(vsi, addr); - else - i40e_del_filter(vsi, addr, I40E_VLAN_ANY); + i40e_del_mac_all_vlan(vsi, addr); return 0; } -- cgit v1.2.3 From feffdbe47d6f4c02b5e2764e14490c5f9d250bdb Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Nov 2016 12:39:35 -0800 Subject: i40e: rename i40e_put_mac_in_vlan and i40e_del_mac_all_vlan These functions purpose is to add a new MAC filter correctly, whether we're using VLANs or not. Their goal is to ensure that all active VLANs get the new MAC filter. Rename them so that their intent is clear. They function correctly regardless of whether we have any active VLANs or only have I40E_VLAN_ANY filters. The new names convey how they function in a more clear manner. Change-ID: Iec1961f968c0223a7132724a74e26a665750b107 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 6 +++--- drivers/net/ethernet/intel/i40e/i40e_main.c | 24 ++++++++++++---------- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++-- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index c164d5093d53..4a648840e5d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -855,9 +855,9 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid); void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid); -struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, - const u8 *macaddr); -int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr); +struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, + const u8 *macaddr); +int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); #ifdef I40E_FCOE diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ab0a04bb6a32..da9f8d38d6b9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1477,18 +1477,19 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) } /** - * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans + * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered * - * Goes through all the macvlan filters and adds a macvlan filter for each + * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise, + * go through all the macvlan filters and add a macvlan filter for each * unique vlan that already exists. 
If a PVID has been assigned, instead only * add the macaddr to that VLAN. * * Returns last filter added on success, else NULL **/ -struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, - const u8 *macaddr) +struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, + const u8 *macaddr) { struct i40e_mac_filter *f, *add = NULL; struct hlist_node *h; @@ -1513,15 +1514,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, } /** - * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS + * i40e_del_mac_filter - Remove a MAC filter from all VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be removed * - * Removes a given MAC address from a VSI, regardless of VLAN + * Removes a given MAC address from a VSI regardless of what VLAN it has been + * associated with. * * Returns 0 for success, or error **/ -int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr) +int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f; struct hlist_node *h; @@ -1582,8 +1584,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_del_mac_all_vlan(vsi, netdev->dev_addr); - i40e_put_mac_in_vlan(vsi, addr->sa_data); + i40e_del_mac_filter(vsi, netdev->dev_addr); + i40e_add_mac_filter(vsi, addr->sa_data); spin_unlock_bh(&vsi->mac_filter_hash_lock); ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { @@ -1760,7 +1762,7 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - if (i40e_put_mac_in_vlan(vsi, addr)) + if (i40e_add_mac_filter(vsi, addr)) return 0; else return -ENOMEM; @@ -1779,7 +1781,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - i40e_del_mac_all_vlan(vsi, addr); + i40e_del_mac_filter(vsi, addr); return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 0cdbdd33d754..1859911acf37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1943,7 +1943,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) f = i40e_find_mac(vsi, al->list[i].addr); if (!f) - f = i40e_put_mac_in_vlan(vsi, al->list[i].addr); + f = i40e_add_mac_filter(vsi, al->list[i].addr); if (!f) { dev_err(&pf->pdev->dev, @@ -2008,7 +2008,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) - if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) { + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = I40E_ERR_INVALID_MAC_ADDR; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; -- cgit v1.2.3 From 148141bb26cf942e6bc7249c55d9958dd62dd4f2 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Nov 2016 12:39:36 -0800 Subject: i40e: avoid O(n^2) loop when deleting all filters Use __i40e_del_filter instead of using i40e_del_filter() which will avoid doing an additional search to delete a filter we already have the pointer for. 
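The shape of the saving is easier to see outside the driver. The standalone C sketch below uses a plain singly linked list instead of the driver's MAC/VLAN hash, and every name in it is invented; the point is only that a delete-by-key helper must search again for an entry the iterator already holds, while a delete-by-pointer helper does not, which is the O(n^2) to O(n) change described above.

#include <stdlib.h>
#include <string.h>

struct entry { struct entry *next; unsigned char mac[6]; };

/* Lookup-style delete: has to search from the head on every call, which is
 * what made the "delete every filter" loop quadratic in the old code. */
static void del_by_mac(struct entry **head, const unsigned char *mac)
{
        for (struct entry **pp = head; *pp; pp = &(*pp)->next) {
                if (!memcmp((*pp)->mac, mac, 6)) {
                        struct entry *dead = *pp;
                        *pp = dead->next;
                        free(dead);
                        return;
                }
        }
}

/* Pointer-style delete: the caller already holds the entry, so no second search
 * (the moral equivalent of calling __i40e_del_filter(vsi, f) while iterating). */
static void del_entry(struct entry **pp)
{
        struct entry *dead = *pp;
        *pp = dead->next;
        free(dead);
}

int main(void)
{
        struct entry *head = NULL;
        unsigned char victim[6] = { 0, 0, 0, 0, 0, 2 };

        for (int i = 0; i < 4; i++) {
                struct entry *e = calloc(1, sizeof(*e));
                e->mac[5] = (unsigned char)i;
                e->next = head;
                head = e;
        }

        del_by_mac(&head, victim);              /* one delete costs one extra walk */

        for (struct entry **pp = &head; *pp; )  /* drain the rest without searching */
                del_entry(pp);

        return 0;
}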
Change-ID: Iea5a7e3cafbf8c682ed9d3b6c69cf5ff53f44daf Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4a648840e5d4..8bb0f4b79575 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -762,6 +762,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f); void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index da9f8d38d6b9..cabd72854274 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1434,7 +1434,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry(). **/ -static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) { if (!f) return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1859911acf37..49941dd691d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2725,7 +2725,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) * anyway. */ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) - i40e_del_filter(vsi, f->macaddr, f->vlan); + __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); -- cgit v1.2.3 From 9569a9a4547d5636827c3f6b09be73ed924b1d16 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Nov 2016 12:39:37 -0800 Subject: i40e: when adding or removing MAC filters, correctly handle VLANs Instead of using i40e_add_filter or i40e_del_filter directly, when adding a MAC address, we should normally be using i40e_add_mac_filter or i40e_del_mac_filter. These functions correctly handle the various cases of VLAN mode or PVID settings. This ensures consistency and avoids the issues that can occur with the recent addition of a WARN_ON() in i40e_sync_vsi_filters. 
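The rule these helpers enforce can be written down compactly. The sketch below is a simplified standalone C model, not driver code (toy_vsi, add_filter and VLAN_ANY are invented stand-ins); it shows the three cases the add path distinguishes: a configured port VLAN wins, no active VLANs means a single I40E_VLAN_ANY-style entry, and otherwise one entry per active VLAN.

#include <stdio.h>

#define VLAN_ANY (-1)                           /* stand-in for I40E_VLAN_ANY */

struct toy_vsi { int pvid; int nvlans; int vlans[8]; };

/* stand-in for the real hash insert done by i40e_add_filter() */
static void add_filter(struct toy_vsi *vsi, const char *mac, int vid)
{
        (void)vsi;
        printf("add %s on vlan %d\n", mac, vid);
}

/* the decision the real i40e_add_mac_filter() makes for every new MAC */
static void add_mac_filter(struct toy_vsi *vsi, const char *mac)
{
        if (vsi->pvid)                          /* port VLAN configured: only that VID */
                add_filter(vsi, mac, vsi->pvid);
        else if (!vsi->nvlans)                  /* not in VLAN mode: one catch-all entry */
                add_filter(vsi, mac, VLAN_ANY);
        else                                    /* VLAN mode: one entry per active VLAN */
                for (int i = 0; i < vsi->nvlans; i++)
                        add_filter(vsi, mac, vsi->vlans[i]);
}

int main(void)
{
        struct toy_vsi vsi = { .pvid = 0, .nvlans = 2, .vlans = { 10, 20 } };

        add_mac_filter(&vsi, "02:00:00:00:00:01");      /* adds on VLAN 10 and VLAN 20 */
        return 0;
}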
Change-ID: I7fe62db063391fdd1180b2d6a6a3c5ab4307eeee Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 +++--- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 11 ++++------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cabd72854274..2d689e0e12a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9341,7 +9341,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) */ i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ @@ -9350,7 +9350,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) random_ether_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } @@ -9369,7 +9369,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) */ eth_broadcast_addr(broadcast); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, broadcast); spin_unlock_bh(&vsi->mac_filter_hash_lock); ether_addr_copy(netdev->dev_addr, mac_addr); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 49941dd691d9..cbbf8648307a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) spin_lock_bh(&vsi->mac_filter_hash_lock); if (is_valid_ether_addr(vf->default_lan_addr.addr)) { - f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? - vf->port_vlan_id : -1); + f = i40e_add_mac_filter(vsi, + vf->default_lan_addr.addr); if (!f) dev_info(&pf->pdev->dev, "Could not add MAC filter %pM for VF %d\n", vf->default_lan_addr.addr, vf->vf_id); } eth_broadcast_addr(broadcast); - f = i40e_add_filter(vsi, broadcast, - vf->port_vlan_id ? vf->port_vlan_id : -1); + f = i40e_add_mac_filter(vsi, broadcast); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); @@ -2718,8 +2716,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* delete the temporary mac address */ if (!is_zero_ether_addr(vf->default_lan_addr.addr)) - i40e_del_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1); + i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); /* Delete all the filters for this VSI - we're going to kill it * anyway. -- cgit v1.2.3 From 6de432c5ae28e7330e14e85c4c614a67a0a12263 Mon Sep 17 00:00:00 2001 From: Bimmy Pujari Date: Fri, 11 Nov 2016 12:39:38 -0800 Subject: i40e: Deprecating unused macro I40E_MAC_X710 was supposed to be for 10G and I40E_MAC_XL710 was supposed to be for 40G. But function i40e_is_mac_710 sets I40E_MAC_XL710 for all device IDS, I40E_MAC_X710 is not used at all. As there is nothing to compare there is no need for this function. Thus deprecating this extra macro and removing this function entirely and replacing it with a direct check. 
Change-ID: I7d1769954dccd574a290ac04adb836ebd156730e Signed-off-by: Bimmy Pujari Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 13 ------------- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 +++--- drivers/net/ethernet/intel/i40e/i40e_type.h | 1 - drivers/net/ethernet/intel/i40evf/i40e_type.h | 1 - 4 files changed, 3 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 8bb0f4b79575..cdf812c6b2fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -134,19 +134,6 @@ /* default to trying for four seconds */ #define I40E_TRY_LINK_TIMEOUT (4 * HZ) -/** - * i40e_is_mac_710 - Return true if MAC is X710/XL710 - * @hw: ptr to the hardware info - **/ -static inline bool i40e_is_mac_710(struct i40e_hw *hw) -{ - if ((hw->mac.type == I40E_MAC_X710) || - (hw->mac.type == I40E_MAC_XL710)) - return true; - - return false; -} - /* driver state flags */ enum i40e_state_t { __I40E_TESTING, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2d689e0e12a5..24819248de68 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8684,7 +8684,7 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4))) { pf->flags |= I40E_FLAG_RESTART_AUTONEG; @@ -8693,13 +8693,13 @@ static int i40e_sw_init(struct i40e_pf *pf) } /* Disable FW LLDP if FW < v4.3 */ - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4))) pf->flags |= I40E_FLAG_STOP_FW_LLDP; /* Use the FW Set LLDP MIB API if FW > v4.40 */ - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (pf->hw.aq.fw_maj_ver >= 5))) pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index edc0abdf4783..b6cf8d2670a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -125,7 +125,6 @@ enum i40e_debug_mask { */ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, - I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index c85e8a31c072..92ac60da5201 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -100,7 +100,6 @@ enum i40e_debug_mask { */ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, - I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, -- cgit v1.2.3 From 17901e1bde9b959b535d075f137d5a86ca64f1c9 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 11 Nov 2016 12:39:39 -0800 Subject: i40evf: remove unused device ID This device ID was intended for use when running Linux VF drivers under Hyper-V, but we have determined that it is not necessary. Since it is unused, and will never be used, remove it. 
Change-ID: I74998ab4237db043cd400547bb54a0a5e2a37ea5 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c0fc53361800..3fe87e021148 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0}, /* required last entry */ {0, } }; -- cgit v1.2.3 From 9588397d24eecb7298bce9307b589a5bc0ccafdd Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Sat, 19 Nov 2016 13:53:58 -0800 Subject: i40e: remove unnecessary __packed 'struct i40e_dma_mem' defined with 'packed' directive causing kernel unaligned errors on sparc. e.g. i40e: Intel(R) Ethernet Connection XL710 Network Driver - version 1.6.16-k i40e: Copyright (c) 2013 - 2014 Intel Corporation. Kernel unaligned access at TPC[44894c] dma_4v_alloc_coherent+0x1ac/0x300 Kernel unaligned access at TPC[44894c] dma_4v_alloc_coherent+0x1ac/0x300 Kernel unaligned access at TPC[44894c] dma_4v_alloc_coherent+0x1ac/0x300 Kernel unaligned access at TPC[44894c] dma_4v_alloc_coherent+0x1ac/0x300 Kernel unaligned access at TPC[44894c] dma_4v_alloc_coherent+0x1ac/0x300 i40e 0000:03:00.0: fw 5.1.40981 api 1.5 nvm 5.04 0x80002548 0.0.0 This can be fixed with get_unaligned/put_unaligned(). However no reference in driver shows that 'struct i40e_dma_mem' directly shoved into NIC hardware. But instead fields of the struct are being read and used for hardware. Therefore, __packed is unnecessary for 'struct i40e_dma_mem'. In addition, although 'struct i40e_virt_mem' doesn't cause any unaligned access, keeping it packed is unnecessary as well because of aforementioned reason. This change make 'struct i40e_dma_mem' and 'struct i40e_virt_mem' unpacked. Signed-off-by: Tushar Dave Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 5b6feb7edeb1..be74bcf9c961 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -55,7 +55,7 @@ struct i40e_dma_mem { void *va; dma_addr_t pa; u32 size; -} __packed; +}; #define i40e_allocate_dma_mem(h, m, unused, s, a) \ i40e_allocate_dma_mem_d(h, m, s, a) @@ -64,7 +64,7 @@ struct i40e_dma_mem { struct i40e_virt_mem { void *va; u32 size; -} __packed; +}; #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) #define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) -- cgit v1.2.3 From 52ea3e802047552a8c1ec8c42cad361ffd4ca09c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 28 Nov 2016 16:05:59 -0800 Subject: i40e: Quick refactor to start moving data off stack and into Tx buffer info This patch does some quick work to pull some of the data off of the stack and hopefully start storing it in the Tx buffer info section of the Tx ring. Ideally we should be moving away from having to store much of anything on the stack and can just maintain it all in the descriptor rings. 
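One detail worth spelling out is that the byte accounting is unchanged, only split across two places. The standalone C check below uses made-up packet sizes; it confirms that skb->len - hdr_len + gso_segs * hdr_len, which the old i40e_tx_map() computed, equals skb->len + (gso_segs - 1) * hdr_len, which i40e_tso() now adds on top of the skb->len recorded at the start of xmit. For the non-TSO path gso_segs stays 1 and both expressions collapse to skb->len.

#include <stdio.h>

int main(void)
{
        /* made-up example: one TSO skb that the hardware will cut into 45 segments */
        unsigned int skb_len  = 65226;  /* total skb length in bytes */
        unsigned int hdr_len  = 66;     /* Ethernet + IP + TCP header length */
        unsigned int gso_segs = 45;     /* number of segments on the wire */

        unsigned int old_way = skb_len - hdr_len + gso_segs * hdr_len;  /* old i40e_tx_map() */
        unsigned int new_way = skb_len + (gso_segs - 1) * hdr_len;      /* xmit + i40e_tso() */

        printf("old=%u new=%u\n", old_way, new_way);    /* both print 68130 */
        return 0;
}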
Change-ID: I4b4715ea1920e122502482b3f9e56a9a6cb1e9fe Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 47 +++++++++++++++------------ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 47 +++++++++++++++------------ 2 files changed, 54 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 352cf7cd2ef4..f5baeb154d39 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2251,14 +2251,16 @@ out: /** * i40e_tso - set up the tso context descriptor - * @skb: ptr to the skb we're sending + * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss) { + struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; union { struct iphdr *v4; @@ -2271,6 +2273,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) unsigned char *hdr; } l4; u32 paylen, l4_offset; + u16 gso_segs, gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -2335,10 +2338,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* update GSO size and bytecount with header size */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; - cd_mss = skb_shinfo(skb)->gso_size; + cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); @@ -2699,7 +2710,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - u16 gso_segs; u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { @@ -2708,15 +2718,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TX_FLAGS_VLAN_SHIFT; } - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) - gso_segs = skb_shinfo(skb)->gso_segs; - else - gso_segs = 1; - - /* multiply data chunks by size of headers */ - first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); - first->gso_segs = gso_segs; - first->skb = skb; first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -2902,8 +2903,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) - goto out_drop; + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2919,6 +2922,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = 
skb->len; + first->gso_segs = 1; + /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; @@ -2926,16 +2935,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) tx_flags |= I40E_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2973,7 +2979,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - dev_kfree_skb_any(skb); + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index df67ef37b7f3..d4e488267988 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1549,14 +1549,16 @@ out: /** * i40e_tso - set up the tso context descriptor - * @skb: ptr to the skb we're sending + * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss) { + struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; union { struct iphdr *v4; @@ -1569,6 +1571,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) unsigned char *hdr; } l4; u32 paylen, l4_offset; + u16 gso_segs, gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1633,10 +1636,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* update GSO size and bytecount with header size */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; - cd_mss = skb_shinfo(skb)->gso_size; + cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); @@ -1949,7 +1960,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - u16 gso_segs; u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { @@ -1958,15 +1968,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TX_FLAGS_VLAN_SHIFT; } - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) - gso_segs = skb_shinfo(skb)->gso_segs; - else - gso_segs = 1; - - /* multiply data chunks by size of headers */ - first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); - first->gso_segs = gso_segs; - first->skb = skb; first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -2151,8 +2152,10 @@ static netdev_tx_t 
i40e_xmit_frame_ring(struct sk_buff *skb, count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) - goto out_drop; + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2168,6 +2171,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* prepare the xmit flags */ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; @@ -2175,16 +2184,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) tx_flags |= I40E_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2211,7 +2217,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - dev_kfree_skb_any(skb); + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } -- cgit v1.2.3 From 11131e2b8e9454f79ebfc71e45fe246948ce4997 Mon Sep 17 00:00:00 2001 From: Jayaprakash Shanmugam Date: Mon, 28 Nov 2016 16:06:00 -0800 Subject: i40e: Remove FPK HyperV VF device ID Requirement for VFs to use the VMBus has been removed that's why removing Hyper-V VF device ID. Change-ID: I84f0964f443ee0db3e5e444b5ace996eb71b8280 Signed-off-by: Jayaprakash Shanmugam Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40e_common.c | 1 - drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index aa63b7fb993d..b5a59dd72a0c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: - case I40E_DEV_ID_X722_VF_HV: hw->mac.type = I40E_MAC_X722_VF; break; case I40E_DEV_ID_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index 21dcaee1ad1d..d76393c95056 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -48,7 +48,6 @@ #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 #define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ -- cgit v1.2.3 From 04766b22cf805011620f546384b271586548e8a7 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Mon, 28 Nov 2016 16:06:01 -0800 Subject: i40e: remove unused function After refactoring the client open and close code, this is no longer needed. Remove it. 
Change-ID: If8e6e32baa354d857c2fd8b2f19404f1786011c4 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 - drivers/net/ethernet/intel/i40e/i40e_client.c | 35 --------------------------- 2 files changed, 36 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cdf812c6b2fd..fdd9069b6cec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -792,7 +792,6 @@ int i40e_lan_add_device(struct i40e_pf *pf); int i40e_lan_del_device(struct i40e_pf *pf); void i40e_client_subtask(struct i40e_pf *pf); void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); -void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi); void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 7fe72abc0b4a..7ca048f0b159 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -200,41 +200,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) mutex_unlock(&i40e_client_instance_mutex); } -/** - * i40e_notify_client_of_netdev_open - call the client open callback - * @vsi: the VSI with netdev opened - * - * If there is a client to this netdev, call the client with open - **/ -void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) -{ - struct i40e_client_instance *cdev; - int ret = 0; - - if (!vsi) - return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.netdev == vsi->netdev) { - if (!cdev->client || - !cdev->client->ops || !cdev->client->ops->open) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance open routine\n"); - continue; - } - if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state))) { - ret = cdev->client->ops->open(&cdev->lan_info, - cdev->client); - if (!ret) - set_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state); - } - } - } - mutex_unlock(&i40e_client_instance_mutex); -} - /** * i40e_client_release_qvlist * @ldev: pointer to L2 context. -- cgit v1.2.3 From 1c0e6a3613d3f0bb088a3160095c8da4c1214d02 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Mon, 28 Nov 2016 16:06:02 -0800 Subject: i40e: refactor macro INTRL_USEC_TO_REG This patch refactors the macro INTRL_USEC_TO_REG into a static inline function and fixes a couple subtle bugs caused by the macro. This patch fixes a bug which was caused by passing a bad register value to the firmware. If enabling interrupt rate limiting, a non-zero value for the rate limit must be used. Otherwise the firmware sets the interrupt rate limit to the maximum value. Due to the limited resolution of the register, attempting to set a value of 1, 2, or 3 would be rounded down to 0 and limiting was left enabled, causing unexpected behavior. This patch also fixes a possible bug in which using the macro itself can introduce unintended side-affects because the macro argument is used more than once in the macro definition (e.g. a variable post-increment argument would perform a double increment on the variable). 
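The failure mode is easy to reproduce in isolation. In the standalone C program below, INTRL_ENA, the old macro and the new helper are copied from the i40e_txrx.h hunk that follows; everything else is a toy. It prints what each conversion would hand to the firmware for small requests and demonstrates the macro's double evaluation of its argument:

#include <stdio.h>

#define INTRL_ENA (1 << 6)                 /* BIT(6) in the driver header */
/* the old macro, as removed by the patch */
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)

/* the new helper, as added by the patch */
static unsigned short i40e_intrl_usec_to_reg(int intrl)
{
        if (intrl >> 2)
                return ((intrl >> 2) | INTRL_ENA);
        else
                return 0;
}

int main(void)
{
        int x;

        for (int usecs = 0; usecs <= 5; usecs++)
                printf("%d usecs: old=0x%02x new=0x%02x\n", usecs,
                       INTRL_USEC_TO_REG(usecs), i40e_intrl_usec_to_reg(usecs));
        /* For 1, 2 and 3 usecs the old macro yields 0x40: enable bit set with a
         * zero limit, which the firmware treats as the maximum rate limit.
         * The helper yields 0 there, i.e. rate limiting simply stays off. */

        x = 3;
        (void)INTRL_USEC_TO_REG(x++);           /* the macro evaluates its argument twice */
        printf("x after the macro: %d\n", x);   /* prints 5, not the expected 4 */
        return 0;
}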
Without this patch, attempting to set interrupt rate limits of 1, 2, or 3 results in unexpected behavior and future use of this macro could cause subtle bugs. Change-Id: I83ac842de0ca9c86761923d6e3a4d7b1b95f2b3f Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 15 ++++++++++++++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index cc1465aac2ef..7500902a612d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2072,7 +2072,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector; u16 vector, intrl; - intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 24819248de68..9f785c015a2f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3268,7 +3268,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); wr32(hw, I40E_PFINT_RATEN(vector - 1), - INTRL_USEC_TO_REG(vsi->int_rate_limit)); + i40e_intrl_usec_to_reg(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index e065321ce8ed..1ea820e9debe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -52,7 +52,20 @@ */ #define INTRL_ENA BIT(6) #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) -#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) +/** + * i40e_intrl_usec_to_reg - convert interrupt rate limit to register + * @intrl: interrupt rate limit to convert + * + * This function converts a decimal interrupt rate limit to the appropriate + * register format expected by the firmware when setting interrupt rate limit. + */ +static inline u16 i40e_intrl_usec_to_reg(int intrl) +{ + if (intrl >> 2) + return ((intrl >> 2) | INTRL_ENA); + else + return 0; +} #define I40E_INTRL_8K 125 /* 8000 ints/sec */ #define I40E_INTRL_62K 16 /* 62500 ints/sec */ #define I40E_INTRL_83K 12 /* 83333 ints/sec */ -- cgit v1.2.3 From 33084060fb6d51ff566439a387f69ed90301280c Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Mon, 28 Nov 2016 16:06:03 -0800 Subject: i40e: add interrupt rate limit verbosity Due to the resolution of the register controlling interrupt rate limiting, setting certain values for the interrupt rate limit make it appear as though the limiting is not completely accurate. The problem is that the interrupt rate limit is getting rounded down to the nearest multiple of 4. This patch fixes the problem by adding some feedback to the user as to the actual interrupt rate limit being used when it differs from the requested limit. Without this patch setting interrupt rate limits may appear to behave inaccurately. 
Change-ID: I3093cf3f2d437d35a4c4f4bb5af5ce1b85ab21b7 Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7500902a612d..c4ab3c1ae02a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2116,6 +2116,7 @@ static int __i40e_set_coalesce(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + u16 intrl_reg; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) @@ -2127,8 +2128,9 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { - netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n"); + if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n", + INTRL_REG_TO_USEC(I40E_MAX_INTRL)); return -EINVAL; } @@ -2141,7 +2143,12 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - vsi->int_rate_limit = ec->rx_coalesce_usecs_high; + intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); + vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); + if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { + netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n", + vsi->int_rate_limit); + } if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) -- cgit v1.2.3 From 143b0b2ae8c1f0c0e036ec859d0a49482892234b Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 26 Oct 2016 14:21:27 +0300 Subject: iwlwifi: mvm: fix PS-Poll enablement We added the uAPSD enabled ACs and that made the firmware choose to pull frames with uAPSD trigger frames instead of PS-Poll. Fix that. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index af6d10c23e5a..3a8e9379da2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -174,6 +174,14 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac; bool tid_found = false; +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* set advanced pm flag with no uapsd ACs to enable ps-poll */ + if (mvmvif->dbgfs_pm.use_ps_poll) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); + return; + } +#endif + for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { if (!mvmvif->queue_params[ac].uapsd) continue; @@ -204,16 +212,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, } } - if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* set advanced pm flag with no uapsd ACs to enable ps-poll */ - if (mvmvif->dbgfs_pm.use_ps_poll) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); -#endif - return; - } - cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | -- cgit v1.2.3 From 1f3706508395043b5f29aff64d907e682b75de42 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 31 Aug 2016 18:13:57 +0300 Subject: iwlwifi: mvm: support unification of INIT and RT images For a000 devices the INIT and RT images are unified to one image. The changes in the flow are the following: * Driver load only RT firmware - meaning that the nvm access command will be done in the RT image load flow. * A new command (NVM_ACCESS_COMPLETE) now signals to the FW that the driver is done accessing the NVM and FW can proceed with phy calibrations. * Phy DB is no longer sent from INIT FW to be restored by driver for the RT FW - all the phy DB is now internal to the FW. INIT complete will now follow the NVM access command, without phy DB calls before. * Paging command is sent earlier in the flow before NVM access to enable a complete load of FW. * caution must be care when restart is called since we may have not completed init flow even though we are in RT firmware. 
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 12 ++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 208 ++++++++++++++++------ drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 - drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 24 ++- 5 files changed, 188 insertions(+), 61 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 567597c26115..563b17796017 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -341,6 +341,10 @@ enum iwl_prot_offload_subcmd_ids { STORED_BEACON_NTF = 0xFF, }; +enum iwl_regulatory_and_nvm_subcmd_ids { + NVM_ACCESS_COMPLETE = 0x0, +}; + enum iwl_fmac_debug_cmds { LMAC_RD_WR = 0x0, UMAC_RD_WR = 0x1, @@ -355,6 +359,7 @@ enum { PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, PROT_OFFLOAD_GROUP = 0xb, + REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, }; @@ -2200,4 +2205,11 @@ struct iwl_dbg_mem_access_rsp { __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ +/** + * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed + */ +struct iwl_nvm_access_complete_cmd { + __le32 reserved; +} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index b278e44e97ad..dd8c8d8e9f18 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -522,6 +522,14 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, return true; } +static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); + + return true; +} + static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { @@ -537,6 +545,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return false; } +static int iwl_mvm_init_paging(struct iwl_mvm *mvm) +{ + const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode]; + int ret; + + /* + * Configure and operate fw paging mechanism. + * The driver configures the paging flow only once. + * The CPU2 paging image is included in the IWL_UCODE_INIT image. + */ + if (!fw->paging_mem_size) + return 0; + + /* + * When dma is not enabled, the driver needs to copy / write + * the downloaded / uploaded page to / from the smem. + * This gets the location of the place were the pages are + * stored. + */ + if (!is_device_dma_capable(mvm->trans->dev)) { + ret = iwl_trans_get_paging_item(mvm); + if (ret) { + IWL_ERR(mvm, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to send the paging cmd\n"); + iwl_free_fw_paging(mvm); + return ret; + } + + return 0; +} static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { @@ -607,40 +657,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); - /* - * configure and operate fw paging mechanism. 
- * driver configures the paging flow only once, CPU2 paging image - * included in the IWL_UCODE_INIT image. - */ - if (fw->paging_mem_size) { - /* - * When dma is not enabled, the driver needs to copy / write - * the downloaded / uploaded page to / from the smem. - * This gets the location of the place were the pages are - * stored. - */ - if (!is_device_dma_capable(mvm->trans->dev)) { - ret = iwl_trans_get_paging_item(mvm); - if (ret) { - IWL_ERR(mvm, "failed to get FW paging item\n"); - return ret; - } - } - - ret = iwl_save_fw_paging(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to save the FW paging image\n"); - return ret; - } - - ret = iwl_send_paging_cmd(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to send the paging cmd\n"); - iwl_free_fw_paging(mvm); - return ret; - } - } - /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they @@ -798,6 +814,75 @@ out: return ret; } +int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +{ + struct iwl_notification_wait init_wait; + struct iwl_nvm_access_complete_cmd nvm_complete = {}; + static const u16 init_complete[] = { + INIT_COMPLETE_NOTIF, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + iwl_init_notification_wait(&mvm->notif_wait, + &init_wait, + init_complete, + ARRAY_SIZE(init_complete), + iwl_wait_init_complete, + NULL); + + /* Will also start the device */ + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + /* TODO: remove when integrating context info */ + ret = iwl_mvm_init_paging(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to init paging: %d\n", + ret); + goto error; + } + + /* Read the NVM only at driver load time, no need to do this twice */ + if (read_nvm) { + /* Read nvm */ + ret = iwl_nvm_init(mvm, true); + if (ret) { + IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); + goto error; + } + } + + /* In case we read the NVM from external file, load it to the NIC */ + if (mvm->nvm_file_name) + iwl_mvm_load_nvm_to_nic(mvm); + + ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); + if (WARN_ON(ret)) + goto error; + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, + NVM_ACCESS_COMPLETE), 0, + sizeof(nvm_complete), &nvm_complete); + if (ret) { + IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", + ret); + goto error; + } + + /* We wait for the INIT complete notification */ + return iwl_wait_notification(&mvm->notif_wait, &init_wait, + MVM_UCODE_ALIVE_TIMEOUT); + +error: + iwl_remove_notification(&mvm->notif_wait, &init_wait); + return ret; +} + static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -1058,23 +1143,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_up(struct iwl_mvm *mvm) +static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { - int ret, i; - struct ieee80211_channel *chan; - struct cfg80211_chan_def chandef; - - lockdep_assert_held(&mvm->mutex); + int ret; - ret = iwl_trans_start_hw(mvm->trans); - if (ret) - return ret; + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_run_unified_mvm_ucode(mvm, false); - /* - * If we haven't completed the run of the init ucode during - * module loading, load init ucode now - * (for example, if we were in RFKILL) - */ ret = iwl_run_init_mvm_ucode(mvm, false); if (iwlmvm_mod_params.init_dbg) @@ -1085,7 +1160,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* this can't happen */ if (WARN_ON(ret > 0)) ret = -ERFKILL; - 
goto error; + return ret; } /* @@ -1096,9 +1171,28 @@ int iwl_mvm_up(struct iwl_mvm *mvm) _iwl_trans_stop_device(mvm->trans, false); ret = _iwl_trans_start_hw(mvm->trans, false); if (ret) - goto error; + return ret; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) + return ret; + + return iwl_mvm_init_paging(mvm); +} + +int iwl_mvm_up(struct iwl_mvm *mvm) +{ + int ret, i; + struct ieee80211_channel *chan; + struct cfg80211_chan_def chandef; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; + + ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); goto error; @@ -1125,13 +1219,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Send phy db control command and then phy db calibration*/ - ret = iwl_send_phy_db_data(mvm->phy_db); - if (ret) - goto error; + if (!iwl_mvm_has_new_tx_api(mvm)) { + ret = iwl_send_phy_db_data(mvm->phy_db); + if (ret) + goto error; - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) - goto error; + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; + } /* Init RSS configuration */ if (iwl_mvm_has_new_rx_api(mvm)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 71f9aa9f7c7d..8d519c3f45bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1210,8 +1210,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) /* the fw is stopped, the aux sta is dead: clean up driver state */ iwl_mvm_del_aux_sta(mvm); - iwl_free_fw_paging(mvm); - /* * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() * won't be called in this case). diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index a672aa71c656..74156b24da16 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -739,6 +739,7 @@ struct iwl_mvm { enum iwl_ucode_type cur_ucode; bool ucode_loaded; + bool hw_registered; bool calibrating; u32 error_event_table; u32 log_event_table; @@ -1257,6 +1258,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm); ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); +int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, @@ -1686,6 +1688,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { + iwl_free_fw_paging(mvm); mvm->ucode_loaded = false; iwl_trans_stop_device(mvm->trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index f14aada390c5..4cd72d4cdc47 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -466,6 +466,13 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { HCMD_NAME(STORED_BEACON_NTF), }; +/* Please keep this array *SORTED* by hex value. 
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = { + HCMD_NAME(NVM_ACCESS_COMPLETE), +}; + static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), @@ -474,6 +481,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), + [REGULATORY_AND_NVM_GROUP] = + HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), }; /* this forward declaration can avoid to export the function */ @@ -597,7 +606,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; } mvm->sf_state = SF_UNINIT; - mvm->cur_ucode = IWL_UCODE_INIT; + if (iwl_mvm_has_new_tx_api(mvm)) + mvm->cur_ucode = IWL_UCODE_REGULAR; + else + mvm->cur_ucode = IWL_UCODE_INIT; mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); @@ -720,7 +732,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); - err = iwl_run_init_mvm_ucode(mvm, true); + if (iwl_mvm_has_new_tx_api(mvm)) + err = iwl_run_unified_mvm_ucode(mvm, true); + else + err = iwl_run_init_mvm_ucode(mvm, true); if (!err || !iwlmvm_mod_params.init_dbg) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); @@ -743,6 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, err = iwl_mvm_mac_setup_register(mvm); if (err) goto out_free; + mvm->hw_registered = true; min_backoff = calc_min_backoff(trans, cfg); iwl_mvm_thermal_initialize(mvm, min_backoff); @@ -764,6 +780,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, out_unregister: ieee80211_unregister_hw(mvm->hw); + mvm->hw_registered = false; iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); out_free: @@ -1192,7 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); - } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) { + } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && + mvm->hw_registered) { /* don't let the transport/FW power down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); -- cgit v1.2.3 From 19f63c531b85281003d7775490fd16d1579e2519 Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Mon, 7 Nov 2016 17:40:43 +0200 Subject: iwlwifi: mvm: support v2 of mfuart load notification Add to the v1 of the mfuart loading notification, the size of the mfuart image, and write it to dmesg once the notification is received. 
Signed-off-by: Golan Ben-Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 5 ++++- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 21 +++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 563b17796017..0b77073e719a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -1256,13 +1256,16 @@ struct iwl_missed_beacons_notif { * @external_ver: external image version * @status: MFUART loading status * @duration: MFUART loading time + * @image_size: MFUART image size in bytes */ struct iwl_mfuart_load_notif { __le32 installed_ver; __le32 external_ver; __le32 status; __le32 duration; -} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/ + /* image size valid only in v2 of the command */ + __le32 image_size; +} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/ /** * struct iwl_set_calib_default_cmd - set default value for calibration. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index dd8c8d8e9f18..3c70fb1199d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1407,10 +1407,19 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; - IWL_DEBUG_INFO(mvm, - "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", - le32_to_cpu(mfuart_notif->installed_ver), - le32_to_cpu(mfuart_notif->external_ver), - le32_to_cpu(mfuart_notif->status), - le32_to_cpu(mfuart_notif->duration)); + if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) + IWL_DEBUG_INFO(mvm, + "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x, image size: 0x%08x\n", + le32_to_cpu(mfuart_notif->installed_ver), + le32_to_cpu(mfuart_notif->external_ver), + le32_to_cpu(mfuart_notif->status), + le32_to_cpu(mfuart_notif->duration), + le32_to_cpu(mfuart_notif->image_size)); + else + IWL_DEBUG_INFO(mvm, + "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", + le32_to_cpu(mfuart_notif->installed_ver), + le32_to_cpu(mfuart_notif->external_ver), + le32_to_cpu(mfuart_notif->status), + le32_to_cpu(mfuart_notif->duration)); } -- cgit v1.2.3 From 6574dc943fc32a2fce69fab14891abca7eecb67c Mon Sep 17 00:00:00 2001 From: Beni Lev Date: Thu, 17 Nov 2016 14:03:17 +0200 Subject: iwlwifi: mvm: Use aux queue for offchannel frames in dqa Since offchannel activity doesn't always require a BSS, e.g. ANQP sessions, offchannel frames should not use the BSS queue, because it might not be initialized. Use the auxilary queue instead Fixes: e3118ad74d7e ("iwlwifi: mvm: support tdls in dqa mode") Signed-off-by: Beni Lev Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 2b2db38eee3e..ea5d5b0d85b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -570,9 +570,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * (this is not possible for unicast packets as a TLDS discovery * response are sent without a station entry); otherwise use the * AUX station. 
- * In DQA mode, if vif is of type STATION and frames are not multicast, - * they should be sent from the BSS queue. For example, TDLS setup - * frames should be sent on this queue, as they go through the AP. + * In DQA mode, if vif is of type STATION and frames are not multicast + * or offchannel, they should be sent from the BSS queue. + * For example, TDLS setup frames should be sent on this queue, + * as they go through the AP. */ sta_id = mvm->aux_sta.sta_id; if (info.control.vif) { @@ -594,7 +595,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_STATION_COUNT) sta_id = ap_sta_id; } else if (iwl_mvm_is_dqa_supported(mvm) && - info.control.vif->type == NL80211_IFTYPE_STATION) { + info.control.vif->type == NL80211_IFTYPE_STATION && + queue != mvm->aux_queue) { queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; } } -- cgit v1.2.3 From 4f23f206ddce85a92b1acdebe0f4e9ed01b1d3f4 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 20 Nov 2016 13:50:39 +0200 Subject: iwlwifi: mvm: cleanup incorrect and redundant define Currently we have up to 3 phy contexts - defined by NUM_PHY_CTX. However - some code paths validate the ID by using MAX_PHYS define which is set to 4. While there is no harm it is incorrect - since the maximum is 3. Remove the define and use the correct one. Cleanup the code a bit while at it. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/power.c | 26 +++++++++++-------------- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2 +- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 0b77073e719a..91f53596130b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -713,7 +713,6 @@ struct iwl_error_resp { #define MAX_MACS_IN_BINDING (3) #define MAX_BINDINGS (4) #define AUX_BINDING_INDEX (3) -#define MAX_PHYS (4) /* Used to extract ID and color from the context dword */ #define FW_CTXT_ID_POS (0) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 3a8e9379da2f..e684811f8e8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -599,9 +599,8 @@ static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool *disable_ps = _data; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - *disable_ps |= mvmvif->ps_disabled; + if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) + *disable_ps |= mvmvif->ps_disabled; } static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, @@ -609,6 +608,7 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_power_vifs *power_iterator = _data; + bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: @@ -619,34 +619,30 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, /* only a single MAC of the same type */ WARN_ON(power_iterator->ap_vif); power_iterator->ap_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->ap_active = true; + if (active) + power_iterator->ap_active = true; break; case 
NL80211_IFTYPE_MONITOR: /* only a single MAC of the same type */ WARN_ON(power_iterator->monitor_vif); power_iterator->monitor_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->monitor_active = true; + if (active) + power_iterator->monitor_active = true; break; case NL80211_IFTYPE_P2P_CLIENT: /* only a single MAC of the same type */ WARN_ON(power_iterator->p2p_vif); power_iterator->p2p_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->p2p_active = true; + if (active) + power_iterator->p2p_active = true; break; case NL80211_IFTYPE_STATION: power_iterator->bss_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->bss_active = true; + if (active) + power_iterator->bss_active = true; break; default: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index fa9743205491..2aaa5ecf97bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -197,7 +197,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, int *global_cnt = data; if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id < MAX_PHYS) + mvmvif->phy_ctxt->id < NUM_PHY_CTX) *global_cnt += 1; } -- cgit v1.2.3 From 0e7ac018dc06a1fcd7cbb54a1ee81082f46af870 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 8 Sep 2016 17:32:19 +0300 Subject: iwlwifi: mvm: support new statistics APIs For CDB arch there is another auxiliary mac. Support statistics APIs that were changed to reflect that. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/mvm/fw-api-mac.h | 7 ++- .../net/wireless/intel/iwlwifi/mvm/fw-api-stats.h | 29 ++++++++- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 13 ++++ drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 70 ++++++++++++++++------ 4 files changed, 97 insertions(+), 22 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h index 0246506ab595..480a54af4534 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -64,13 +64,14 @@ #define __fw_api_mac_h__ /* - * The first MAC indices (starting from 0) - * are available to the driver, AUX follows + * The first MAC indices (starting from 0) are available to the driver, + * AUX indices follows - 1 for non-CDB, 2 for CDB. 
*/ #define MAC_INDEX_AUX 4 #define MAC_INDEX_MIN_DRIVER 0 #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX -#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1) +#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) +#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) #define IWL_MVM_STATION_COUNT 16 #define IWL_MVM_TDLS_STA_COUNT 4 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h index 4e638a44babb..6371c342b96d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h @@ -220,7 +220,7 @@ struct mvm_statistics_bt_activity { __le32 lo_priority_rx_denied_cnt; } __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ -struct mvm_statistics_general_v8 { +struct mvm_statistics_general_common { __le32 radio_temperature; __le32 radio_voltage; struct mvm_statistics_dbg dbg; @@ -248,11 +248,22 @@ struct mvm_statistics_general_v8 { __le64 on_time_rf; __le64 on_time_scan; __le64 tx_time; +} __packed; + +struct mvm_statistics_general_v8 { + struct mvm_statistics_general_common common; __le32 beacon_counter[NUM_MAC_INDEX]; u8 beacon_average_energy[NUM_MAC_INDEX]; u8 reserved[4 - (NUM_MAC_INDEX % 4)]; } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ +struct mvm_statistics_general_cdb { + struct mvm_statistics_general_common common; + __le32 beacon_counter[NUM_MAC_INDEX_CDB]; + u8 beacon_average_energy[NUM_MAC_INDEX_CDB]; + u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)]; +} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */ + /** * struct mvm_statistics_load - RX statistics for multi-queue devices * @air_time: accumulated air time, per mac @@ -267,6 +278,13 @@ struct mvm_statistics_load { u8 avg_energy[IWL_MVM_STATION_COUNT]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ +struct mvm_statistics_load_cdb { + __le32 air_time[NUM_MAC_INDEX_CDB]; + __le32 byte_count[NUM_MAC_INDEX_CDB]; + __le32 pkt_count[NUM_MAC_INDEX_CDB]; + u8 avg_energy[IWL_MVM_STATION_COUNT]; +} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_2 */ + struct mvm_statistics_rx { struct mvm_statistics_rx_phy ofdm; struct mvm_statistics_rx_phy cck; @@ -281,6 +299,7 @@ struct mvm_statistics_rx { * while associated. To disable this behavior, set DISABLE_NOTIF flag in the * STATISTICS_CMD (0x9c), below. */ + struct iwl_notif_statistics_v10 { __le32 flag; struct mvm_statistics_rx rx; @@ -296,6 +315,14 @@ struct iwl_notif_statistics_v11 { struct mvm_statistics_load load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_11 */ +struct iwl_notif_statistics_cdb { + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general_cdb general; + struct mvm_statistics_load_cdb load_stats; +} __packed; /* STATISTICS_NTFY_API_S_VER_12 */ + #define IWL_STATISTICS_FLG_CLEAR 0x1 #define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 74156b24da16..37a4a980beee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1218,6 +1218,19 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) return mvm->trans->cfg->use_tfh; } +static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) +{ + /* + * TODO: + * The issue of how to determine CDB support is still not well defined. + * It may be that it will be for all next HW devices and it may be per + * FW compilation and it may also differ between different devices. 
+ * For now take a ride on the new TX API and get back to it when + * it is well defined. + */ + return iwl_mvm_has_new_tx_api(mvm); +} + static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { #ifdef CONFIG_THERMAL diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index e16687d5afaa..e90c1c12a36c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -497,8 +497,7 @@ struct iwl_mvm_stat_data { struct iwl_mvm *mvm; __le32 mac_id; u8 beacon_filter_average_energy; - struct mvm_statistics_general_v8 *general; - struct mvm_statistics_load *load; + void *general; }; static void iwl_mvm_stat_iterator(void *_data, u8 *mac, @@ -518,10 +517,26 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, * the notification directly. */ if (data->general) { - mvmvif->beacon_stats.num_beacons = - le32_to_cpu(data->general->beacon_counter[mvmvif->id]); - mvmvif->beacon_stats.avg_signal = - -data->general->beacon_average_energy[mvmvif->id]; + u16 vif_id = mvmvif->id; + + if (iwl_mvm_is_cdb_supported(mvm)) { + struct mvm_statistics_general_cdb *general = + data->general; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(general->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -general->beacon_average_energy[vif_id]; + } else { + struct mvm_statistics_general_v8 *general = + data->general; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(general->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -general->beacon_average_energy[vif_id]; + } + } if (mvmvif->id != id) @@ -615,46 +630,65 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data; + struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; struct iwl_mvm_stat_data data = { .mvm = mvm, }; - int expected_size = iwl_mvm_has_new_rx_api(mvm) ? 
sizeof(*stats) : - sizeof(struct iwl_notif_statistics_v10); + int expected_size; + + if (iwl_mvm_is_cdb_supported(mvm)) + expected_size = sizeof(*stats); + else if (iwl_mvm_has_new_rx_api(mvm)) + expected_size = sizeof(struct iwl_notif_statistics_v11); + else + expected_size = sizeof(struct iwl_notif_statistics_v10); if (iwl_rx_packet_payload_len(pkt) != expected_size) goto invalid; data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = - stats->general.beacon_filter_average_energy; + stats->general.common.beacon_filter_average_energy; iwl_mvm_update_rx_statistics(mvm, &stats->rx); - mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time); - mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time); + mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time); mvm->radio_stats.on_time_rf = - le64_to_cpu(stats->general.on_time_rf); + le64_to_cpu(stats->general.common.on_time_rf); mvm->radio_stats.on_time_scan = - le64_to_cpu(stats->general.on_time_scan); + le64_to_cpu(stats->general.common.on_time_scan); data.general = &stats->general; if (iwl_mvm_has_new_rx_api(mvm)) { int i; - - data.load = &stats->load_stats; + u8 *energy; + __le32 *bytes, *air_time; + + if (!iwl_mvm_is_cdb_supported(mvm)) { + struct iwl_notif_statistics_v11 *v11 = + (void *)&pkt->data; + + energy = (void *)&v11->load_stats.avg_energy; + bytes = (void *)&v11->load_stats.byte_count; + air_time = (void *)&v11->load_stats.air_time; + } else { + energy = (void *)&stats->load_stats.avg_energy; + bytes = (void *)&stats->load_stats.byte_count; + air_time = (void *)&stats->load_stats.air_time; + } rcu_read_lock(); for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { struct iwl_mvm_sta *sta; - if (!data.load->avg_energy[i]) + if (!energy[i]) continue; sta = iwl_mvm_sta_from_staid_rcu(mvm, i); if (!sta) continue; - sta->avg_energy = data.load->avg_energy[i]; + sta->avg_energy = energy[i]; } rcu_read_unlock(); } -- cgit v1.2.3 From 9ba221b372c177b6908ff8c39b7b056538622ae1 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 17 Nov 2016 11:35:05 +0200 Subject: iwlwifi: mvm: support new scan API For CDB devices we will want to configure scan parameters per band. Support the new scan API for now. Logic per band will be added in the future. 
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/mvm/fw-api-scan.h | 106 +++++++--- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 228 +++++++++++++++------ 2 files changed, 238 insertions(+), 96 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h index 0c294c9f98e9..c78a0c499459 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h @@ -453,6 +453,8 @@ enum scan_config_flags { SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), + SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22), + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23), /* Bits 26-31 are for num of channels in channel_array */ #define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) @@ -485,6 +487,20 @@ enum iwl_channel_flags { IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), }; +/** + * struct iwl_scan_dwell + * @active: default dwell time for active scan + * @passive: default dwell time for passive scan + * @fragmented: default dwell time for fragmented scan + * @extended: default dwell time for channels 1, 6 and 11 + */ +struct iwl_scan_dwell { + u8 active; + u8 passive; + u8 fragmented; + u8 extended; +} __packed; + /** * struct iwl_scan_config * @flags: enum scan_config_flags @@ -493,10 +509,7 @@ enum iwl_channel_flags { * @legacy_rates: default legacy rates - enum scan_config_rates * @out_of_channel_time: default max out of serving channel time * @suspend_time: default max suspend time - * @dwell_active: default dwell time for active scan - * @dwell_passive: default dwell time for passive scan - * @dwell_fragmented: default dwell time for fragmented scan - * @dwell_extended: default dwell time for channels 1, 6 and 11 + * @dwell: dwells for the scan * @mac_addr: default mac address to be used in probes * @bcast_sta_id: the index of the station in the fw * @channel_flags: default channel flags - enum iwl_channel_flags @@ -510,16 +523,29 @@ struct iwl_scan_config { __le32 legacy_rates; __le32 out_of_channel_time; __le32 suspend_time; - u8 dwell_active; - u8 dwell_passive; - u8 dwell_fragmented; - u8 dwell_extended; + struct iwl_scan_dwell dwell; u8 mac_addr[ETH_ALEN]; u8 bcast_sta_id; u8 channel_flags; u8 channel_array[]; } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ +#define SCAN_TWO_LMACS 2 + +struct iwl_scan_config_cdb { + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + struct iwl_scan_dwell dwell; + u8 mac_addr[ETH_ALEN]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[]; +} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */ + /** * iwl_umac_scan_flags *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request @@ -540,17 +566,18 @@ enum iwl_umac_scan_uid_offsets { }; enum iwl_umac_scan_general_flags { - IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), - IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), - IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), - IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), - IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), - IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), - IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), - IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), - IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), - IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), - IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), + 
IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), + IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), + IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), + IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), + IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), + IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), + IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), + IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), + IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), + IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), + IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), }; /** @@ -610,8 +637,9 @@ struct iwl_scan_req_umac_tail { * @active_dwell: dwell time for active scan * @passive_dwell: dwell time for passive scan * @fragmented_dwell: dwell time for fragmented passive scan - * @max_out_time: max out of serving channel time - * @suspend_time: max suspend time + * @max_out_time: max out of serving channel time, per LMAC - for CDB there + * are 2 LMACs + * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs * @scan_priority: scan internal prioritization &enum iwl_scan_priority * @channel_flags: &enum iwl_scan_channel_flags * @n_channels: num of channels in scan request @@ -631,15 +659,33 @@ struct iwl_scan_req_umac { u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; - __le32 max_out_time; - __le32 suspend_time; - __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved; - u8 data[]; -} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + union { + struct { + __le32 max_out_time; + __le32 suspend_time; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved; + u8 data[]; + } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + struct { + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved; + u8 data[]; + } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */ + }; +} __packed; + +#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(__le32)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 2aaa5ecf97bc..0a64efa844b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -943,18 +943,92 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) return cpu_to_le32(rates); } -int iwl_mvm_config_scan(struct iwl_mvm *mvm) +static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, + struct iwl_scan_dwell *dwell, + struct iwl_mvm_scan_timing_params *timing) +{ + dwell->active = timing->dwell_active; + dwell->passive = timing->dwell_passive; + dwell->fragmented = timing->dwell_fragmented; + dwell->extended = timing->dwell_extended; +} + +static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) { - struct iwl_scan_config *scan_config; struct ieee80211_supported_band *band; - int num_channels = - mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + - mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; - int ret, i, j = 0, cmd_size; + int i, j = 0; + + band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; + for (i = 0; i < band->n_channels; i++, j++) + channels[j] = band->channels[i].hw_value; + band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; + for (i = 0; i < band->n_channels; 
i++, j++) + channels[j] = band->channels[i].hw_value; +} + +static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) +{ + enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + struct iwl_scan_config *cfg = config; + + cfg->flags = cpu_to_le32(flags); + cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); + cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); + cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time); + cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); + + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + + memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); + + cfg->bcast_sta_id = mvm->aux_sta.sta_id; + cfg->channel_flags = channel_flags; + + iwl_mvm_fill_channels(mvm, cfg->channel_array); +} + +static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) +{ + enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + struct iwl_scan_config_cdb *cfg = config; + + cfg->flags = cpu_to_le32(flags); + cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); + cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); + cfg->out_of_channel_time[0] = + cpu_to_le32(scan_timing[type].max_out_time); + cfg->out_of_channel_time[1] = + cpu_to_le32(scan_timing[type].max_out_time); + cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time); + cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time); + + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + + memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); + + cfg->bcast_sta_id = mvm->aux_sta.sta_id; + cfg->channel_flags = channel_flags; + + iwl_mvm_fill_channels(mvm, cfg->channel_array); +} + +int iwl_mvm_config_scan(struct iwl_mvm *mvm) +{ + void *cfg; + int ret, cmd_size; struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + int num_channels = + mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + + mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; + u32 flags; + u8 channel_flags; if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; @@ -965,52 +1039,45 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) return 0; } - cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; + if (iwl_mvm_is_cdb_supported(mvm)) + cmd_size = sizeof(struct iwl_scan_config_cdb); + else + cmd_size = sizeof(struct iwl_scan_config); + cmd_size += mvm->fw->ucode_capa.n_scan_channels; - scan_config = kzalloc(cmd_size, GFP_KERNEL); - if (!scan_config) + cfg = kzalloc(cmd_size, GFP_KERNEL); + if (!cfg) return -ENOMEM; - scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE | - SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | - SCAN_CONFIG_FLAG_SET_TX_CHAINS | - SCAN_CONFIG_FLAG_SET_RX_CHAINS | - SCAN_CONFIG_FLAG_SET_AUX_STA_ID | - SCAN_CONFIG_FLAG_SET_ALL_TIMES | - SCAN_CONFIG_FLAG_SET_LEGACY_RATES | - SCAN_CONFIG_FLAG_SET_MAC_ADDR | - SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS| - SCAN_CONFIG_N_CHANNELS(num_channels) | - (type == IWL_SCAN_TYPE_FRAGMENTED ? 
- SCAN_CONFIG_FLAG_SET_FRAGMENTED : - SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED)); - scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); - scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); - scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm); - scan_config->out_of_channel_time = - cpu_to_le32(scan_timing[type].max_out_time); - scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); - scan_config->dwell_active = scan_timing[type].dwell_active; - scan_config->dwell_passive = scan_timing[type].dwell_passive; - scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented; - scan_config->dwell_extended = scan_timing[type].dwell_extended; - - memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); - - scan_config->bcast_sta_id = mvm->aux_sta.sta_id; - scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS | - IWL_CHANNEL_FLAG_ACCURATE_EBS | - IWL_CHANNEL_FLAG_EBS_ADD | - IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - - band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; - for (i = 0; i < band->n_channels; i++, j++) - scan_config->channel_array[j] = band->channels[i].hw_value; - band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; - for (i = 0; i < band->n_channels; i++, j++) - scan_config->channel_array[j] = band->channels[i].hw_value; + flags = SCAN_CONFIG_FLAG_ACTIVATE | + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | + SCAN_CONFIG_FLAG_SET_TX_CHAINS | + SCAN_CONFIG_FLAG_SET_RX_CHAINS | + SCAN_CONFIG_FLAG_SET_AUX_STA_ID | + SCAN_CONFIG_FLAG_SET_ALL_TIMES | + SCAN_CONFIG_FLAG_SET_LEGACY_RATES | + SCAN_CONFIG_FLAG_SET_MAC_ADDR | + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | + SCAN_CONFIG_N_CHANNELS(num_channels) | + (type == IWL_SCAN_TYPE_FRAGMENTED ? + SCAN_CONFIG_FLAG_SET_FRAGMENTED : + SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); + + channel_flags = IWL_CHANNEL_FLAG_EBS | + IWL_CHANNEL_FLAG_ACCURATE_EBS | + IWL_CHANNEL_FLAG_EBS_ADD | + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; + + if (iwl_mvm_is_cdb_supported(mvm)) { + flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ? 
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; + iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags); + } else { + iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); + } - cmd.data[0] = scan_config; + cmd.data[0] = cfg; cmd.len[0] = cmd_size; cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; @@ -1020,7 +1087,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (!ret) mvm->scan_type = type; - kfree(scan_config); + kfree(cfg); return ret; } @@ -1039,19 +1106,31 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { + struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type]; + if (params->measurement_dwell) { cmd->active_dwell = params->measurement_dwell; cmd->passive_dwell = params->measurement_dwell; cmd->extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = scan_timing[params->type].dwell_active; - cmd->passive_dwell = scan_timing[params->type].dwell_passive; - cmd->extended_dwell = scan_timing[params->type].dwell_extended; + cmd->active_dwell = timing->dwell_active; + cmd->passive_dwell = timing->dwell_passive; + cmd->extended_dwell = timing->dwell_extended; + } + cmd->fragmented_dwell = timing->dwell_fragmented; + + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time); + cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time); + cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time); + cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time); + cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + } else { + cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time); + cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time); + cmd->no_cdb.scan_priority = + cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } - cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; - cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); - cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); - cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); if (iwl_mvm_is_regular_scan(params)) cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); @@ -1063,9 +1142,8 @@ static void iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, struct ieee80211_channel **channels, int n_channels, u32 ssid_bitmap, - struct iwl_scan_req_umac *cmd) + struct iwl_scan_channel_cfg_umac *channel_cfg) { - struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data; int i; for (i = 0; i < n_channels; i++) { @@ -1088,8 +1166,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) { flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; + if (iwl_mvm_is_cdb_supported(mvm)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; + } if (iwl_mvm_rrm_scan_needed(mvm)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; @@ -1126,11 +1207,14 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + + void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ? 
+ (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data; + struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; int uid, i; u32 ssid_bitmap = 0; + u8 channel_flags = 0; struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); @@ -1157,16 +1241,23 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); if (iwl_mvm_scan_use_ebs(mvm, vif)) - cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; + channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - cmd->n_channels = params->n_channels; + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->cdb.channel_flags = channel_flags; + cmd->cdb.n_channels = params->n_channels; + } else { + cmd->no_cdb.channel_flags = channel_flags; + cmd->no_cdb.n_channels = params->n_channels; + } iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, - params->n_channels, ssid_bitmap, cmd); + params->n_channels, ssid_bitmap, + cmd_data); for (i = 0; i < params->n_scan_plans; i++) { struct cfg80211_sched_scan_plan *scan_plan = @@ -1601,8 +1692,13 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) int iwl_mvm_scan_size(struct iwl_mvm *mvm) { + int base_size = IWL_SCAN_REQ_UMAC_SIZE; + + if (iwl_mvm_is_cdb_supported(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB; + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - return sizeof(struct iwl_scan_req_umac) + + return base_size + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels + sizeof(struct iwl_scan_req_umac_tail); -- cgit v1.2.3 From 01796ff2fa6e81b0d806e735308d8171bcbfbec7 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 16 Nov 2016 17:04:36 +0200 Subject: iwlwifi: mvm: always free inactive queue when moving ownership If iwl_mvm_find_free_queue() doesn't find a free queue, it will return an inactive one. However, not all the call paths free this queue before reassigning it, which is a bug. Check it in other paths and act accordingly. 
Fixes: 9794c64f302d ("iwlwifi: mvm: support dqa queue inactivation upon timeout") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 101 ++++++++++++++++++--------- 1 file changed, 69 insertions(+), 32 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 40e7fc5f1f1f..962920861ccf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -457,6 +457,52 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) return disable_agg_tids; } +static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, + bool same_sta) +{ + struct iwl_mvm_sta *mvmsta; + u8 txq_curr_ac, sta_id, tid; + unsigned long disable_agg_tids = 0; + int ret; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->queue_info_lock); + txq_curr_ac = mvm->queue_info[queue].mac80211_ac; + sta_id = mvm->queue_info[queue].ra_sta_id; + tid = mvm->queue_info[queue].txq_tid; + spin_unlock_bh(&mvm->queue_info_lock); + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + + disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); + /* Disable the queue */ + if (disable_agg_tids) + iwl_mvm_invalidate_sta_queue(mvm, queue, + disable_agg_tids, false); + + ret = iwl_mvm_disable_txq(mvm, queue, + mvmsta->vif->hw_queue[txq_curr_ac], + tid, 0); + if (ret) { + /* Re-mark the inactive queue as inactive */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, + "Failed to free inactive queue %d (ret=%d)\n", + queue, ret); + + return ret; + } + + /* If TXQ is allocated to another STA, update removal in FW */ + if (!same_sta) + iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); + + return 0; +} + static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, unsigned long tfd_queue_mask, u8 ac) { @@ -645,7 +691,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; - bool using_inactive_queue = false; + bool using_inactive_queue = false, same_sta = false; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false; @@ -702,6 +748,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; using_inactive_queue = true; + same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; IWL_DEBUG_TX_QUEUES(mvm, "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", queue, mvmsta->sta_id, tid); @@ -748,38 +795,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * first */ if (using_inactive_queue) { - u8 txq_curr_ac, sta_id; - - spin_lock_bh(&mvm->queue_info_lock); - txq_curr_ac = mvm->queue_info[queue].mac80211_ac; - sta_id = mvm->queue_info[queue].ra_sta_id; - spin_unlock_bh(&mvm->queue_info_lock); - - disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); - /* Disable the queue */ - if (disable_agg_tids) - iwl_mvm_invalidate_sta_queue(mvm, queue, - disable_agg_tids, false); - - ret = iwl_mvm_disable_txq(mvm, queue, - mvmsta->vif->hw_queue[txq_curr_ac], - tid, 0); - if (ret) { - IWL_ERR(mvm, - "Failed to free inactive queue %d (ret=%d)\n", - queue, ret); - - /* Re-mark the inactive queue as inactive */ - spin_lock_bh(&mvm->queue_info_lock); - 
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; - spin_unlock_bh(&mvm->queue_info_lock); - + ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta); + if (ret) return ret; - } - - /* If TXQ is allocated to another STA, update removal in FW */ - if (sta_id != mvmsta->sta_id) - iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); } IWL_DEBUG_TX_QUEUES(mvm, @@ -1095,6 +1113,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; + bool using_inactive_queue = false, same_sta = false; /* * Check for inactive queues, so we don't reach a situation where we @@ -1118,6 +1137,14 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; + } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { + /* + * If this queue is already allocated but inactive we'll need to + * first free this queue before enabling it again, we'll mark + * it as reserved to make sure no new traffic arrives on it + */ + using_inactive_queue = true; + same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; @@ -1125,6 +1152,9 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, mvmsta->reserved_queue = queue; + if (using_inactive_queue) + iwl_mvm_free_inactive_queue(mvm, queue, same_sta); + IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id); @@ -2231,6 +2261,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto release_locks; } + /* + * TXQ shouldn't be in inactive mode for non-DQA, so getting + * an inactive queue from iwl_mvm_find_free_queue() is + * certainly a bug + */ + WARN_ON(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_INACTIVE); /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; -- cgit v1.2.3 From 45b957e3edd22139a4e20067b39e15acbf36fb92 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 2 Dec 2016 13:33:40 +0100 Subject: iwlwifi: mvm: reduce usage of IEEE80211_SKB_CB() There's already a variable with the result in scope, use that. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index ea5d5b0d85b3..e2f82c10b019 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -543,9 +543,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * queue. STATION (HS2.0) uses the auxiliary context of the FW, * and hence needs to be sent on the aux queue */ - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && skb_info->control.vif->type == NL80211_IFTYPE_STATION) - IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; + skb_info->hw_queue = mvm->aux_queue; memcpy(&info, skb->cb, sizeof(info)); -- cgit v1.2.3 From 5c228d63f963bd7e20b6a9b06e93272093e98628 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 24 Nov 2016 13:48:27 +0200 Subject: iwlwifi: mvm: support new alive notification Support getting alive from two LMACs and dumping debug data from both. 
Deprecate older alive notifications no one is using. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 72 ++++------------ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 104 +++++++++--------------- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 12 ++- 5 files changed, 67 insertions(+), 125 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 207d8ae1e116..b02d7fc5116f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1741,7 +1741,7 @@ out: static struct iwl_wowlan_status * iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 base = mvm->error_event_table; + u32 base = mvm->error_event_table[0]; struct error_table_start { /* cf. struct iwl_error_event_table */ u32 valid; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 91f53596130b..cf2b836f3888 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -598,60 +598,7 @@ enum { #define IWL_ALIVE_FLG_RFKILL BIT(0) -struct mvm_alive_resp_ver1 { - __le16 status; - __le16 flags; - u8 ucode_minor; - u8 ucode_major; - __le16 id; - u8 api_minor; - u8 api_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le16 reserved2; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ -} __packed; /* ALIVE_RES_API_S_VER_1 */ - -struct mvm_alive_resp_ver2 { - __le16 status; - __le16 flags; - u8 ucode_minor; - u8 ucode_major; - __le16 id; - u8 api_minor; - u8 api_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le16 reserved2; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ - __le32 st_fwrd_addr; /* pointer to Store and forward */ - __le32 st_fwrd_size; - u8 umac_minor; /* UMAC version: minor */ - u8 umac_major; /* UMAC version: major */ - __le16 umac_id; /* UMAC version: id */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; -} __packed; /* ALIVE_RES_API_S_VER_2 */ - -struct mvm_alive_resp { - __le16 status; - __le16 flags; +struct iwl_lmac_alive { __le32 ucode_minor; __le32 ucode_major; u8 ver_subtype; @@ -667,12 +614,29 @@ struct mvm_alive_resp { __le32 scd_base_ptr; /* SRAM address for SCD */ __le32 st_fwrd_addr; /* pointer to Store and forward */ __le32 st_fwrd_size; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ + +struct iwl_umac_alive { __le32 umac_minor; /* UMAC version: minor */ __le32 umac_major; /* UMAC version: major */ __le32 error_info_addr; /* SRAM address for UMAC error log */ __le32 dbg_print_buff_addr; +} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ + +struct mvm_alive_resp_v3 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data; + struct iwl_umac_alive umac_data; } __packed; /* ALIVE_RES_API_S_VER_3 */ +struct mvm_alive_resp { + __le16 status; + __le16 flags; + 
struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_4 */ + /* Error response/notification */ enum { FW_ERR_UNKNOWN_CMD = 0x0, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 3c70fb1199d7..8fd3c2b12ea2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -444,80 +444,52 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_alive_data *alive_data = data; - struct mvm_alive_resp_ver1 *palive1; - struct mvm_alive_resp_ver2 *palive2; + struct mvm_alive_resp_v3 *palive3; struct mvm_alive_resp *palive; + struct iwl_umac_alive *umac; + struct iwl_lmac_alive *lmac1; + struct iwl_lmac_alive *lmac2 = NULL; + u16 status; - if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) { - palive1 = (void *)pkt->data; + if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { + palive = (void *)pkt->data; + umac = &palive->umac_data; + lmac1 = &palive->lmac_data[0]; + lmac2 = &palive->lmac_data[1]; + status = le16_to_cpu(palive->status); + } else { + palive3 = (void *)pkt->data; + umac = &palive3->umac_data; + lmac1 = &palive3->lmac_data; + status = le16_to_cpu(palive3->status); + } - mvm->support_umac_log = false; - mvm->error_event_table = - le32_to_cpu(palive1->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive1->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr); + mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr); + if (lmac2) + mvm->error_event_table[1] = + le32_to_cpu(lmac2->error_event_table_ptr); + mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr); + mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr); + mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size); - alive_data->valid = le16_to_cpu(palive1->status) == - IWL_ALIVE_STATUS_OK; - IWL_DEBUG_FW(mvm, - "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive1->status), palive1->ver_type, - palive1->ver_subtype, palive1->flags); - } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) { - palive2 = (void *)pkt->data; - - mvm->error_event_table = - le32_to_cpu(palive2->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive2->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr); - mvm->umac_error_event_table = - le32_to_cpu(palive2->error_info_addr); - mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr); - mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size); - - alive_data->valid = le16_to_cpu(palive2->status) == - IWL_ALIVE_STATUS_OK; - if (mvm->umac_error_event_table) - mvm->support_umac_log = true; + mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr); - IWL_DEBUG_FW(mvm, - "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive2->status), palive2->ver_type, - palive2->ver_subtype, palive2->flags); + alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr); + alive_data->valid = status == IWL_ALIVE_STATUS_OK; + if (mvm->umac_error_event_table) + mvm->support_umac_log = true; - IWL_DEBUG_FW(mvm, - "UMAC version: Major - 0x%x, Minor - 0x%x\n", - palive2->umac_major, palive2->umac_minor); - } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { - palive = (void *)pkt->data; - - mvm->error_event_table = - 
le32_to_cpu(palive->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); - mvm->umac_error_event_table = - le32_to_cpu(palive->error_info_addr); - mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr); - mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size); - - alive_data->valid = le16_to_cpu(palive->status) == - IWL_ALIVE_STATUS_OK; - if (mvm->umac_error_event_table) - mvm->support_umac_log = true; + IWL_DEBUG_FW(mvm, + "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", + status, lmac1->ver_type, lmac1->ver_subtype); - IWL_DEBUG_FW(mvm, - "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive->status), palive->ver_type, - palive->ver_subtype, palive->flags); + if (lmac2) + IWL_DEBUG_FW(mvm, "Alive ucode CDB\n"); - IWL_DEBUG_FW(mvm, - "UMAC version: Major - 0x%x, Minor - 0x%x\n", - le32_to_cpu(palive->umac_major), - le32_to_cpu(palive->umac_minor)); - } + IWL_DEBUG_FW(mvm, + "UMAC version: Major - 0x%x, Minor - 0x%x\n", + le32_to_cpu(umac->umac_major), + le32_to_cpu(umac->umac_minor)); return true; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 37a4a980beee..73a216524af2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -741,7 +741,7 @@ struct iwl_mvm { bool ucode_loaded; bool hw_registered; bool calibrating; - u32 error_event_table; + u32 error_event_table[2]; u32 log_event_table; u32 umac_error_event_table; bool support_umac_log; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 26b853ef195f..eb3434c0d148 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -497,13 +497,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } -void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) { struct iwl_trans *trans = mvm->trans; struct iwl_error_event_table table; - u32 base; - base = mvm->error_event_table; if (mvm->cur_ucode == IWL_UCODE_INIT) { if (!base) base = mvm->fw->init_errlog_ptr; @@ -574,6 +572,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); +} + +void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +{ + iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); + + if (mvm->error_event_table[1]) + iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]); if (mvm->support_umac_log) iwl_mvm_dump_umac_error_log(mvm); -- cgit v1.2.3 From 4b70f07686d75d1eb5d956812cc810944e0b29b2 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 30 Nov 2016 16:49:11 +0200 Subject: iwlwifi: mvm: synchronize firmware DMA paging memory When driver needs to access the contents of a streaming DMA buffer without unmapping it it should call dma_sync_single_for_cpu(). Once the call has been made, the CPU "owns" the DMA buffer and can work with it as needed. Before the device accesses the buffer, however, ownership should be transferred back to it with dma_sync_single_for_device(). Both calls weren't performed by the driver, resulting with odd paging errors on some platforms. 
Fix it. Fixes: a6c4fb4441f4 ("iwlwifi: mvm: Add FW paging mechanism for the UMAC on PCI") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 4 ++++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 23 ++++++++++++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index e7b3b712d778..a027b11bbdb3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -811,12 +811,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct iwl_fw_error_dump_paging *paging; struct page *pages = mvm->fw_paging_db[i].fw_paging_block; + dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)dump_data->data; paging->index = cpu_to_le32(i); + dma_sync_single_for_cpu(mvm->trans->dev, addr, + PAGING_BLOCK_SIZE, + DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); dump_data = iwl_fw_error_next_data(dump_data); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 8fd3c2b12ea2..c42ef8681b75 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -214,6 +214,10 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, mvm->fw_paging_db[0].fw_paging_size); + dma_sync_single_for_device(mvm->trans->dev, + mvm->fw_paging_db[0].fw_paging_phys, + mvm->fw_paging_db[0].fw_paging_size, + DMA_BIDIRECTIONAL); IWL_DEBUG_FW(mvm, "Paging: copied %d CSS bytes to first block\n", @@ -228,9 +232,16 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) * loop stop at num_of_paging_blk since that last block is not full. */ for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { - memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, - mvm->fw_paging_db[idx].fw_paging_size); + block->fw_paging_size); + dma_sync_single_for_device(mvm->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + IWL_DEBUG_FW(mvm, "Paging: copied %d paging bytes to block %d\n", @@ -242,9 +253,15 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) /* copy the last paging block */ if (mvm->num_of_pages_in_last_blk > 0) { - memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); + dma_sync_single_for_device(mvm->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); IWL_DEBUG_FW(mvm, "Paging: copied %d pages in the last block %d\n", -- cgit v1.2.3 From cba84570a0eb8826bcd69627fd975935bdcf7eaf Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 30 Nov 2016 17:04:52 +0100 Subject: iwlwifi: mvm: fix D3 replay counter value This fixes a long-standing bug that was introduced when this code was introduced: cfg80211 passes a pointer, but we treat it as if it was passing a value. 
The result is that we pass the pointer to the firmware, instead of the value. It's not clear how this could ever have worked, unless the firmware is ignoring this value. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index b02d7fc5116f..c7eb1983c4f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -91,7 +91,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); mvmvif->rekey_data.replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); mvmvif->rekey_data.valid = true; mutex_unlock(&mvm->mutex); -- cgit v1.2.3 From 6ea29ce5749687d125d56b6748fa68eee1791f6e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 1 Dec 2016 16:25:30 +0100 Subject: iwlwifi: mvm: set AID to firmware only for associated stations The firmware will soon actually look at the AID field, and when it does that it'll try to ensure that the AID is never changing. Due to the way the station is added, it may start with an invalid AID before it's associated, so to ensure a constant AID (once it becomes non-zero), track the station state and set the AID only when the station is associated and when it disassociates. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 6 ++++-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 3 ++- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 1 + 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 8d519c3f45bd..72102566a02b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2552,6 +2552,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", @@ -2580,8 +2581,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST && iwl_mvm_is_dqa_supported(mvm)) { - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); @@ -2592,6 +2591,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, } mutex_lock(&mvm->mutex); + /* track whether or not the station is associated */ + mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC; + if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 962920861ccf..4afd52f8d9c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -202,7 +202,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); - 
add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); + if (mvm_sta->associated) + add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index b45c7b9937c8..4be34f902278 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -437,6 +437,7 @@ struct iwl_mvm_sta { bool disable_tx; bool tlc_amsdu; bool sleeping; + bool associated; u8 agg_tids; u8 sleep_tx_count; u8 avg_energy; -- cgit v1.2.3 From e7fe9491261ee07b0c7456df42c34ca6a8538804 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 17:13:19 -0800 Subject: sfc: get rid of custom busy polling code In linux-4.5, busy polling was implemented in core NAPI stack, meaning that all custom implementation can be removed from drivers. Not only we remove lot's of tricky code, we also remove one lock operation in fast path. Signed-off-by: Eric Dumazet Cc: Edward Cree Cc: Bert Kenward Acked-by: Bert Kenward Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 42 ------------ drivers/net/ethernet/sfc/net_driver.h | 125 ---------------------------------- drivers/net/ethernet/sfc/rx.c | 3 +- 3 files changed, 1 insertion(+), 169 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 466c02805f72..fcd4eeecfef4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -308,9 +308,6 @@ static int efx_poll(struct napi_struct *napi, int budget) struct efx_nic *efx = channel->efx; int spent; - if (!efx_channel_lock_napi(channel)) - return budget; - netif_vdbg(efx, intr, efx->net_dev, "channel %d NAPI poll executing on CPU %d\n", channel->channel, raw_smp_processor_id()); @@ -335,7 +332,6 @@ static int efx_poll(struct napi_struct *napi, int budget) efx_nic_eventq_read_ack(channel); } - efx_channel_unlock_napi(channel); return spent; } @@ -391,7 +387,6 @@ void efx_start_eventq(struct efx_channel *channel) channel->enabled = true; smp_wmb(); - efx_channel_enable(channel); napi_enable(&channel->napi_str); efx_nic_eventq_read_ack(channel); } @@ -403,8 +398,6 @@ void efx_stop_eventq(struct efx_channel *channel) return; napi_disable(&channel->napi_str); - while (!efx_channel_disable(channel)) - usleep_range(1000, 20000); channel->enabled = false; } @@ -2088,7 +2081,6 @@ static void efx_init_napi_channel(struct efx_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); - efx_channel_busy_poll_init(channel); } static void efx_init_napi(struct efx_nic *efx) @@ -2138,37 +2130,6 @@ static void efx_netpoll(struct net_device *net_dev) #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -static int efx_busy_poll(struct napi_struct *napi) -{ - struct efx_channel *channel = - container_of(napi, struct efx_channel, napi_str); - struct efx_nic *efx = channel->efx; - int budget = 4; - int old_rx_packets, rx_packets; - - if (!netif_running(efx->net_dev)) - return LL_FLUSH_FAILED; - - if (!efx_channel_try_lock_poll(channel)) - return LL_FLUSH_BUSY; - - old_rx_packets = channel->rx_queue.rx_packets; - efx_process_channel(channel, budget); - - rx_packets = channel->rx_queue.rx_packets - old_rx_packets; - - /* There is no race condition with NAPI here. - * NAPI will automatically be rescheduled if it yielded during busy - * polling, because it was not able to take the lock and thus returned - * the full budget. 
- */ - efx_channel_unlock_poll(channel); - - return rx_packets; -} -#endif - /************************************************************************** * * Kernel net device interface @@ -2402,9 +2363,6 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_poll_controller = efx_netpoll, #endif .ndo_setup_tc = efx_setup_tc, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = efx_busy_poll, -#endif #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5927c20ba43f..73810d2d630c 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -491,131 +491,6 @@ struct efx_channel { u32 sync_timestamp_minor; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum efx_channel_busy_poll_state { - EFX_CHANNEL_STATE_IDLE = 0, - EFX_CHANNEL_STATE_NAPI = BIT(0), - EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1, - EFX_CHANNEL_STATE_NAPI_REQ = BIT(1), - EFX_CHANNEL_STATE_POLL_BIT = 2, - EFX_CHANNEL_STATE_POLL = BIT(2), - EFX_CHANNEL_STATE_DISABLE_BIT = 3, -}; - -static inline void efx_channel_busy_poll_init(struct efx_channel *channel) -{ - WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); -} - -/* Called from the device poll routine to get ownership of a channel. */ -static inline bool efx_channel_lock_napi(struct efx_channel *channel) -{ - unsigned long prev, old = READ_ONCE(channel->busy_poll_state); - - while (1) { - switch (old) { - case EFX_CHANNEL_STATE_POLL: - /* Ensure efx_channel_try_lock_poll() wont starve us */ - set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT, - &channel->busy_poll_state); - /* fallthrough */ - case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&channel->busy_poll_state, old, - EFX_CHANNEL_STATE_NAPI); - if (unlikely(prev != old)) { - /* This is likely to mean we've just entered polling - * state. Go back round to set the REQ bit. - */ - old = prev; - continue; - } - return true; - } -} - -static inline void efx_channel_unlock_napi(struct efx_channel *channel) -{ - /* Make sure write has completed from efx_channel_lock_napi() */ - smp_wmb(); - WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); -} - -/* Called from efx_busy_poll(). */ -static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) -{ - return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE, - EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE; -} - -static inline void efx_channel_unlock_poll(struct efx_channel *channel) -{ - clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline bool efx_channel_busy_polling(struct efx_channel *channel) -{ - return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline void efx_channel_enable(struct efx_channel *channel) -{ - clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT, - &channel->busy_poll_state); -} - -/* Stop further polling or napi access. - * Returns false if the channel is currently busy polling. 
- */ -static inline bool efx_channel_disable(struct efx_channel *channel) -{ - set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); - /* Implicit barrier in efx_channel_busy_polling() */ - return !efx_channel_busy_polling(channel); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline void efx_channel_busy_poll_init(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_lock_napi(struct efx_channel *channel) -{ - return true; -} - -static inline void efx_channel_unlock_napi(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) -{ - return false; -} - -static inline void efx_channel_unlock_poll(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_busy_polling(struct efx_channel *channel) -{ - return false; -} - -static inline void efx_channel_enable(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_disable(struct efx_channel *channel) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * struct efx_msi_context - Context for each MSI * @efx: The associated NIC diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 5f4ad4f3518f..31587f4066ab 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -665,8 +665,7 @@ void __efx_rx_packet(struct efx_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb && - !efx_channel_busy_polling(channel)) + if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); -- cgit v1.2.3 From 86bc8b310f1f323ccc9a311933e35d3442473034 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 18:22:28 -0800 Subject: sfc-falcon: get rid of custom busy polling code In linux-4.5, busy polling was implemented in core NAPI stack, meaning that all custom implementation can be removed from drivers. Not only we remove lot's of tricky code, we also remove one lock operation in fast path. Signed-off-by: Eric Dumazet Cc: Edward Cree Cc: Bert Kenward Acked-by: Bert Kenward Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/falcon/efx.c | 42 --------- drivers/net/ethernet/sfc/falcon/net_driver.h | 125 --------------------------- drivers/net/ethernet/sfc/falcon/rx.c | 3 +- 3 files changed, 1 insertion(+), 169 deletions(-) diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index c4ff3bb1a1ef..f5e5cd1659a1 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -304,9 +304,6 @@ static int ef4_poll(struct napi_struct *napi, int budget) struct ef4_nic *efx = channel->efx; int spent; - if (!ef4_channel_lock_napi(channel)) - return budget; - netif_vdbg(efx, intr, efx->net_dev, "channel %d NAPI poll executing on CPU %d\n", channel->channel, raw_smp_processor_id()); @@ -331,7 +328,6 @@ static int ef4_poll(struct napi_struct *napi, int budget) ef4_nic_eventq_read_ack(channel); } - ef4_channel_unlock_napi(channel); return spent; } @@ -387,7 +383,6 @@ void ef4_start_eventq(struct ef4_channel *channel) channel->enabled = true; smp_wmb(); - ef4_channel_enable(channel); napi_enable(&channel->napi_str); ef4_nic_eventq_read_ack(channel); } @@ -399,8 +394,6 @@ void ef4_stop_eventq(struct ef4_channel *channel) return; napi_disable(&channel->napi_str); - while (!ef4_channel_disable(channel)) - usleep_range(1000, 20000); channel->enabled = false; } @@ -2029,7 +2022,6 @@ static void ef4_init_napi_channel(struct ef4_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll, napi_weight); - ef4_channel_busy_poll_init(channel); } static void ef4_init_napi(struct ef4_nic *efx) @@ -2079,37 +2071,6 @@ static void ef4_netpoll(struct net_device *net_dev) #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -static int ef4_busy_poll(struct napi_struct *napi) -{ - struct ef4_channel *channel = - container_of(napi, struct ef4_channel, napi_str); - struct ef4_nic *efx = channel->efx; - int budget = 4; - int old_rx_packets, rx_packets; - - if (!netif_running(efx->net_dev)) - return LL_FLUSH_FAILED; - - if (!ef4_channel_try_lock_poll(channel)) - return LL_FLUSH_BUSY; - - old_rx_packets = channel->rx_queue.rx_packets; - ef4_process_channel(channel, budget); - - rx_packets = channel->rx_queue.rx_packets - old_rx_packets; - - /* There is no race condition with NAPI here. - * NAPI will automatically be rescheduled if it yielded during busy - * polling, because it was not able to take the lock and thus returned - * the full budget. 
- */ - ef4_channel_unlock_poll(channel); - - return rx_packets; -} -#endif - /************************************************************************** * * Kernel net device interface @@ -2289,9 +2250,6 @@ static const struct net_device_ops ef4_netdev_ops = { .ndo_poll_controller = ef4_netpoll, #endif .ndo_setup_tc = ef4_setup_tc, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ef4_busy_poll, -#endif #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = ef4_filter_rfs, #endif diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h index fe59dd67f918..37a8bdf32206 100644 --- a/drivers/net/ethernet/sfc/falcon/net_driver.h +++ b/drivers/net/ethernet/sfc/falcon/net_driver.h @@ -448,131 +448,6 @@ struct ef4_channel { struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES]; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum ef4_channel_busy_poll_state { - EF4_CHANNEL_STATE_IDLE = 0, - EF4_CHANNEL_STATE_NAPI = BIT(0), - EF4_CHANNEL_STATE_NAPI_REQ_BIT = 1, - EF4_CHANNEL_STATE_NAPI_REQ = BIT(1), - EF4_CHANNEL_STATE_POLL_BIT = 2, - EF4_CHANNEL_STATE_POLL = BIT(2), - EF4_CHANNEL_STATE_DISABLE_BIT = 3, -}; - -static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) -{ - WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); -} - -/* Called from the device poll routine to get ownership of a channel. */ -static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) -{ - unsigned long prev, old = READ_ONCE(channel->busy_poll_state); - - while (1) { - switch (old) { - case EF4_CHANNEL_STATE_POLL: - /* Ensure ef4_channel_try_lock_poll() wont starve us */ - set_bit(EF4_CHANNEL_STATE_NAPI_REQ_BIT, - &channel->busy_poll_state); - /* fallthrough */ - case EF4_CHANNEL_STATE_POLL | EF4_CHANNEL_STATE_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&channel->busy_poll_state, old, - EF4_CHANNEL_STATE_NAPI); - if (unlikely(prev != old)) { - /* This is likely to mean we've just entered polling - * state. Go back round to set the REQ bit. - */ - old = prev; - continue; - } - return true; - } -} - -static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) -{ - /* Make sure write has completed from ef4_channel_lock_napi() */ - smp_wmb(); - WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); -} - -/* Called from ef4_busy_poll(). */ -static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) -{ - return cmpxchg(&channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE, - EF4_CHANNEL_STATE_POLL) == EF4_CHANNEL_STATE_IDLE; -} - -static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) -{ - clear_bit_unlock(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) -{ - return test_bit(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline void ef4_channel_enable(struct ef4_channel *channel) -{ - clear_bit_unlock(EF4_CHANNEL_STATE_DISABLE_BIT, - &channel->busy_poll_state); -} - -/* Stop further polling or napi access. - * Returns false if the channel is currently busy polling. 
- */ -static inline bool ef4_channel_disable(struct ef4_channel *channel) -{ - set_bit(EF4_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); - /* Implicit barrier in ef4_channel_busy_polling() */ - return !ef4_channel_busy_polling(channel); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) -{ - return true; -} - -static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) -{ - return false; -} - -static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) -{ - return false; -} - -static inline void ef4_channel_enable(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_disable(struct ef4_channel *channel) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * struct ef4_msi_context - Context for each MSI * @efx: The associated NIC diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 250458cbdb4d..6a8406dc0c2b 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -674,8 +674,7 @@ void __ef4_rx_packet(struct ef4_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EF4_RX_PKT_CSUMMED; - if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb && - !ef4_channel_busy_polling(channel)) + if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb) ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); -- cgit v1.2.3 From 0d4b729dac35bb265533c79b554578a5ec6df384 Mon Sep 17 00:00:00 2001 From: Arjun V Date: Thu, 2 Feb 2017 12:43:29 +0530 Subject: cxgb4: Fix uld_send() for ctrl pkts Without any uld being loaded, uld_txq_info[] will be NULL. uld_send() is also used for sending control work requests(for eg: setting filter) that dont require any ulds to be loaded. Hence move uld_txq_info[] assignment after ctrl_xmit(). Also added a NULL check for uld_txq_info[]. Fixes: 94cdb8bb993a (cxgb4: Add support for dynamic allocation of resources for ULD). Signed-off-by: Arjun V Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 0fe04b482c38..09653ae0d2b1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1774,15 +1774,20 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, struct sge_uld_txq *txq; unsigned int idx = skb_txq(skb); - txq_info = adap->sge.uld_txq_info[tx_uld_type]; - txq = &txq_info->uldtxq[idx]; - if (unlikely(is_ctrl_pkt(skb))) { /* Single ctrl queue is a requirement for LE workaround path */ if (adap->tids.nsftids) idx = 0; return ctrl_xmit(&adap->sge.ctrlq[idx], skb); } + + txq_info = adap->sge.uld_txq_info[tx_uld_type]; + if (unlikely(!txq_info)) { + WARN_ON(true); + return NET_XMIT_DROP; + } + + txq = &txq_info->uldtxq[idx]; return ofld_xmit(txq, skb); } -- cgit v1.2.3 From c45f8e109b6214a80c27d785c044178231ef4990 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 06:09:14 -0800 Subject: atl1e: add GRO support It is time to add GRO support to this driver. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index ef003c522ba2..4f7e195af0bc 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1472,7 +1472,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, prrs->vtag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - netif_receive_skb(skb); + napi_gro_receive(&adapter->napi, skb); skip_pkt: /* skip current packet whether it's ok or not. */ -- cgit v1.2.3 From ceef438d613f6d496ca84bad2ddf21786ee0e9f6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 06:35:36 -0800 Subject: virtio_net: remove custom busy_poll Generic NAPI busy polling allows us to remove custom implementations found in drivers. It is possible further optimization could be done by testing napi_complete_done() return value. Signed-off-by: Eric Dumazet Acked-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 41 ----------------------------------------- 1 file changed, 41 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bd22cf306a92..0382827829d9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -29,7 +29,6 @@ #include #include #include -#include static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -1024,43 +1023,6 @@ static int virtnet_poll(struct napi_struct *napi, int budget) return received; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int virtnet_busy_poll(struct napi_struct *napi) -{ - struct receive_queue *rq = - container_of(napi, struct receive_queue, napi); - struct virtnet_info *vi = rq->vq->vdev->priv; - int r, received = 0, budget = 4; - - if (!(vi->status & VIRTIO_NET_S_LINK_UP)) - return LL_FLUSH_FAILED; - - if (!napi_schedule_prep(napi)) - return LL_FLUSH_BUSY; - - virtqueue_disable_cb(rq->vq); - -again: - received += virtnet_receive(rq, budget); - - r = virtqueue_enable_cb_prepare(rq->vq); - clear_bit(NAPI_STATE_SCHED, &napi->state); - if (unlikely(virtqueue_poll(rq->vq, r)) && - napi_schedule_prep(napi)) { - virtqueue_disable_cb(rq->vq); - if (received < budget) { - budget -= received; - goto again; - } else { - __napi_schedule(napi); - } - } - - return received; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -1816,9 +1778,6 @@ static const struct net_device_ops virtnet_netdev = { .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, -#endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = virtnet_busy_poll, #endif .ndo_xdp = virtnet_xdp, }; -- cgit v1.2.3 From ff3edc9b8efc8200c25f3a5adfb1c1de0a882dc5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 2 Feb 2017 15:49:24 +0100 Subject: hns_enet: use cpumask_var_t for on-stack mask On large SMP builds, we can run into a build warning: drivers/net/ethernet/hisilicon/hns/hns_enet.c: In function 'hns_set_irq_affinity.isra.27': drivers/net/ethernet/hisilicon/hns/hns_enet.c:1242:1: warning: the frame size of 1032 bytes is larger than 1024 bytes [-Wframe-larger-than=] The solution here is to use cpumask_var_t, which can use dynamic allocation when CONFIG_CPUMASK_OFFSTACK is enabled. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index f7b75e96c1c3..fefe371e5907 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1202,43 +1202,48 @@ static void hns_set_irq_affinity(struct hns_nic_priv *priv) struct hns_nic_ring_data *rd; int i; int cpu; - cpumask_t mask; + cpumask_var_t mask; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return; /*diffrent irq banlance for 16core and 32core*/ if (h->q_num == num_possible_cpus()) { for (i = 0; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index)) { - cpumask_clear(&mask); + cpumask_clear(mask); cpu = rd->queue_index; - cpumask_set_cpu(cpu, &mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } } else { for (i = 0; i < h->q_num; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index * 2)) { - cpumask_clear(&mask); + cpumask_clear(mask); cpu = rd->queue_index * 2; - cpumask_set_cpu(cpu, &mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } for (i = h->q_num; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index * 2 + 1)) { - cpumask_clear(&mask); + cpumask_clear(mask); cpu = rd->queue_index * 2 + 1; - cpumask_set_cpu(cpu, &mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } } + + free_cpumask_var(mask); } static int hns_nic_init_irq(struct hns_nic_priv *priv) -- cgit v1.2.3 From 3541f9e8bdebce02458882b66b638d7302c1f616 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 08:04:56 -0800 Subject: tcp: add tcp_mss_clamp() helper Small cleanup factorizing code doing the TCP_MAXSEG clamping. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 9 +++++++++ net/ipv4/tcp_ipv4.c | 5 +---- net/ipv4/tcp_minisocks.c | 7 ++----- net/ipv4/tcp_output.c | 14 ++++---------- net/ipv6/tcp_ipv6.c | 5 +---- 5 files changed, 17 insertions(+), 23 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index f88f4649ba6f..cfc2d9506ce8 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -445,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); +static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) +{ + /* We use READ_ONCE() here because socket might not be locked. + * This happens for listeners. + */ + u16 user_mss = READ_ONCE(tp->rx_opt.user_mss); + + return (user_mss && user_mss < mss) ? 
user_mss : mss; +} #endif /* _LINUX_TCP_H */ diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8c9e9aa17d66..8c124d4ef4b7 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1324,10 +1324,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); - newtp->advmss = dst_metric_advmss(dst); - if (tcp_sk(sk)->rx_opt.user_mss && - tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) - newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; + newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); tcp_initialize_rcv_mss(newsk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index bdb443471c39..dff7d2aaf861 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -360,15 +360,12 @@ void tcp_openreq_init_rwin(struct request_sock *req, { struct inet_request_sock *ireq = inet_rsk(req); const struct tcp_sock *tp = tcp_sk(sk_listener); - u16 user_mss = READ_ONCE(tp->rx_opt.user_mss); int full_space = tcp_full_space(sk_listener); - int mss = dst_metric_advmss(dst); u32 window_clamp; __u8 rcv_wscale; + int mss; - if (user_mss && user_mss < mss) - mss = user_mss; - + mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); window_clamp = READ_ONCE(tp->window_clamp); /* Set this up on the first call only */ req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6d5bab8a3ea6..956bea9a5394 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3062,7 +3062,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, struct sk_buff *skb; int tcp_header_size; struct tcphdr *th; - u16 user_mss; int mss; skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); @@ -3092,10 +3091,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, } skb_dst_set(skb, dst); - mss = dst_metric_advmss(dst); - user_mss = READ_ONCE(tp->rx_opt.user_mss); - if (user_mss && user_mss < mss) - mss = user_mss; + mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); memset(&opts, 0, sizeof(opts)); #ifdef CONFIG_SYN_COOKIES @@ -3201,9 +3197,7 @@ static void tcp_connect_init(struct sock *sk) if (!tp->window_clamp) tp->window_clamp = dst_metric(dst, RTAX_WINDOW); - tp->advmss = dst_metric_advmss(dst); - if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) - tp->advmss = tp->rx_opt.user_mss; + tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); tcp_initialize_rcv_mss(sk); @@ -3280,8 +3274,8 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) * user-MSS. Reserve maximum option space for middleboxes that add * private TCP options. 
The cost is reduced data space in SYN :( */ - if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) - tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; + tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); + space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - MAX_TCP_OPTION_SPACE; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 64834ec5ab73..6b9fc63fd4d2 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1147,10 +1147,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); - newtp->advmss = dst_metric_advmss(dst); - if (tcp_sk(sk)->rx_opt.user_mss && - tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) - newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; + newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); tcp_initialize_rcv_mss(newsk); -- cgit v1.2.3 From 1d5e7c859e81a66674d194c346119d154d31e9dc Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 1 Feb 2017 15:30:01 +0200 Subject: net/sched: act_ife: Unexport ife_tlv_meta_encode As the function ife_tlv_meta_encode is not used by any other module, unexport it and make it static for the act_ife module. Signed-off-by: Yotam Gigi Signed-off-by: Jamal Hadi Salim Signed-off-by: Roman Mashak Signed-off-by: David S. Miller --- include/net/tc_act/tc_ife.h | 2 -- net/sched/act_ife.c | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h index 9fd2bea0a6e0..f37e7516ab28 100644 --- a/include/net/tc_act/tc_ife.h +++ b/include/net/tc_act/tc_ife.h @@ -45,8 +45,6 @@ struct tcf_meta_ops { int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi); int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi); -int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, - const void *dval); int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp); int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp); int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi); diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 921fb20eaa7c..70148c10ede9 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -48,7 +48,8 @@ static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = { /* Caller takes care of presenting data in network order */ -int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval) +static int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, + const void *dval) { u32 *tlv = (u32 *)(skbdata); u16 totlen = nla_total_size(dlen); /*alignment + hdr */ @@ -61,7 +62,6 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval) return totlen; } -EXPORT_SYMBOL_GPL(ife_tlv_meta_encode); int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi) { -- cgit v1.2.3 From 1ce8460496c05379c66edc178c3c55ca4e953044 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 1 Feb 2017 15:30:02 +0200 Subject: net: Introduce ife encapsulation module This module is responsible for the ife encapsulation protocol encode/decode logics. That module can: - ife_encode: encode skb and reserve space for the ife meta header - ife_decode: decode skb and extract the meta header size - ife_tlv_meta_encode - encodes one tlv entry into the reserved ife header space. 
- ife_tlv_meta_decode - decodes one tlv entry from the packet - ife_tlv_meta_next - advance to the next tlv Reviewed-by: Jiri Pirko Signed-off-by: Yotam Gigi Signed-off-by: Jamal Hadi Salim Signed-off-by: Roman Mashak Signed-off-by: David S. Miller --- MAINTAINERS | 7 +++ include/net/ife.h | 51 +++++++++++++++++ include/uapi/linux/Kbuild | 1 + include/uapi/linux/ife.h | 18 ++++++ net/Kconfig | 1 + net/Makefile | 1 + net/ife/Kconfig | 16 ++++++ net/ife/Makefile | 5 ++ net/ife/ife.c | 142 ++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 242 insertions(+) create mode 100644 include/net/ife.h create mode 100644 include/uapi/linux/ife.h create mode 100644 net/ife/Kconfig create mode 100644 net/ife/Makefile create mode 100644 net/ife/ife.c diff --git a/MAINTAINERS b/MAINTAINERS index 5e637e2b3ff9..2abda6cb3150 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6250,6 +6250,13 @@ F: include/net/cfg802154.h F: include/net/ieee802154_netdev.h F: Documentation/networking/ieee802154.txt +IFE PROTOCOL +M: Yotam Gigi +M: Jamal Hadi Salim +F: net/ife +F: include/net/ife.h +F: include/uapi/linux/ife.h + IGORPLUG-USB IR RECEIVER M: Sean Young L: linux-media@vger.kernel.org diff --git a/include/net/ife.h b/include/net/ife.h new file mode 100644 index 000000000000..2d87d6898b0a --- /dev/null +++ b/include/net/ife.h @@ -0,0 +1,51 @@ +#ifndef __NET_IFE_H +#define __NET_IFE_H + +#include +#include +#include +#include + +#if IS_ENABLED(CONFIG_NET_IFE) + +void *ife_encode(struct sk_buff *skb, u16 metalen); +void *ife_decode(struct sk_buff *skb, u16 *metalen); + +void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen); +int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, + const void *dval); + +void *ife_tlv_meta_next(void *skbdata); + +#else + +static inline void *ife_encode(struct sk_buff *skb, u16 metalen) +{ + return NULL; +} + +static inline void *ife_decode(struct sk_buff *skb, u16 *metalen) +{ + return NULL; +} + +static inline void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, + u16 *totlen) +{ + return NULL; +} + +static inline int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, + const void *dval) +{ + return 0; +} + +static inline void *ife_tlv_meta_next(void *skbdata) +{ + return NULL; +} + +#endif + +#endif /* __NET_IFE_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 486e050e64c5..a2e90722a4c4 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -195,6 +195,7 @@ header-y += if_tun.h header-y += if_tunnel.h header-y += if_vlan.h header-y += if_x25.h +header-y += ife.h header-y += igmp.h header-y += ila.h header-y += in6.h diff --git a/include/uapi/linux/ife.h b/include/uapi/linux/ife.h new file mode 100644 index 000000000000..2954da32e012 --- /dev/null +++ b/include/uapi/linux/ife.h @@ -0,0 +1,18 @@ +#ifndef __UAPI_IFE_H +#define __UAPI_IFE_H + +#define IFE_METAHDRLEN 2 + +enum { + IFE_META_SKBMARK = 1, + IFE_META_HASHID, + IFE_META_PRIO, + IFE_META_QMAP, + IFE_META_TCINDEX, + __IFE_META_MAX +}; + +/*Can be overridden at runtime by module option*/ +#define IFE_META_MAX (__IFE_META_MAX - 1) + +#endif diff --git a/net/Kconfig b/net/Kconfig index ce4aee69fc0d..2f2842d2d3ed 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -391,6 +391,7 @@ source "net/caif/Kconfig" source "net/ceph/Kconfig" source "net/nfc/Kconfig" source "net/psample/Kconfig" +source "net/ife/Kconfig" config LWTUNNEL bool "Network light weight tunnels" diff --git a/net/Makefile b/net/Makefile index 
7d41de48310e..9b681550e3a3 100644 --- a/net/Makefile +++ b/net/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_CEPH_LIB) += ceph/ obj-$(CONFIG_BATMAN_ADV) += batman-adv/ obj-$(CONFIG_NFC) += nfc/ obj-$(CONFIG_PSAMPLE) += psample/ +obj-$(CONFIG_NET_IFE) += ife/ obj-$(CONFIG_OPENVSWITCH) += openvswitch/ obj-$(CONFIG_VSOCKETS) += vmw_vsock/ obj-$(CONFIG_MPLS) += mpls/ diff --git a/net/ife/Kconfig b/net/ife/Kconfig new file mode 100644 index 000000000000..31e48b652c7c --- /dev/null +++ b/net/ife/Kconfig @@ -0,0 +1,16 @@ +# +# IFE subsystem configuration +# + +menuconfig NET_IFE + depends on NET + tristate "Inter-FE based on IETF ForCES InterFE LFB" + default n + help + Say Y here to add support of IFE encapsulation protocol + For details refer to netdev01 paper: + "Distributing Linux Traffic Control Classifier-Action Subsystem" + Authors: Jamal Hadi Salim and Damascene M. Joachimpillai + + To compile this support as a module, choose M here: the module will + be called ife. diff --git a/net/ife/Makefile b/net/ife/Makefile new file mode 100644 index 000000000000..2a90d97746cc --- /dev/null +++ b/net/ife/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the IFE encapsulation protocol +# + +obj-$(CONFIG_NET_IFE) += ife.o diff --git a/net/ife/ife.c b/net/ife/ife.c new file mode 100644 index 000000000000..f360341c72eb --- /dev/null +++ b/net/ife/ife.c @@ -0,0 +1,142 @@ +/* + * net/ife/ife.c - Inter-FE protocol based on ForCES WG InterFE LFB + * Copyright (c) 2015 Jamal Hadi Salim + * Copyright (c) 2017 Yotam Gigi + * + * Refer to: draft-ietf-forces-interfelfb-03 and netdev01 paper: + * "Distributing Linux Traffic Control Classifier-Action Subsystem" + * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ifeheadr { + __be16 metalen; + u8 tlv_data[]; +}; + +void *ife_encode(struct sk_buff *skb, u16 metalen) +{ + /* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA + * where ORIGDATA = original ethernet header ... 
+ */ + int hdrm = metalen + IFE_METAHDRLEN; + int total_push = hdrm + skb->dev->hard_header_len; + struct ifeheadr *ifehdr; + struct ethhdr *iethh; /* inner ether header */ + int skboff = 0; + int err; + + err = skb_cow_head(skb, total_push); + if (unlikely(err)) + return NULL; + + iethh = (struct ethhdr *) skb->data; + + __skb_push(skb, total_push); + memcpy(skb->data, iethh, skb->dev->hard_header_len); + skb_reset_mac_header(skb); + skboff += skb->dev->hard_header_len; + + /* total metadata length */ + ifehdr = (struct ifeheadr *) (skb->data + skboff); + metalen += IFE_METAHDRLEN; + ifehdr->metalen = htons(metalen); + + return ifehdr->tlv_data; +} +EXPORT_SYMBOL_GPL(ife_encode); + +void *ife_decode(struct sk_buff *skb, u16 *metalen) +{ + struct ifeheadr *ifehdr; + int total_pull; + u16 ifehdrln; + + ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len); + ifehdrln = ntohs(ifehdr->metalen); + total_pull = skb->dev->hard_header_len + ifehdrln; + + if (unlikely(ifehdrln < 2)) + return NULL; + + if (unlikely(!pskb_may_pull(skb, total_pull))) + return NULL; + + skb_set_mac_header(skb, total_pull); + __skb_pull(skb, total_pull); + *metalen = ifehdrln - IFE_METAHDRLEN; + + return &ifehdr->tlv_data; +} +EXPORT_SYMBOL_GPL(ife_decode); + +struct meta_tlvhdr { + __be16 type; + __be16 len; +}; + +/* Caller takes care of presenting data in network order + */ +void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen) +{ + struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata; + + *dlen = ntohs(tlv->len) - NLA_HDRLEN; + *attrtype = ntohs(tlv->type); + + if (totlen) + *totlen = nla_total_size(*dlen); + + return skbdata + sizeof(struct meta_tlvhdr); +} +EXPORT_SYMBOL_GPL(ife_tlv_meta_decode); + +void *ife_tlv_meta_next(void *skbdata) +{ + struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata; + u16 tlvlen = ntohs(tlv->len); + + tlvlen = NLA_ALIGN(tlvlen); + + return skbdata + tlvlen; +} +EXPORT_SYMBOL_GPL(ife_tlv_meta_next); + +/* Caller takes care of presenting data in network order + */ +int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval) +{ + __be32 *tlv = (__be32 *) (skbdata); + u16 totlen = nla_total_size(dlen); /*alignment + hdr */ + char *dptr = (char *) tlv + NLA_HDRLEN; + u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN); + + *tlv = htonl(htlv); + memset(dptr, 0, totlen - NLA_HDRLEN); + memcpy(dptr, dval, dlen); + + return totlen; +} +EXPORT_SYMBOL_GPL(ife_tlv_meta_encode); + +MODULE_AUTHOR("Jamal Hadi Salim "); +MODULE_AUTHOR("Yotam Gigi "); +MODULE_DESCRIPTION("Inter-FE LFB action"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 295a6e06d21e1f469c9f38b00125a13b60ad4e7c Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 1 Feb 2017 15:30:03 +0200 Subject: net/sched: act_ife: Change to use ife module Use the encode/decode functionality from the ife module instead of using implementation inside the act_ife. Reviewed-by: Jiri Pirko Signed-off-by: Yotam Gigi Signed-off-by: Jamal Hadi Salim Signed-off-by: Roman Mashak Signed-off-by: David S. 
Miller --- include/net/tc_act/tc_ife.h | 1 - include/uapi/linux/tc_act/tc_ife.h | 10 +--- net/sched/Kconfig | 1 + net/sched/act_ife.c | 110 +++++++++++-------------------------- 4 files changed, 34 insertions(+), 88 deletions(-) diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h index f37e7516ab28..30ba459ddd34 100644 --- a/include/net/tc_act/tc_ife.h +++ b/include/net/tc_act/tc_ife.h @@ -6,7 +6,6 @@ #include #include -#define IFE_METAHDRLEN 2 struct tcf_ife_info { struct tc_action common; u8 eth_dst[ETH_ALEN]; diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h index cd18360eca24..7c2817866c97 100644 --- a/include/uapi/linux/tc_act/tc_ife.h +++ b/include/uapi/linux/tc_act/tc_ife.h @@ -3,6 +3,7 @@ #include #include +#include #define TCA_ACT_IFE 25 /* Flag bits for now just encoding/decoding; mutually exclusive */ @@ -28,13 +29,4 @@ enum { }; #define TCA_IFE_MAX (__TCA_IFE_MAX - 1) -#define IFE_META_SKBMARK 1 -#define IFE_META_HASHID 2 -#define IFE_META_PRIO 3 -#define IFE_META_QMAP 4 -#define IFE_META_TCINDEX 5 -/*Can be overridden at runtime by module option*/ -#define __IFE_META_MAX 6 -#define IFE_META_MAX (__IFE_META_MAX - 1) - #endif diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 72cfa3a6bac0..403790cce7d2 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -776,6 +776,7 @@ config NET_ACT_SKBMOD config NET_ACT_IFE tristate "Inter-FE action based on IETF ForCES InterFE LFB" depends on NET_CLS_ACT + select NET_IFE ---help--- Say Y here to allow for sourcing and terminating metadata For details refer to netdev01 paper: diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 70148c10ede9..71e7ff22f7c9 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -32,6 +32,7 @@ #include #include #include +#include #define IFE_TAB_MASK 15 @@ -46,23 +47,6 @@ static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = { [TCA_IFE_TYPE] = { .type = NLA_U16}, }; -/* Caller takes care of presenting data in network order -*/ -static int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, - const void *dval) -{ - u32 *tlv = (u32 *)(skbdata); - u16 totlen = nla_total_size(dlen); /*alignment + hdr */ - char *dptr = (char *)tlv + NLA_HDRLEN; - u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN); - - *tlv = htonl(htlv); - memset(dptr, 0, totlen - NLA_HDRLEN); - memcpy(dptr, dval, dlen); - - return totlen; -} - int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi) { u16 edata = 0; @@ -637,69 +621,59 @@ int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, return 0; } -struct ifeheadr { - __be16 metalen; - u8 tlv_data[]; -}; - -struct meta_tlvhdr { - __be16 type; - __be16 len; -}; - static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_ife_info *ife = to_ife(a); int action = ife->tcf_action; - struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data; - int ifehdrln = (int)ifehdr->metalen; - struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data); + u8 *ifehdr_end; + u8 *tlv_data; + u16 metalen; spin_lock(&ife->tcf_lock); bstats_update(&ife->tcf_bstats, skb); tcf_lastuse_update(&ife->tcf_tm); spin_unlock(&ife->tcf_lock); - ifehdrln = ntohs(ifehdrln); - if (unlikely(!pskb_may_pull(skb, ifehdrln))) { + if (skb_at_tc_ingress(skb)) + skb_push(skb, skb->dev->hard_header_len); + + tlv_data = ife_decode(skb, &metalen); + if (unlikely(!tlv_data)) { spin_lock(&ife->tcf_lock); ife->tcf_qstats.drops++; spin_unlock(&ife->tcf_lock); 
return TC_ACT_SHOT; } - skb_set_mac_header(skb, ifehdrln); - __skb_pull(skb, ifehdrln); - skb->protocol = eth_type_trans(skb, skb->dev); - ifehdrln -= IFE_METAHDRLEN; - - while (ifehdrln > 0) { - u8 *tlvdata = (u8 *)tlv; - u16 mtype = tlv->type; - u16 mlen = tlv->len; - u16 alen; + ifehdr_end = tlv_data + metalen; + for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) { + u8 *curr_data; + u16 mtype; + u16 dlen; - mtype = ntohs(mtype); - mlen = ntohs(mlen); - alen = NLA_ALIGN(mlen); + curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL); - if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN), - (void *)(tlvdata + NLA_HDRLEN))) { + if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) { /* abuse overlimits to count when we receive metadata * but dont have an ops for it */ - pr_info_ratelimited("Unknown metaid %d alnlen %d\n", - mtype, mlen); + pr_info_ratelimited("Unknown metaid %d dlen %d\n", + mtype, dlen); ife->tcf_qstats.overlimits++; } + } - tlvdata += alen; - ifehdrln -= alen; - tlv = (struct meta_tlvhdr *)tlvdata; + if (WARN_ON(tlv_data != ifehdr_end)) { + spin_lock(&ife->tcf_lock); + ife->tcf_qstats.drops++; + spin_unlock(&ife->tcf_lock); + return TC_ACT_SHOT; } + skb->protocol = eth_type_trans(skb, skb->dev); skb_reset_network_header(skb); + return action; } @@ -727,7 +701,6 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, struct tcf_ife_info *ife = to_ife(a); int action = ife->tcf_action; struct ethhdr *oethh; /* outer ether header */ - struct ethhdr *iethh; /* inner eth header */ struct tcf_meta_info *e; /* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA @@ -735,10 +708,11 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, */ u16 metalen = ife_get_sz(skb, ife); int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN; - unsigned int skboff = skb->dev->hard_header_len; + unsigned int skboff = 0; int new_len = skb->len + hdrm; bool exceed_mtu = false; - int err; + void *ife_meta; + int err = 0; if (!skb_at_tc_ingress(skb)) { if (new_len > skb->dev->mtu) @@ -765,27 +739,10 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, return TC_ACT_SHOT; } - err = skb_cow_head(skb, hdrm); - if (unlikely(err)) { - ife->tcf_qstats.drops++; - spin_unlock(&ife->tcf_lock); - return TC_ACT_SHOT; - } - if (skb_at_tc_ingress(skb)) skb_push(skb, skb->dev->hard_header_len); - iethh = (struct ethhdr *)skb->data; - __skb_push(skb, hdrm); - memcpy(skb->data, iethh, skb->mac_len); - skb_reset_mac_header(skb); - oethh = eth_hdr(skb); - - /*total metadata length */ - metalen += IFE_METAHDRLEN; - metalen = htons(metalen); - memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN); - skboff += IFE_METAHDRLEN; + ife_meta = ife_encode(skb, metalen); /* XXX: we dont have a clever way of telling encode to * not repeat some of the computations that are done by @@ -793,7 +750,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, */ list_for_each_entry(e, &ife->metalist, metalist) { if (e->ops->encode) { - err = e->ops->encode(skb, (void *)(skb->data + skboff), + err = e->ops->encode(skb, (void *)(ife_meta + skboff), e); } if (err < 0) { @@ -804,15 +761,12 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, } skboff += err; } + oethh = (struct ethhdr *)skb->data; if (!is_zero_ether_addr(ife->eth_src)) ether_addr_copy(oethh->h_source, ife->eth_src); - else - ether_addr_copy(oethh->h_source, iethh->h_source); if 
(!is_zero_ether_addr(ife->eth_dst)) ether_addr_copy(oethh->h_dest, ife->eth_dst); - else - ether_addr_copy(oethh->h_dest, iethh->h_dest); oethh->h_proto = htons(ife->eth_type); if (skb_at_tc_ingress(skb)) -- cgit v1.2.3 From f35581d64e55fc65753a62957b3b98127d560d07 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 31 Jan 2017 22:59:51 -0800 Subject: ip_tunnels: new IP_TUNNEL_INFO_BRIDGE flag for ip_tunnel_info mode New ip_tunnel_info flag to represent bridged tunnel metadata. Used by bridge driver later in the series to pass per vlan dst metadata to bridge ports. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/net/ip_tunnels.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 3d4ca4df1209..95056796657c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -58,6 +58,7 @@ struct ip_tunnel_key { /* Flags for ip_tunnel_info mode. */ #define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */ #define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */ +#define IP_TUNNEL_INFO_BRIDGE 0x04 /* represents a bridged tunnel id */ /* Maximum tunnel options length. */ #define IP_TUNNEL_OPTS_MAX \ -- cgit v1.2.3 From 3ad7a4b141ebd6091494913672d7166d5c2764e4 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 31 Jan 2017 22:59:52 -0800 Subject: vxlan: support fdb and learning in COLLECT_METADATA mode Vxlan COLLECT_METADATA mode today solves the per-vni netdev scalability problem in l3 networks. It expects all forwarding information to be present in dst_metadata. This patch series enhances collect metadata mode to include the case where only vni is present in dst_metadata, and the vxlan driver can then use the rest of the forwarding information datbase to make forwarding decisions. There is no change to default COLLECT_METADATA behaviour. These changes only apply to COLLECT_METADATA when used with the bridging use-case with a special dst_metadata tunnel info flag (eg: where vxlan device is part of a bridge). For all this to work, the vxlan driver will need to now support a single fdb table hashed by mac + vni. This series essentially makes this happen. use-case and workflow: vxlan collect metadata device participates in bridging vlan to vn-segments. Bridge driver above the vxlan device, sends the vni corresponding to the vlan in the dst_metadata. vxlan driver will lookup forwarding database with (mac + vni) for the required remote destination information to forward the packet. Changes introduced by this patch: - allow learning and forwarding database state in vxlan netdev in COLLECT_METADATA mode. Current behaviour is not changed by default. tunnel info flag IP_TUNNEL_INFO_BRIDGE is used to support the new bridge friendly mode. - A single fdb table hashed by (mac, vni) to allow fdb entries with multiple vnis in the same fdb table - rx path already has the vni - tx path expects a vni in the packet with dst_metadata - prior to this series, fdb remote_dsts carried remote vni and the vxlan device carrying the fdb table represented the source vni. With the vxlan device now representing multiple vnis, this patch adds a src vni attribute to the fdb entry. The remote vni already uses NDA_VNI attribute. This patch introduces NDA_SRC_VNI netlink attribute to represent the src vni in a multi vni fdb table. iproute2 example (patched and pruned iproute2 output to just show relevant fdb entries): example shows same host mac learnt on two vni's. 
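Before the fdb dumps, a condensed view of what the single-table lookup now does on transmit; this is an illustrative sketch rather than the merged driver code (example_bridge_mode_lookup() is an invented name, while the helpers it calls are the ones touched in the diff below):

static struct vxlan_fdb *example_bridge_mode_lookup(struct vxlan_dev *vxlan,
						    struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be32 vni = 0;

	/* in bridge-friendly collect-metadata mode the bridge hands the
	 * VNI for the packet's vlan to the driver via dst_metadata
	 */
	if (info && (info->mode & IP_TUNNEL_INFO_BRIDGE) &&
	    (info->mode & IP_TUNNEL_INFO_TX))
		vni = tunnel_id_to_key32(info->key.tun_id);

	/* the single fdb is now keyed by (mac, vni) rather than mac alone */
	return vxlan_find_mac(vxlan, eth_hdr(skb)->h_dest, vni);
}
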
before (netdev per vni): $bridge fdb show | grep "00:02:00:00:00:03" 00:02:00:00:00:03 dev vxlan1001 dst 12.0.0.8 self 00:02:00:00:00:03 dev vxlan1000 dst 12.0.0.8 self after this patch with collect metadata in bridged mode (single netdev): $bridge fdb show | grep "00:02:00:00:00:03" 00:02:00:00:00:03 dev vxlan0 src_vni 1001 dst 12.0.0.8 self 00:02:00:00:00:03 dev vxlan0 src_vni 1000 dst 12.0.0.8 self Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 196 ++++++++++++++++++++++++++--------------- include/uapi/linux/neighbour.h | 1 + 2 files changed, 126 insertions(+), 71 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2e48ce22eabf..2374a75dcb55 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -75,6 +75,7 @@ struct vxlan_fdb { struct list_head remotes; u8 eth_addr[ETH_ALEN]; u16 state; /* see ndm_state */ + __be32 vni; u8 flags; /* see ndm_flags */ }; @@ -302,6 +303,10 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (rdst->remote_vni != vxlan->default_dst.remote_vni && nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) goto nla_put_failure; + if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && + nla_put_u32(skb, NDA_SRC_VNI, + be32_to_cpu(fdb->vni))) + goto nla_put_failure; if (rdst->remote_ifindex && nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) goto nla_put_failure; @@ -400,34 +405,51 @@ static u32 eth_hash(const unsigned char *addr) return hash_64(value, FDB_HASH_BITS); } +static u32 eth_vni_hash(const unsigned char *addr, __be32 vni) +{ + /* use 1 byte of OUI and 3 bytes of NIC */ + u32 key = get_unaligned((u32 *)(addr + 2)); + + return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1); +} + /* Hash chain to use given mac address */ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, - const u8 *mac) + const u8 *mac, __be32 vni) { - return &vxlan->fdb_head[eth_hash(mac)]; + if (vxlan->flags & VXLAN_F_COLLECT_METADATA) + return &vxlan->fdb_head[eth_vni_hash(mac, vni)]; + else + return &vxlan->fdb_head[eth_hash(mac)]; } /* Look up Ethernet address in forwarding table */ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, - const u8 *mac) + const u8 *mac, __be32 vni) { - struct hlist_head *head = vxlan_fdb_head(vxlan, mac); + struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); struct vxlan_fdb *f; hlist_for_each_entry_rcu(f, head, hlist) { - if (ether_addr_equal(mac, f->eth_addr)) - return f; + if (ether_addr_equal(mac, f->eth_addr)) { + if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { + if (vni == f->vni) + return f; + } else { + return f; + } + } } return NULL; } static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, - const u8 *mac) + const u8 *mac, __be32 vni) { struct vxlan_fdb *f; - f = __vxlan_find_mac(vxlan, mac); + f = __vxlan_find_mac(vxlan, mac, vni); if (f) f->used = jiffies; @@ -605,15 +627,15 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, - __be16 port, __be32 vni, __u32 ifindex, - __u8 ndm_flags) + __be16 port, __be32 src_vni, __be32 vni, + __u32 ifindex, __u8 ndm_flags) { struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; int notify = 0; int rc; - f = __vxlan_find_mac(vxlan, mac); + f = __vxlan_find_mac(vxlan, mac, src_vni); if (f) { if (flags & NLM_F_EXCL) { netdev_dbg(vxlan->dev, @@ -670,6 +692,7 @@ static int vxlan_fdb_create(struct vxlan_dev 
*vxlan, f->state = state; f->flags = ndm_flags; f->updated = f->used = jiffies; + f->vni = src_vni; INIT_LIST_HEAD(&f->remotes); memcpy(f->eth_addr, mac, ETH_ALEN); @@ -681,7 +704,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, ++vxlan->addrcnt; hlist_add_head_rcu(&f->hlist, - vxlan_fdb_head(vxlan, mac)); + vxlan_fdb_head(vxlan, mac, src_vni)); } if (notify) { @@ -718,8 +741,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) } static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, - union vxlan_addr *ip, __be16 *port, __be32 *vni, - u32 *ifindex) + union vxlan_addr *ip, __be16 *port, __be32 *src_vni, + __be32 *vni, u32 *ifindex) { struct net *net = dev_net(vxlan->dev); int err; @@ -757,6 +780,14 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, *vni = vxlan->default_dst.remote_vni; } + if (tb[NDA_SRC_VNI]) { + if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) + return -EINVAL; + *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); + } else { + *src_vni = vxlan->default_dst.remote_vni; + } + if (tb[NDA_IFINDEX]) { struct net_device *tdev; @@ -782,7 +813,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* struct net *net = dev_net(vxlan->dev); */ union vxlan_addr ip; __be16 port; - __be32 vni; + __be32 src_vni, vni; u32 ifindex; int err; @@ -795,7 +826,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (tb[NDA_DST] == NULL) return -EINVAL; - err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); + err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex); if (err) return err; @@ -804,36 +835,24 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], spin_lock_bh(&vxlan->hash_lock); err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, - port, vni, ifindex, ndm->ndm_flags); + port, src_vni, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); return err; } -/* Delete entry (via netlink) */ -static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) +static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, + const unsigned char *addr, union vxlan_addr ip, + __be16 port, __be32 src_vni, u32 vni, u32 ifindex, + u16 vid) { - struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; struct vxlan_rdst *rd = NULL; - union vxlan_addr ip; - __be16 port; - __be32 vni; - u32 ifindex; - int err; + int err = -ENOENT; - err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); - if (err) - return err; - - err = -ENOENT; - - spin_lock_bh(&vxlan->hash_lock); - f = vxlan_find_mac(vxlan, addr); + f = vxlan_find_mac(vxlan, addr, src_vni); if (!f) - goto out; + return err; if (!vxlan_addr_any(&ip)) { rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex); @@ -841,8 +860,6 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], goto out; } - err = 0; - /* remove a destination if it's not the only one on the list, * otherwise destroy the fdb entry */ @@ -856,6 +873,28 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], vxlan_fdb_destroy(vxlan, f); out: + return 0; +} + +/* Delete entry (via netlink) */ +static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + union vxlan_addr ip; + __be32 src_vni, vni; + __be16 port; + u32 ifindex; + int err; + + err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, 
&vni, &ifindex); + if (err) + return err; + + spin_lock_bh(&vxlan->hash_lock); + err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, + vid); spin_unlock_bh(&vxlan->hash_lock); return err; @@ -901,12 +940,13 @@ out: * Return true if packet is bogus and should be dropped. */ static bool vxlan_snoop(struct net_device *dev, - union vxlan_addr *src_ip, const u8 *src_mac) + union vxlan_addr *src_ip, const u8 *src_mac, + __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; - f = vxlan_find_mac(vxlan, src_mac); + f = vxlan_find_mac(vxlan, src_mac, vni); if (likely(f)) { struct vxlan_rdst *rdst = first_remote_rcu(f); @@ -935,6 +975,7 @@ static bool vxlan_snoop(struct net_device *dev, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, + vni, vxlan->default_dst.remote_vni, 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); @@ -1202,7 +1243,7 @@ static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, static bool vxlan_set_mac(struct vxlan_dev *vxlan, struct vxlan_sock *vs, - struct sk_buff *skb) + struct sk_buff *skb, __be32 vni) { union vxlan_addr saddr; @@ -1226,7 +1267,7 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, } if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) + vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni)) return false; return true; @@ -1268,6 +1309,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) __be16 protocol = htons(ETH_P_TEB); bool raw_proto = false; void *oiph; + __be32 vni = 0; /* Need UDP and VXLAN header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1289,7 +1331,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (!vs) goto drop; - vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni)); + vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); + + if ((vs->flags & VXLAN_F_COLLECT_METADATA) && !vni) + goto drop; + + vxlan = vxlan_vs_find_vni(vs, vni); if (!vxlan) goto drop; @@ -1307,7 +1354,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; if (vxlan_collect_metadata(vs)) { - __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); struct metadata_dst *tun_dst; tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, @@ -1345,7 +1391,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) } if (!raw_proto) { - if (!vxlan_set_mac(vxlan, vs, skb)) + if (!vxlan_set_mac(vxlan, vs, skb, vni)) goto drop; } else { skb_reset_mac_header(skb); @@ -1377,7 +1423,7 @@ drop: return 0; } -static int arp_reduce(struct net_device *dev, struct sk_buff *skb) +static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); struct arphdr *parp; @@ -1424,7 +1470,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) goto out; } - f = vxlan_find_mac(vxlan, n->ha); + f = vxlan_find_mac(vxlan, n->ha, vni); if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { /* bridge-local neighbor */ neigh_release(n); @@ -1548,7 +1594,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, return reply; } -static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) +static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); struct nd_msg *msg; @@ -1585,7 +1631,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) goto out; } - f = vxlan_find_mac(vxlan, n->ha); + f = vxlan_find_mac(vxlan, n->ha, vni); if (f && 
vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { /* bridge-local neighbor */ neigh_release(n); @@ -1906,7 +1952,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, - struct vxlan_dev *dst_vxlan) + struct vxlan_dev *dst_vxlan, __be32 vni) { struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; @@ -1932,7 +1978,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, } if (dst_vxlan->flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); + vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; @@ -1976,7 +2022,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, return -ENOENT; } - vxlan_encap_bypass(skb, vxlan, dst_vxlan); + vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni); return 1; } @@ -1984,7 +2030,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, } static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, - struct vxlan_rdst *rdst, bool did_rsc) + __be32 default_vni, struct vxlan_rdst *rdst, + bool did_rsc) { struct dst_cache *dst_cache; struct ip_tunnel_info *info; @@ -2011,14 +2058,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (vxlan_addr_any(dst)) { if (did_rsc) { /* short-circuited back to local bridge */ - vxlan_encap_bypass(skb, vxlan, vxlan); + vxlan_encap_bypass(skb, vxlan, vxlan, default_vni); return; } goto drop; } dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; - vni = rdst->remote_vni; + vni = (rdst->remote_vni) ? 
: default_vni; src = &vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; md->gbp = skb->mark; @@ -2173,23 +2220,29 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) bool did_rsc = false; struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + __be32 vni = 0; info = skb_tunnel_info(skb); skb_reset_mac_header(skb); if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { - if (info && info->mode & IP_TUNNEL_INFO_TX) - vxlan_xmit_one(skb, dev, NULL, false); - else - kfree_skb(skb); - return NETDEV_TX_OK; + if (info && info->mode & IP_TUNNEL_INFO_BRIDGE && + info->mode & IP_TUNNEL_INFO_TX) { + vni = tunnel_id_to_key32(info->key.tun_id); + } else { + if (info && info->mode & IP_TUNNEL_INFO_TX) + vxlan_xmit_one(skb, dev, vni, NULL, false); + else + kfree_skb(skb); + return NETDEV_TX_OK; + } } if (vxlan->flags & VXLAN_F_PROXY) { eth = eth_hdr(skb); if (ntohs(eth->h_proto) == ETH_P_ARP) - return arp_reduce(dev, skb); + return arp_reduce(dev, skb, vni); #if IS_ENABLED(CONFIG_IPV6) else if (ntohs(eth->h_proto) == ETH_P_IPV6 && pskb_may_pull(skb, sizeof(struct ipv6hdr) @@ -2200,13 +2253,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) msg = (struct nd_msg *)skb_transport_header(skb); if (msg->icmph.icmp6_code == 0 && msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) - return neigh_reduce(dev, skb); + return neigh_reduce(dev, skb, vni); } #endif } eth = eth_hdr(skb); - f = vxlan_find_mac(vxlan, eth->h_dest); + f = vxlan_find_mac(vxlan, eth->h_dest, vni); did_rsc = false; if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && @@ -2214,11 +2267,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) ntohs(eth->h_proto) == ETH_P_IPV6)) { did_rsc = route_shortcircuit(dev, skb); if (did_rsc) - f = vxlan_find_mac(vxlan, eth->h_dest); + f = vxlan_find_mac(vxlan, eth->h_dest, vni); } if (f == NULL) { - f = vxlan_find_mac(vxlan, all_zeros_mac); + f = vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f == NULL) { if ((vxlan->flags & VXLAN_F_L2MISS) && !is_multicast_ether_addr(eth->h_dest)) @@ -2239,11 +2292,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) } skb1 = skb_clone(skb, GFP_ATOMIC); if (skb1) - vxlan_xmit_one(skb1, dev, rdst, did_rsc); + vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc); } if (fdst) - vxlan_xmit_one(skb, dev, fdst, did_rsc); + vxlan_xmit_one(skb, dev, vni, fdst, did_rsc); else kfree_skb(skb); return NETDEV_TX_OK; @@ -2307,12 +2360,12 @@ static int vxlan_init(struct net_device *dev) return 0; } -static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan) +static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) { struct vxlan_fdb *f; spin_lock_bh(&vxlan->hash_lock); - f = __vxlan_find_mac(vxlan, all_zeros_mac); + f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) vxlan_fdb_destroy(vxlan, f); spin_unlock_bh(&vxlan->hash_lock); @@ -2322,7 +2375,7 @@ static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - vxlan_fdb_delete_default(vxlan); + vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); free_percpu(dev->tstats); } @@ -2923,6 +2976,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, NTF_SELF); if (err) @@ -2931,7 +2985,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, err = register_netdevice(dev); if 
(err) { - vxlan_fdb_delete_default(vxlan); + vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); return err; } diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index bd99a8d80f36..f3d16dbe09d6 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -26,6 +26,7 @@ enum { NDA_IFINDEX, NDA_MASTER, NDA_LINK_NETNSID, + NDA_SRC_VNI, __NDA_MAX }; -- cgit v1.2.3 From b3c7ef0adadc5768e0baa786213c6bd1ce521a77 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 31 Jan 2017 22:59:53 -0800 Subject: bridge: uapi: add per vlan tunnel info New nested netlink attribute to associate tunnel info per vlan. This is used by bridge driver to send tunnel metadata to bridge ports in vlan tunnel mode. This patch also adds new per port flag IFLA_BRPORT_VLAN_TUNNEL to enable vlan tunnel mode. off by default. One example use for this is a vxlan bridging gateway or vtep which maps vlans to vn-segments (or vnis). User can configure per-vlan tunnel information which the bridge driver can use to bridge vlan into the corresponding vn-segment. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/linux/if_bridge.h | 1 + include/uapi/linux/if_bridge.h | 11 +++++++++++ include/uapi/linux/if_link.h | 1 + 3 files changed, 13 insertions(+) diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index debc9d5904e5..c5847dc75a93 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -47,6 +47,7 @@ struct br_ip_list { #define BR_PROXYARP_WIFI BIT(10) #define BR_MCAST_FLOOD BIT(11) #define BR_MULTICAST_TO_UNICAST BIT(12) +#define BR_VLAN_TUNNEL BIT(13) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index ab92bca6d448..a9e6244ce438 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -118,6 +118,7 @@ enum { IFLA_BRIDGE_FLAGS, IFLA_BRIDGE_MODE, IFLA_BRIDGE_VLAN_INFO, + IFLA_BRIDGE_VLAN_TUNNEL_INFO, __IFLA_BRIDGE_MAX, }; #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1) @@ -134,6 +135,16 @@ struct bridge_vlan_info { __u16 vid; }; +enum { + IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC, + IFLA_BRIDGE_VLAN_TUNNEL_ID, + IFLA_BRIDGE_VLAN_TUNNEL_VID, + IFLA_BRIDGE_VLAN_TUNNEL_FLAGS, + __IFLA_BRIDGE_VLAN_TUNNEL_MAX, +}; + +#define IFLA_BRIDGE_VLAN_TUNNEL_MAX (__IFLA_BRIDGE_VLAN_TUNNEL_MAX - 1) + struct bridge_vlan_xstats { __u64 rx_bytes; __u64 rx_packets; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index b9aa5641ebe5..320fc1e747ee 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -322,6 +322,7 @@ enum { IFLA_BRPORT_PAD, IFLA_BRPORT_MCAST_FLOOD, IFLA_BRPORT_MCAST_TO_UCAST, + IFLA_BRPORT_VLAN_TUNNEL, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) -- cgit v1.2.3 From efa5356b0d9753b9d7e63e41459eba106cce30f3 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 31 Jan 2017 22:59:54 -0800 Subject: bridge: per vlan dst_metadata netlink support This patch adds support to attach per vlan tunnel info dst metadata. This enables bridge driver to map vlan to tunnel_info at ingress and egress. It uses the kernel dst_metadata infrastructure. The initial use case is vlan to vni bridging, but the api is generic to extend to any tunnel_info in the future: - Uapi to configure/unconfigure/dump per vlan tunnel data - netlink functions to configure vlan and tunnel_info mapping - Introduces bridge port flag BR_LWT_VLAN to enable attach/detach dst_metadata to bridged packets on ports. 
off by default. - changes to existing code is mainly refactor some existing vlan handling netlink code + hooks for new vlan tunnel code - I have kept the vlan tunnel code isolated in separate files. - most of the netlink vlan tunnel code is handling of vlan-tunid ranges (follows the vlan range handling code). To conserve space vlan-tunid by default are always dumped in ranges if applicable. Use case: example use for this is a vxlan bridging gateway or vtep which maps vlans to vn-segments (or vnis). iproute2 example (patched and pruned iproute2 output to just show relevant fdb entries): example shows same host mac learnt on two vni's and vlan 100 maps to vni 1000, vlan 101 maps to vni 1001 before (netdev per vni): $bridge fdb show | grep "00:02:00:00:00:03" 00:02:00:00:00:03 dev vxlan1001 vlan 101 master bridge 00:02:00:00:00:03 dev vxlan1001 dst 12.0.0.8 self 00:02:00:00:00:03 dev vxlan1000 vlan 100 master bridge 00:02:00:00:00:03 dev vxlan1000 dst 12.0.0.8 self after this patch with collect metdata in bridged mode (single netdev): $bridge fdb show | grep "00:02:00:00:00:03" 00:02:00:00:00:03 dev vxlan0 vlan 101 master bridge 00:02:00:00:00:03 dev vxlan0 src_vni 1001 dst 12.0.0.8 self 00:02:00:00:00:03 dev vxlan0 vlan 100 master bridge 00:02:00:00:00:03 dev vxlan0 src_vni 1000 dst 12.0.0.8 self CC: Nikolay Aleksandrov Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- net/bridge/Makefile | 5 +- net/bridge/br_netlink.c | 140 ++++++++++++------- net/bridge/br_netlink_tunnel.c | 296 +++++++++++++++++++++++++++++++++++++++++ net/bridge/br_private.h | 10 ++ net/bridge/br_private_tunnel.h | 72 ++++++++++ net/bridge/br_vlan.c | 17 ++- net/bridge/br_vlan_tunnel.c | 149 +++++++++++++++++++++ 7 files changed, 641 insertions(+), 48 deletions(-) create mode 100644 net/bridge/br_netlink_tunnel.c create mode 100644 net/bridge/br_private_tunnel.h create mode 100644 net/bridge/br_vlan_tunnel.c diff --git a/net/bridge/Makefile b/net/bridge/Makefile index 0aefc011b668..40b1ede527ca 100644 --- a/net/bridge/Makefile +++ b/net/bridge/Makefile @@ -6,7 +6,8 @@ obj-$(CONFIG_BRIDGE) += bridge.o bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \ br_ioctl.o br_stp.o br_stp_bpdu.o \ - br_stp_if.o br_stp_timer.o br_netlink.o + br_stp_if.o br_stp_timer.o br_netlink.o \ + br_netlink_tunnel.o bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o @@ -18,7 +19,7 @@ obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o -bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o +bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 1ca25498fe4d..fc5d885dbb22 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -20,6 +20,7 @@ #include "br_private.h" #include "br_private_stp.h" +#include "br_private_tunnel.h" static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg, u32 filter_mask) @@ -95,9 +96,10 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev, u32 filter_mask) { struct net_bridge_vlan_group *vg = NULL; - struct net_bridge_port *p; + struct net_bridge_port *p = NULL; struct net_bridge *br; int num_vlan_infos; + size_t vinfo_sz = 0; rcu_read_lock(); if (br_port_exists(dev)) { @@ -110,8 +112,13 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev, num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask); rcu_read_unlock(); + if (p && 
(p->flags & BR_VLAN_TUNNEL)) + vinfo_sz += br_get_vlan_tunnel_info_size(vg); + /* Each VLAN is returned in bridge_vlan_info along with flags */ - return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info)); + vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info)); + + return vinfo_sz; } static inline size_t br_port_info_size(void) @@ -128,6 +135,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */ + + nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */ @@ -194,7 +202,9 @@ static int br_port_fill_attrs(struct sk_buff *skb, nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) || nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, p->topology_change_ack) || - nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending)) + nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) || + nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags & + BR_VLAN_TUNNEL))) return -EMSGSIZE; timerval = br_timer_value(&p->message_age_timer); @@ -417,6 +427,9 @@ static int br_fill_ifinfo(struct sk_buff *skb, err = br_fill_ifvlaninfo_compressed(skb, vg); else err = br_fill_ifvlaninfo(skb, vg); + + if (port && (port->flags & BR_VLAN_TUNNEL)) + err = br_fill_vlan_tunnel_info(skb, vg); rcu_read_unlock(); if (err) goto nla_put_failure; @@ -517,60 +530,91 @@ static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p, return err; } +static int br_process_vlan_info(struct net_bridge *br, + struct net_bridge_port *p, int cmd, + struct bridge_vlan_info *vinfo_curr, + struct bridge_vlan_info **vinfo_last) +{ + if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK) + return -EINVAL; + + if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { + /* check if we are already processing a range */ + if (*vinfo_last) + return -EINVAL; + *vinfo_last = vinfo_curr; + /* don't allow range of pvids */ + if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID) + return -EINVAL; + return 0; + } + + if (*vinfo_last) { + struct bridge_vlan_info tmp_vinfo; + int v, err; + + if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END)) + return -EINVAL; + + if (vinfo_curr->vid <= (*vinfo_last)->vid) + return -EINVAL; + + memcpy(&tmp_vinfo, *vinfo_last, + sizeof(struct bridge_vlan_info)); + for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) { + tmp_vinfo.vid = v; + err = br_vlan_info(br, p, cmd, &tmp_vinfo); + if (err) + break; + } + *vinfo_last = NULL; + + return 0; + } + + return br_vlan_info(br, p, cmd, vinfo_curr); +} + static int br_afspec(struct net_bridge *br, struct net_bridge_port *p, struct nlattr *af_spec, int cmd) { - struct bridge_vlan_info *vinfo_start = NULL; - struct bridge_vlan_info *vinfo = NULL; + struct bridge_vlan_info *vinfo_curr = NULL; + struct bridge_vlan_info *vinfo_last = NULL; struct nlattr *attr; - int err = 0; - int rem; + struct vtunnel_info tinfo_last = {}; + struct vtunnel_info tinfo_curr = {}; + int err = 0, rem; nla_for_each_nested(attr, af_spec, rem) { - if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO) - continue; - if (nla_len(attr) != sizeof(struct bridge_vlan_info)) - return -EINVAL; - vinfo = nla_data(attr); - if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) - return -EINVAL; - if (vinfo->flags & 
BRIDGE_VLAN_INFO_RANGE_BEGIN) { - if (vinfo_start) + err = 0; + switch (nla_type(attr)) { + case IFLA_BRIDGE_VLAN_TUNNEL_INFO: + if (!(p->flags & BR_VLAN_TUNNEL)) return -EINVAL; - vinfo_start = vinfo; - /* don't allow range of pvids */ - if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID) + err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); + if (err) + return err; + err = br_process_vlan_tunnel_info(br, p, cmd, + &tinfo_curr, + &tinfo_last); + if (err) + return err; + break; + case IFLA_BRIDGE_VLAN_INFO: + if (nla_len(attr) != sizeof(struct bridge_vlan_info)) return -EINVAL; - continue; + vinfo_curr = nla_data(attr); + err = br_process_vlan_info(br, p, cmd, vinfo_curr, + &vinfo_last); + if (err) + return err; + break; } - if (vinfo_start) { - struct bridge_vlan_info tmp_vinfo; - int v; - - if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END)) - return -EINVAL; - - if (vinfo->vid <= vinfo_start->vid) - return -EINVAL; - - memcpy(&tmp_vinfo, vinfo_start, - sizeof(struct bridge_vlan_info)); - - for (v = vinfo_start->vid; v <= vinfo->vid; v++) { - tmp_vinfo.vid = v; - err = br_vlan_info(br, p, cmd, &tmp_vinfo); - if (err) - break; - } - vinfo_start = NULL; - } else { - err = br_vlan_info(br, p, cmd, vinfo); - } if (err) - break; + return err; } return err; @@ -630,8 +674,9 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[], /* Process bridge protocol info on port */ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) { - int err; unsigned long old_flags = p->flags; + bool br_vlan_tunnel_old = false; + int err; br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); @@ -644,6 +689,11 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI); + br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false; + br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL); + if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL)) + nbp_vlan_tunnel_info_flush(p); + if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); if (err) diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c new file mode 100644 index 000000000000..99c68012c9d4 --- /dev/null +++ b/net/bridge/br_netlink_tunnel.c @@ -0,0 +1,296 @@ +/* + * Bridge per vlan tunnel port dst_metadata netlink control interface + * + * Authors: + * Roopa Prabhu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "br_private.h" +#include "br_private_tunnel.h" + +static size_t __get_vlan_tinfo_size(void) +{ + return nla_total_size(0) + /* nest IFLA_BRIDGE_VLAN_TUNNEL_INFO */ + nla_total_size(sizeof(u32)) + /* IFLA_BRIDGE_VLAN_TUNNEL_ID */ + nla_total_size(sizeof(u16)) + /* IFLA_BRIDGE_VLAN_TUNNEL_VID */ + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */ +} + +static bool vlan_tunnel_id_isrange(struct net_bridge_vlan *v, + struct net_bridge_vlan *v_end) +{ + __be32 tunid_curr = tunnel_id_to_key32(v->tinfo.tunnel_id); + __be32 tunid_end = tunnel_id_to_key32(v_end->tinfo.tunnel_id); + + return (be32_to_cpu(tunid_curr) - be32_to_cpu(tunid_end)) == 1; +} + +static int __get_num_vlan_tunnel_infos(struct net_bridge_vlan_group *vg) +{ + struct net_bridge_vlan *v, *v_start = NULL, *v_end = NULL; + int num_tinfos = 0; + + /* Count number of vlan infos */ + list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { + /* only a context, bridge vlan not activated */ + if (!br_vlan_should_use(v) || !v->tinfo.tunnel_id) + continue; + + if (!v_start) { + goto initvars; + } else if ((v->vid - v_end->vid) == 1 && + vlan_tunnel_id_isrange(v_end, v) == 1) { + v_end = v; + continue; + } else { + if ((v_end->vid - v->vid) > 0 && + vlan_tunnel_id_isrange(v_end, v) > 0) + num_tinfos += 2; + else + num_tinfos += 1; + } +initvars: + v_start = v; + v_end = v; + } + + if (v_start) { + if ((v_end->vid - v->vid) > 0 && + vlan_tunnel_id_isrange(v_end, v) > 0) + num_tinfos += 2; + else + num_tinfos += 1; + } + + return num_tinfos; +} + +int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg) +{ + int num_tinfos; + + if (!vg) + return 0; + + rcu_read_lock(); + num_tinfos = __get_num_vlan_tunnel_infos(vg); + rcu_read_unlock(); + + return num_tinfos * __get_vlan_tinfo_size(); +} + +static int br_fill_vlan_tinfo(struct sk_buff *skb, u16 vid, + __be64 tunnel_id, u16 flags) +{ + __be32 tid = tunnel_id_to_key32(tunnel_id); + struct nlattr *tmap; + + tmap = nla_nest_start(skb, IFLA_BRIDGE_VLAN_TUNNEL_INFO); + if (!tmap) + return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_BRIDGE_VLAN_TUNNEL_ID, + be32_to_cpu(tid))) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_VID, + vid)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_FLAGS, + flags)) + goto nla_put_failure; + nla_nest_end(skb, tmap); + + return 0; + +nla_put_failure: + nla_nest_cancel(skb, tmap); + + return -EMSGSIZE; +} + +static int br_fill_vlan_tinfo_range(struct sk_buff *skb, + struct net_bridge_vlan *vtbegin, + struct net_bridge_vlan *vtend) +{ + int err; + + if (vtbegin && vtend && (vtend->vid - vtbegin->vid) > 0) { + /* add range to skb */ + err = br_fill_vlan_tinfo(skb, vtbegin->vid, + vtbegin->tinfo.tunnel_id, + BRIDGE_VLAN_INFO_RANGE_BEGIN); + if (err) + return err; + + err = br_fill_vlan_tinfo(skb, vtend->vid, + vtend->tinfo.tunnel_id, + BRIDGE_VLAN_INFO_RANGE_END); + if (err) + return err; + } else { + err = br_fill_vlan_tinfo(skb, vtbegin->vid, + vtbegin->tinfo.tunnel_id, + 0); + if (err) + return err; + } + + return 0; +} + +int br_fill_vlan_tunnel_info(struct sk_buff *skb, + struct net_bridge_vlan_group *vg) +{ + struct net_bridge_vlan *vtbegin = NULL; + struct net_bridge_vlan *vtend = NULL; + struct net_bridge_vlan *v; + int err; + + /* Count number of vlan infos */ + list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { + /* only a context, bridge vlan not activated */ + if (!br_vlan_should_use(v)) + continue; + 
+ if (!v->tinfo.tunnel_dst) + continue; + + if (!vtbegin) { + goto initvars; + } else if ((v->vid - vtend->vid) == 1 && + vlan_tunnel_id_isrange(v, vtend)) { + vtend = v; + continue; + } else { + err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend); + if (err) + return err; + } +initvars: + vtbegin = v; + vtend = v; + } + + if (vtbegin) { + err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend); + if (err) + return err; + } + + return 0; +} + +static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1] = { + [IFLA_BRIDGE_VLAN_TUNNEL_ID] = { .type = NLA_U32 }, + [IFLA_BRIDGE_VLAN_TUNNEL_VID] = { .type = NLA_U16 }, + [IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 }, +}; + +static int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd, + u16 vid, u32 tun_id) +{ + int err = 0; + + if (!p) + return -EINVAL; + + switch (cmd) { + case RTM_SETLINK: + err = nbp_vlan_tunnel_info_add(p, vid, tun_id); + break; + case RTM_DELLINK: + nbp_vlan_tunnel_info_delete(p, vid); + break; + } + + return err; +} + +int br_parse_vlan_tunnel_info(struct nlattr *attr, + struct vtunnel_info *tinfo) +{ + struct nlattr *tb[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1]; + u32 tun_id; + u16 vid, flags = 0; + int err; + + memset(tinfo, 0, sizeof(*tinfo)); + + if (!tb[IFLA_BRIDGE_VLAN_TUNNEL_ID] || + !tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]) + return -EINVAL; + + err = nla_parse_nested(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX, + attr, vlan_tunnel_policy); + if (err < 0) + return err; + + tun_id = nla_get_u32(tb[IFLA_BRIDGE_VLAN_TUNNEL_ID]); + vid = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]); + if (vid >= VLAN_VID_MASK) + return -ERANGE; + + if (tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS]) + flags = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS]); + + tinfo->tunid = tun_id; + tinfo->vid = vid; + tinfo->flags = flags; + + return 0; +} + +int br_process_vlan_tunnel_info(struct net_bridge *br, + struct net_bridge_port *p, int cmd, + struct vtunnel_info *tinfo_curr, + struct vtunnel_info *tinfo_last) +{ + int err; + + if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { + if (tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) + return -EINVAL; + memcpy(tinfo_last, tinfo_curr, sizeof(struct vtunnel_info)); + } else if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END) { + int t, v; + + if (!(tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN)) + return -EINVAL; + if ((tinfo_curr->vid - tinfo_last->vid) != + (tinfo_curr->tunid - tinfo_last->tunid)) + return -EINVAL; + t = tinfo_last->tunid; + for (v = tinfo_last->vid; v <= tinfo_curr->vid; v++) { + err = br_vlan_tunnel_info(p, cmd, v, t); + if (err) + return err; + t++; + } + memset(tinfo_last, 0, sizeof(struct vtunnel_info)); + memset(tinfo_curr, 0, sizeof(struct vtunnel_info)); + } else { + if (tinfo_last->flags) + return -EINVAL; + err = br_vlan_tunnel_info(p, cmd, tinfo_curr->vid, + tinfo_curr->tunid); + if (err) + return err; + memset(tinfo_last, 0, sizeof(struct vtunnel_info)); + memset(tinfo_curr, 0, sizeof(struct vtunnel_info)); + } + + return 0; +} diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 0b82a227fc34..61de90f28afa 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -91,6 +91,11 @@ struct br_vlan_stats { struct u64_stats_sync syncp; }; +struct br_tunnel_info { + __be64 tunnel_id; + struct metadata_dst *tunnel_dst; +}; + /** * struct net_bridge_vlan - per-vlan entry * @@ -113,6 +118,7 @@ struct br_vlan_stats { */ struct net_bridge_vlan { struct rhash_head vnode; + struct rhash_head tnode; u16 vid; u16 flags; struct br_vlan_stats __percpu 
*stats; @@ -124,6 +130,9 @@ struct net_bridge_vlan { atomic_t refcnt; struct net_bridge_vlan *brvlan; }; + + struct br_tunnel_info tinfo; + struct list_head vlist; struct rcu_head rcu; @@ -145,6 +154,7 @@ struct net_bridge_vlan { */ struct net_bridge_vlan_group { struct rhashtable vlan_hash; + struct rhashtable tunnel_hash; struct list_head vlan_list; u16 num_vlans; u16 pvid; diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h new file mode 100644 index 000000000000..1c8d0d5302cb --- /dev/null +++ b/net/bridge/br_private_tunnel.h @@ -0,0 +1,72 @@ +/* + * Bridge per vlan tunnels + * + * Authors: + * Roopa Prabhu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _BR_PRIVATE_TUNNEL_H +#define _BR_PRIVATE_TUNNEL_H + +struct vtunnel_info { + u32 tunid; + u16 vid; + u16 flags; +}; + +/* br_netlink_tunnel.c */ +int br_parse_vlan_tunnel_info(struct nlattr *attr, + struct vtunnel_info *tinfo); +int br_process_vlan_tunnel_info(struct net_bridge *br, + struct net_bridge_port *p, + int cmd, + struct vtunnel_info *tinfo_curr, + struct vtunnel_info *tinfo_last); +int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg); +int br_fill_vlan_tunnel_info(struct sk_buff *skb, + struct net_bridge_vlan_group *vg); + +#ifdef CONFIG_BRIDGE_VLAN_FILTERING +/* br_vlan_tunnel.c */ +int vlan_tunnel_init(struct net_bridge_vlan_group *vg); +void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg); +int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid); +int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id); +void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port); +void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg, + struct net_bridge_vlan *vlan); +#else +static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg) +{ + return 0; +} + +static inline int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, + u16 vid) +{ + return 0; +} + +static inline int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, + u16 vid, u32 tun_id) +{ + return 0; +} + +static inline void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port) +{ +} + +static inline void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg, + struct net_bridge_vlan *vlan) +{ +} + +#endif + +#endif diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index b6de4f457161..64002e3941ca 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -5,6 +5,7 @@ #include #include "br_private.h" +#include "br_private_tunnel.h" static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg, const void *ptr) @@ -310,6 +311,7 @@ static int __vlan_del(struct net_bridge_vlan *v) } if (masterv != v) { + vlan_tunnel_info_del(vg, v); rhashtable_remove_fast(&vg->vlan_hash, &v->vnode, br_vlan_rht_params); __vlan_del_list(v); @@ -325,6 +327,7 @@ static void __vlan_group_free(struct net_bridge_vlan_group *vg) { WARN_ON(!list_empty(&vg->vlan_list)); rhashtable_destroy(&vg->vlan_hash); + vlan_tunnel_deinit(vg); kfree(vg); } @@ -613,6 +616,8 @@ int br_vlan_delete(struct net_bridge *br, u16 vid) br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid); br_fdb_delete_by_port(br, NULL, vid, 0); + vlan_tunnel_info_del(vg, v); + return __vlan_del(v); } @@ -918,6 +923,9 @@ int br_vlan_init(struct net_bridge *br) ret = 
rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params); if (ret) goto err_rhtbl; + ret = vlan_tunnel_init(vg); + if (ret) + goto err_tunnel_init; INIT_LIST_HEAD(&vg->vlan_list); br->vlan_proto = htons(ETH_P_8021Q); br->default_pvid = 1; @@ -932,6 +940,8 @@ out: return ret; err_vlan_add: + vlan_tunnel_deinit(vg); +err_tunnel_init: rhashtable_destroy(&vg->vlan_hash); err_rhtbl: kfree(vg); @@ -961,6 +971,9 @@ int nbp_vlan_init(struct net_bridge_port *p) ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params); if (ret) goto err_rhtbl; + ret = vlan_tunnel_init(vg); + if (ret) + goto err_tunnel_init; INIT_LIST_HEAD(&vg->vlan_list); rcu_assign_pointer(p->vlgrp, vg); if (p->br->default_pvid) { @@ -976,8 +989,10 @@ out: err_vlan_add: RCU_INIT_POINTER(p->vlgrp, NULL); synchronize_rcu(); - rhashtable_destroy(&vg->vlan_hash); + vlan_tunnel_deinit(vg); err_vlan_enabled: +err_tunnel_init: + rhashtable_destroy(&vg->vlan_hash); err_rhtbl: kfree(vg); diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c new file mode 100644 index 000000000000..b3fd29d20a3c --- /dev/null +++ b/net/bridge/br_vlan_tunnel.c @@ -0,0 +1,149 @@ +/* + * Bridge per vlan tunnel port dst_metadata handling code + * + * Authors: + * Roopa Prabhu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#include "br_private.h" +#include "br_private_tunnel.h" + +static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg, + const void *ptr) +{ + const struct net_bridge_vlan *vle = ptr; + __be64 tunid = *(__be64 *)arg->key; + + return vle->tinfo.tunnel_id != tunid; +} + +static const struct rhashtable_params br_vlan_tunnel_rht_params = { + .head_offset = offsetof(struct net_bridge_vlan, tnode), + .key_offset = offsetof(struct net_bridge_vlan, tinfo.tunnel_id), + .key_len = sizeof(__be64), + .nelem_hint = 3, + .locks_mul = 1, + .obj_cmpfn = br_vlan_tunid_cmp, + .automatic_shrinking = true, +}; + +void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg, + struct net_bridge_vlan *vlan) +{ + if (!vlan->tinfo.tunnel_dst) + return; + rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode, + br_vlan_tunnel_rht_params); + vlan->tinfo.tunnel_id = 0; + dst_release(&vlan->tinfo.tunnel_dst->dst); + vlan->tinfo.tunnel_dst = NULL; +} + +static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg, + struct net_bridge_vlan *vlan, u32 tun_id) +{ + struct metadata_dst *metadata = NULL; + __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id)); + int err; + + if (vlan->tinfo.tunnel_dst) + return -EEXIST; + + metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY, + key, 0); + if (!metadata) + return -EINVAL; + + metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE; + vlan->tinfo.tunnel_dst = metadata; + vlan->tinfo.tunnel_id = key; + + err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode, + br_vlan_tunnel_rht_params); + if (err) + goto out; + + return 0; +out: + dst_release(&vlan->tinfo.tunnel_dst->dst); + + return err; +} + +/* Must be protected by RTNL. + * Must be called with vid in range from 1 to 4094 inclusive. 
+ */ +int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *vlan; + + ASSERT_RTNL(); + + vg = nbp_vlan_group(port); + vlan = br_vlan_find(vg, vid); + if (!vlan) + return -EINVAL; + + return __vlan_tunnel_info_add(vg, vlan, tun_id); +} + +/* Must be protected by RTNL. + * Must be called with vid in range from 1 to 4094 inclusive. + */ +int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + + ASSERT_RTNL(); + + vg = nbp_vlan_group(port); + v = br_vlan_find(vg, vid); + if (!v) + return -ENOENT; + + vlan_tunnel_info_del(vg, v); + + return 0; +} + +static void __vlan_tunnel_info_flush(struct net_bridge_vlan_group *vg) +{ + struct net_bridge_vlan *vlan, *tmp; + + list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) + vlan_tunnel_info_del(vg, vlan); +} + +void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port) +{ + struct net_bridge_vlan_group *vg; + + ASSERT_RTNL(); + + vg = nbp_vlan_group(port); + __vlan_tunnel_info_flush(vg); +} + +int vlan_tunnel_init(struct net_bridge_vlan_group *vg) +{ + return rhashtable_init(&vg->tunnel_hash, &br_vlan_tunnel_rht_params); +} + +void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg) +{ + rhashtable_destroy(&vg->tunnel_hash); +} -- cgit v1.2.3 From 11538d039ac6efcf4f1a6c536e1b87cd3668a9fd Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 31 Jan 2017 22:59:55 -0800 Subject: bridge: vlan dst_metadata hooks in ingress and egress paths - ingress hook: - if port is a tunnel port, use tunnel info in attached dst_metadata to map it to a local vlan - egress hook: - if port is a tunnel port, use tunnel info attached to vlan to set dst_metadata on the skb CC: Nikolay Aleksandrov Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
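Taken together with the vxlan and netlink patches earlier in the series, the two hooks give the round trip sketched below. This is only meant to show where they sit in the path: example_rx()/example_tx() are invented names, while the br_handle_*_vlan_tunnel() calls and the BR_VLAN_TUNNEL port flag are the ones this patch adds.

static int example_rx(struct sk_buff *skb, struct net_bridge_port *p)
{
	/* ingress: the vxlan device (collect-metadata mode) attached the
	 * tunnel id as dst_metadata; map it to the port's local vlan tag,
	 * or signal the caller to drop the skb on failure
	 */
	if (p->flags & BR_VLAN_TUNNEL)
		return br_handle_ingress_vlan_tunnel(skb, p,
						     nbp_vlan_group_rcu(p));
	return 0;
}

static struct sk_buff *example_tx(const struct net_bridge_port *to,
				  struct net_bridge_vlan *v,
				  struct sk_buff *skb)
{
	/* egress: pop the vlan tag and re-attach the vlan's tunnel
	 * dst_metadata so the vxlan device transmits on the mapped VNI
	 */
	if ((to->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}
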
Miller --- net/bridge/br_forward.c | 2 +- net/bridge/br_input.c | 8 ++++++- net/bridge/br_private.h | 2 ++ net/bridge/br_private_tunnel.h | 11 +++++++++ net/bridge/br_vlan.c | 7 ++++++ net/bridge/br_vlan_tunnel.c | 54 ++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 82 insertions(+), 2 deletions(-) diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 5a1f8ef49899..6bfac29318f2 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -80,7 +80,7 @@ static void __br_forward(const struct net_bridge_port *to, int br_hook; vg = nbp_vlan_group_rcu(to); - skb = br_handle_vlan(to->br, vg, skb); + skb = br_handle_vlan(to->br, to, vg, skb); if (!skb) return; diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 855b72fbe1da..fba38d8a1a08 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -21,6 +21,7 @@ #include #include #include "br_private.h" +#include "br_private_tunnel.h" /* Hook for brouter */ br_should_route_hook_t __rcu *br_should_route_hook __read_mostly; @@ -57,7 +58,7 @@ static int br_pass_frame_up(struct sk_buff *skb) indev = skb->dev; skb->dev = brdev; - skb = br_handle_vlan(br, vg, skb); + skb = br_handle_vlan(br, NULL, vg, skb); if (!skb) return NET_RX_DROP; /* update the multicast stats if the packet is IGMP/MLD */ @@ -261,6 +262,11 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; p = br_port_get_rcu(skb->dev); + if (p->flags & BR_VLAN_TUNNEL) { + if (br_handle_ingress_vlan_tunnel(skb, p, + nbp_vlan_group_rcu(p))) + goto drop; + } if (unlikely(is_link_local_ether_addr(dest))) { u16 fwd_mask = p->br->group_fwd_mask_required; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 61de90f28afa..40177df45ba6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -775,6 +775,7 @@ bool br_allowed_egress(struct net_bridge_vlan_group *vg, const struct sk_buff *skb); bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid); struct sk_buff *br_handle_vlan(struct net_bridge *br, + const struct net_bridge_port *port, struct net_bridge_vlan_group *vg, struct sk_buff *skb); int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags); @@ -874,6 +875,7 @@ static inline bool br_should_learn(struct net_bridge_port *p, } static inline struct sk_buff *br_handle_vlan(struct net_bridge *br, + const struct net_bridge_port *port, struct net_bridge_vlan_group *vg, struct sk_buff *skb) { diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h index 1c8d0d5302cb..4a447a378ab3 100644 --- a/net/bridge/br_private_tunnel.h +++ b/net/bridge/br_private_tunnel.h @@ -40,6 +40,11 @@ int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id); void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port); void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg, struct net_bridge_vlan *vlan); +int br_handle_ingress_vlan_tunnel(struct sk_buff *skb, + struct net_bridge_port *p, + struct net_bridge_vlan_group *vg); +int br_handle_egress_vlan_tunnel(struct sk_buff *skb, + struct net_bridge_vlan *vlan); #else static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg) { @@ -67,6 +72,12 @@ static inline void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg, { } +static inline int br_handle_ingress_vlan_tunnel(struct sk_buff *skb, + struct net_bridge_port *p, + struct net_bridge_vlan_group *vg) +{ + return 0; +} #endif #endif diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 64002e3941ca..62e68c0dc687 
100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -341,6 +341,7 @@ static void __vlan_flush(struct net_bridge_vlan_group *vg) } struct sk_buff *br_handle_vlan(struct net_bridge *br, + const struct net_bridge_port *p, struct net_bridge_vlan_group *vg, struct sk_buff *skb) { @@ -381,6 +382,12 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED) skb->vlan_tci = 0; + + if (p && (p->flags & BR_VLAN_TUNNEL) && + br_handle_egress_vlan_tunnel(skb, v)) { + kfree_skb(skb); + return NULL; + } out: return skb; } diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c index b3fd29d20a3c..b2b79a070162 100644 --- a/net/bridge/br_vlan_tunnel.c +++ b/net/bridge/br_vlan_tunnel.c @@ -39,6 +39,13 @@ static const struct rhashtable_params br_vlan_tunnel_rht_params = { .automatic_shrinking = true, }; +static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl, + u64 tunnel_id) +{ + return rhashtable_lookup_fast(tbl, &tunnel_id, + br_vlan_tunnel_rht_params); +} + void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg, struct net_bridge_vlan *vlan) { @@ -147,3 +154,50 @@ void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg) { rhashtable_destroy(&vg->tunnel_hash); } + +int br_handle_ingress_vlan_tunnel(struct sk_buff *skb, + struct net_bridge_port *p, + struct net_bridge_vlan_group *vg) +{ + struct ip_tunnel_info *tinfo = skb_tunnel_info(skb); + struct net_bridge_vlan *vlan; + + if (!vg || !tinfo) + return 0; + + /* if already tagged, ignore */ + if (skb_vlan_tagged(skb)) + return 0; + + /* lookup vid, given tunnel id */ + vlan = br_vlan_tunnel_lookup(&vg->tunnel_hash, tinfo->key.tun_id); + if (!vlan) + return 0; + + skb_dst_drop(skb); + + __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid); + + return 0; +} + +int br_handle_egress_vlan_tunnel(struct sk_buff *skb, + struct net_bridge_vlan *vlan) +{ + int err; + + if (!vlan || !vlan->tinfo.tunnel_id) + return 0; + + if (unlikely(!skb_vlan_tag_present(skb))) + return 0; + + skb_dst_drop(skb); + err = skb_vlan_pop(skb); + if (err) + return err; + + skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst)); + + return 0; +} -- cgit v1.2.3 From f38c5ad79f1eb5569843ea24ac7f4cabccc3c657 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 2 Feb 2017 17:05:04 +0100 Subject: MAINTAINERS: add Ivan as a switchdev maintainer Ivan will be taking care of switchdev code from now on. Signed-off-by: Jiri Pirko Acked-by: Ivan Vecera Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 2abda6cb3150..c15dc53f9d66 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11914,6 +11914,7 @@ F: include/linux/swiotlb.h SWITCHDEV M: Jiri Pirko +M: Ivan Vecera L: netdev@vger.kernel.org S: Supported F: net/switchdev/ -- cgit v1.2.3 From 3898fac1f488c76e0eef5b5267b4ba8112a82ac4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 2 Feb 2017 17:09:54 +0100 Subject: trace: rename trace_print_hex_seq arg and add kdoc Steven suggested to improve trace_print_hex_seq() a bit after commit 2acae0d5b0f7 ("trace: add variant without spacing in trace_print_hex_seq") in two ways: i) by adding a kdoc comment for the helper function itself and ii) by renaming 'spacing' argument into 'concatenate' to better denote that we don't add spaces between each hex bytes. Suggested-by: Steven Rostedt Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
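For reference, a short worked illustration (not taken from the patch) of the two output modes, matching the format string used by trace_print_hex_seq() in the diff below:

	/* For buf = { 0xde, 0xad, 0xbe, 0xef } and buf_len = 4:
	 *
	 *   __print_hex(buf, 4)      -> "de ad be ef"   (concatenate == false)
	 *   __print_hex_str(buf, 4)  -> "deadbeef"      (concatenate == true)
	 */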
Miller --- include/linux/trace_events.h | 2 +- include/trace/trace_events.h | 4 ++-- kernel/trace/trace_output.c | 15 +++++++++++++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index cfa475a0e9ca..0f165507495c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -34,7 +34,7 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, const char *trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int len, - bool spacing); + bool concatenate); const char *trace_print_array_seq(struct trace_seq *p, const void *buf, int count, diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 9f684629c3cf..5c06f4af8323 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -298,11 +298,11 @@ TRACE_MAKE_SYSTEM_STR(); #undef __print_hex #define __print_hex(buf, buf_len) \ - trace_print_hex_seq(p, buf, buf_len, true) + trace_print_hex_seq(p, buf, buf_len, false) #undef __print_hex_str #define __print_hex_str(buf, buf_len) \ - trace_print_hex_seq(p, buf, buf_len, false) + trace_print_hex_seq(p, buf, buf_len, true) #undef __print_array #define __print_array(array, count, el_size) \ diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 30a144b1b9ee..aea6a1218c7d 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -162,15 +162,26 @@ trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, } EXPORT_SYMBOL_GPL(trace_print_bitmask_seq); +/** + * trace_print_hex_seq - print buffer as hex sequence + * @p: trace seq struct to write to + * @buf: The buffer to print + * @buf_len: Length of @buf in bytes + * @concatenate: Print @buf as single hex string or with spacing + * + * Prints the passed buffer as a hex sequence either as a whole, + * single hex string if @concatenate is true or with spacing after + * each byte in case @concatenate is false. + */ const char * trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len, - bool spacing) + bool concatenate) { int i; const char *ret = trace_seq_buffer_ptr(p); for (i = 0; i < buf_len; i++) - trace_seq_printf(p, "%s%2.2x", !spacing || i == 0 ? "" : " ", + trace_seq_printf(p, "%s%2.2x", concatenate || i == 0 ? "" : " ", buf[i]); trace_seq_putc(p, 0); -- cgit v1.2.3 From 94b5e0f970258828bf163b5ef076da4e4b0802e0 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 2 Feb 2017 08:52:21 -0800 Subject: net: ipv6: Set protocol to kernel for local routes IPv6 stack does not set the protocol for local routes, so those routes show up with proto "none": $ ip -6 ro ls table local local ::1 dev lo proto none metric 0 pref medium local 2100:3:: dev lo proto none metric 0 pref medium local 2100:3::4 dev lo proto none metric 0 pref medium local fe80:: dev lo proto none metric 0 pref medium ... Set rt6i_protocol to RTPROT_KERNEL for consistency with IPv4. Now routes show up with proto "kernel": $ ip -6 ro ls table local local ::1 dev lo proto kernel metric 0 pref medium local 2100:3:: dev lo proto kernel metric 0 pref medium local 2100:3::4 dev lo proto kernel metric 0 pref medium local fe80:: dev lo proto kernel metric 0 pref medium ... Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2563331b0532..91eb3f7782dd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2634,6 +2634,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->dst.output = ip6_output; rt->rt6i_idev = idev; + rt->rt6i_protocol = RTPROT_KERNEL; rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; if (anycast) rt->rt6i_flags |= RTF_ANYCAST; -- cgit v1.2.3 From fb6113e688e0bf5bc3081d6ff02e8ad77fed3c7a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 10:16:00 -0800 Subject: be2net: get rid of custom busy poll code Compared to custom busy_poll, the generic NAPI one is better, since it allows to use GRO, and it removes a lot of code and extra locked operations in fast path. Signed-off-by: Eric Dumazet Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 5 - drivers/net/ethernet/emulex/benet/be_main.c | 151 ++-------------------------- 2 files changed, 9 insertions(+), 147 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4c30c44b242e..d49528ad7821 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -226,11 +226,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ u64 tx_reqs_prev; /* Used to calculate TX pps */ }; -enum { - NAPI_POLLING, - BUSY_POLLING -}; - struct be_mcc_obj { struct be_queue_info q; struct be_queue_info cq; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 52aed6d383c3..6be3b9aba8ed 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3063,7 +3063,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp) } static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, - int budget, int polling) + int budget) { struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; @@ -3095,8 +3095,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, goto loop_continue; } - /* Don't do gro when we're busy_polling */ - if (do_gro(rxcp) && polling != BUSY_POLLING) + if (do_gro(rxcp)) be_rx_compl_process_gro(rxo, napi, rxcp); else be_rx_compl_process(rxo, napi, rxcp); @@ -3194,106 +3193,6 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, } } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline bool be_lock_napi(struct be_eq_obj *eqo) -{ - bool status = true; - - spin_lock(&eqo->lock); /* BH is already disabled */ - if (eqo->state & BE_EQ_LOCKED) { - WARN_ON(eqo->state & BE_EQ_NAPI); - eqo->state |= BE_EQ_NAPI_YIELD; - status = false; - } else { - eqo->state = BE_EQ_NAPI; - } - spin_unlock(&eqo->lock); - return status; -} - -static inline void be_unlock_napi(struct be_eq_obj *eqo) -{ - spin_lock(&eqo->lock); /* BH is already disabled */ - - WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD)); - eqo->state = BE_EQ_IDLE; - - spin_unlock(&eqo->lock); -} - -static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) -{ - bool status = true; - - spin_lock_bh(&eqo->lock); - if (eqo->state & BE_EQ_LOCKED) { - eqo->state |= BE_EQ_POLL_YIELD; - status = false; - } else { - eqo->state |= BE_EQ_POLL; - } - spin_unlock_bh(&eqo->lock); - return status; -} - -static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) -{ - spin_lock_bh(&eqo->lock); - - 
WARN_ON(eqo->state & (BE_EQ_NAPI)); - eqo->state = BE_EQ_IDLE; - - spin_unlock_bh(&eqo->lock); -} - -static inline void be_enable_busy_poll(struct be_eq_obj *eqo) -{ - spin_lock_init(&eqo->lock); - eqo->state = BE_EQ_IDLE; -} - -static inline void be_disable_busy_poll(struct be_eq_obj *eqo) -{ - local_bh_disable(); - - /* It's enough to just acquire napi lock on the eqo to stop - * be_busy_poll() from processing any queueus. - */ - while (!be_lock_napi(eqo)) - mdelay(1); - - local_bh_enable(); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline bool be_lock_napi(struct be_eq_obj *eqo) -{ - return true; -} - -static inline void be_unlock_napi(struct be_eq_obj *eqo) -{ -} - -static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) -{ - return false; -} - -static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) -{ -} - -static inline void be_enable_busy_poll(struct be_eq_obj *eqo) -{ -} - -static inline void be_disable_busy_poll(struct be_eq_obj *eqo) -{ -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - int be_poll(struct napi_struct *napi, int budget) { struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); @@ -3308,18 +3207,13 @@ int be_poll(struct napi_struct *napi, int budget) for_all_tx_queues_on_eq(adapter, eqo, txo, i) be_process_tx(adapter, txo, i); - if (be_lock_napi(eqo)) { - /* This loop will iterate twice for EQ0 in which - * completions of the last RXQ (default one) are also processed - * For other EQs the loop iterates only once - */ - for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { - work = be_process_rx(rxo, napi, budget, NAPI_POLLING); - max_work = max(work, max_work); - } - be_unlock_napi(eqo); - } else { - max_work = budget; + /* This loop will iterate twice for EQ0 in which + * completions of the last RXQ (default one) are also processed + * For other EQs the loop iterates only once + */ + for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { + work = be_process_rx(rxo, napi, budget); + max_work = max(work, max_work); } if (is_mcc_eqo(eqo)) @@ -3343,28 +3237,6 @@ int be_poll(struct napi_struct *napi, int budget) return max_work; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int be_busy_poll(struct napi_struct *napi) -{ - struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); - struct be_adapter *adapter = eqo->adapter; - struct be_rx_obj *rxo; - int i, work = 0; - - if (!be_lock_busy_poll(eqo)) - return LL_FLUSH_BUSY; - - for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { - work = be_process_rx(rxo, napi, 4, BUSY_POLLING); - if (work) - break; - } - - be_unlock_busy_poll(eqo); - return work; -} -#endif - void be_detect_error(struct be_adapter *adapter) { u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; @@ -3669,7 +3541,6 @@ static int be_close(struct net_device *netdev) if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { for_all_evt_queues(adapter, eqo, i) { napi_disable(&eqo->napi); - be_disable_busy_poll(eqo); } adapter->flags &= ~BE_FLAGS_NAPI_ENABLED; } @@ -3839,7 +3710,6 @@ static int be_open(struct net_device *netdev) for_all_evt_queues(adapter, eqo, i) { napi_enable(&eqo->napi); - be_enable_busy_poll(eqo); be_eq_notify(adapter, eqo->q.id, true, true, 0, 0); } adapter->flags |= BE_FLAGS_NAPI_ENABLED; @@ -5245,9 +5115,6 @@ static const struct net_device_ops be_netdev_ops = { #endif .ndo_bridge_setlink = be_ndo_bridge_setlink, .ndo_bridge_getlink = be_ndo_bridge_getlink, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = be_busy_poll, -#endif .ndo_udp_tunnel_add = be_add_vxlan_port, .ndo_udp_tunnel_del = be_del_vxlan_port, .ndo_features_check = 
be_features_check, -- cgit v1.2.3 From 362108b5adddac6f496acc40696e499defd56d62 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 10:50:48 -0800 Subject: myri10ge: get rid of custom busy poll code Compared to custom busy_poll, the generic NAPI one is simpler and removes a lot of code. It removes one atomic in the fast path (when busy poll is not in action) since we do not have to use an extra spinlock. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 197 +---------------------- 1 file changed, 4 insertions(+), 193 deletions(-) diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 3d881176a353..1139d1803e7e 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -191,21 +191,6 @@ struct myri10ge_slice_state { int cpu; __be32 __iomem *dca_tag; #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define SLICE_STATE_IDLE 0 -#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */ -#define SLICE_STATE_POLL 2 /* poll owns this slice */ -#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL) -#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */ -#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */ -#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD) - spinlock_t lock; - unsigned long lock_napi_yield; - unsigned long lock_poll_yield; - unsigned long busy_poll_miss; - unsigned long busy_poll_cnt; -#endif /* CONFIG_NET_RX_BUSY_POLL */ char irq_desc[32]; }; @@ -925,92 +910,6 @@ abort: return status; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) -{ - spin_lock_init(&ss->lock); - ss->state = SLICE_STATE_IDLE; -} - -static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) -{ - bool rc = true; - spin_lock(&ss->lock); - if ((ss->state & SLICE_LOCKED)) { - WARN_ON((ss->state & SLICE_STATE_NAPI)); - ss->state |= SLICE_STATE_NAPI_YIELD; - rc = false; - ss->lock_napi_yield++; - } else - ss->state = SLICE_STATE_NAPI; - spin_unlock(&ss->lock); - return rc; -} - -static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) -{ - spin_lock(&ss->lock); - WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD))); - ss->state = SLICE_STATE_IDLE; - spin_unlock(&ss->lock); -} - -static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) -{ - bool rc = true; - spin_lock_bh(&ss->lock); - if ((ss->state & SLICE_LOCKED)) { - ss->state |= SLICE_STATE_POLL_YIELD; - rc = false; - ss->lock_poll_yield++; - } else - ss->state |= SLICE_STATE_POLL; - spin_unlock_bh(&ss->lock); - return rc; -} - -static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) -{ - spin_lock_bh(&ss->lock); - WARN_ON((ss->state & SLICE_STATE_NAPI)); - ss->state = SLICE_STATE_IDLE; - spin_unlock_bh(&ss->lock); -} - -static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) -{ - WARN_ON(!(ss->state & SLICE_LOCKED)); - return (ss->state & SLICE_USER_PEND); -} -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) -{ - return false; -} - -static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) -{ - return false; -} - 
-static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) -{ - return false; -} -#endif - static int myri10ge_reset(struct myri10ge_priv *mgp) { struct myri10ge_cmd cmd; @@ -1426,7 +1325,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) struct pci_dev *pdev = mgp->pdev; struct net_device *dev = mgp->dev; u8 *va; - bool polling; if (len <= mgp->small_bytes) { rx = &ss->rx_small; @@ -1441,15 +1339,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; prefetch(va); - /* When busy polling in user context, allocate skb and copy headers to - * skb's linear memory ourselves. When not busy polling, use the napi - * gro api. - */ - polling = myri10ge_ss_busy_polling(ss); - if (polling) - skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); - else - skb = napi_get_frags(&ss->napi); + skb = napi_get_frags(&ss->napi); if (unlikely(skb == NULL)) { ss->stats.rx_dropped++; for (i = 0, remainder = len; remainder > 0; i++) { @@ -1489,27 +1379,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) myri10ge_vlan_rx(mgp->dev, va, skb); skb_record_rx_queue(skb, ss - &mgp->ss[0]); - if (polling) { - int hlen; - - /* myri10ge_vlan_rx might have moved the header, so compute - * length and address again. - */ - hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN; - va = page_address(skb_frag_page(&rx_frags[0])) + - rx_frags[0].page_offset; - /* Copy header into the skb linear memory */ - skb_copy_to_linear_data(skb, va, hlen); - rx_frags[0].page_offset += hlen; - rx_frags[0].size -= hlen; - skb->data_len -= hlen; - skb->tail += hlen; - skb->protocol = eth_type_trans(skb, dev); - skb_mark_napi_id(skb, &ss->napi); - netif_receive_skb(skb); - } - else - napi_gro_frags(&ss->napi); + napi_gro_frags(&ss->napi); return 1; } @@ -1669,14 +1539,9 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) if (ss->mgp->dca_enabled) myri10ge_update_dca(ss); #endif - /* Try later if the busy_poll handler is running. 
*/ - if (!myri10ge_ss_lock_napi(ss)) - return budget; - /* process as many rx events as NAPI will allow */ work_done = myri10ge_clean_rx_done(ss, budget); - myri10ge_ss_unlock_napi(ss); if (work_done < budget) { napi_complete_done(napi, work_done); put_be32(htonl(3), ss->irq_claim); @@ -1684,34 +1549,6 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) return work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int myri10ge_busy_poll(struct napi_struct *napi) -{ - struct myri10ge_slice_state *ss = - container_of(napi, struct myri10ge_slice_state, napi); - struct myri10ge_priv *mgp = ss->mgp; - int work_done; - - /* Poll only when the link is up */ - if (mgp->link_state != MXGEFW_LINK_UP) - return LL_FLUSH_FAILED; - - if (!myri10ge_ss_lock_poll(ss)) - return LL_FLUSH_BUSY; - - /* Process a small number of packets */ - work_done = myri10ge_clean_rx_done(ss, 4); - if (work_done) - ss->busy_poll_cnt += work_done; - else - ss->busy_poll_miss++; - - myri10ge_ss_unlock_poll(ss); - - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - static irqreturn_t myri10ge_intr(int irq, void *arg) { struct myri10ge_slice_state *ss = arg; @@ -1919,10 +1756,6 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", "wake_queue", "stop_queue", "tx_linearized", -#ifdef CONFIG_NET_RX_BUSY_POLL - "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss", - "rx_busy_poll_cnt", -#endif }; #define MYRI10GE_NET_STATS_LEN 21 @@ -2022,12 +1855,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)ss->tx.wake_queue; data[i++] = (unsigned int)ss->tx.stop_queue; data[i++] = (unsigned int)ss->tx.linearized; -#ifdef CONFIG_NET_RX_BUSY_POLL - data[i++] = ss->lock_napi_yield; - data[i++] = ss->lock_poll_yield; - data[i++] = ss->busy_poll_miss; - data[i++] = ss->busy_poll_cnt; -#endif } } @@ -2589,9 +2416,6 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } - /* Initialize the slice spinlock and state used for polling */ - myri10ge_ss_init_lock(ss); - /* must happen prior to any irq */ napi_enable(&(ss)->napi); } @@ -2668,19 +2492,9 @@ static int myri10ge_close(struct net_device *dev) del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - for (i = 0; i < mgp->num_slices; i++) { + for (i = 0; i < mgp->num_slices; i++) napi_disable(&mgp->ss[i].napi); - local_bh_disable(); /* myri10ge_ss_lock_napi needs this */ - /* Lock the slice to prevent the busy_poll handler from - * accessing it. Later when we bring the NIC up, myri10ge_open - * resets the slice including this lock. 
- */ - while (!myri10ge_ss_lock_napi(&mgp->ss[i])) { - pr_info("Slice %d locked\n", i); - mdelay(1); - } - local_bh_enable(); - } + netif_carrier_off(dev); netif_tx_stop_all_queues(dev); @@ -3953,9 +3767,6 @@ static const struct net_device_ops myri10ge_netdev_ops = { .ndo_change_mtu = myri10ge_change_mtu, .ndo_set_rx_mode = myri10ge_set_multicast_list, .ndo_set_mac_address = myri10ge_set_mac_address, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = myri10ge_busy_poll, -#endif }; static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -- cgit v1.2.3 From 5226b7919641f285bf0f8db84deeb3920b160ec7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 11:44:27 -0800 Subject: cxgb4: get rid of custom busy poll code In linux-4.5, busy polling was implemented in core NAPI stack, meaning that all custom implementation can be removed from drivers. Not only we remove lot of code, we also remove one spin_lock() from driver fast path. Signed-off-by: Eric Dumazet Cc: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 113 ------------------------ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 16 +--- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 12 +-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 39 -------- 4 files changed, 6 insertions(+), 174 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ccb455f14d08..163543b1ea0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -586,22 +586,6 @@ struct sge_rspq { /* state for an SGE response queue */ rspq_handler_t handler; rspq_flush_handler_t flush_handler; struct t4_lro_mgr lro_mgr; -#ifdef CONFIG_NET_RX_BUSY_POLL -#define CXGB_POLL_STATE_IDLE 0 -#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */ -#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */ -#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */ -#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */ -#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \ - CXGB_POLL_STATE_POLL_YIELD) -#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \ - CXGB_POLL_STATE_POLL) -#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \ - CXGB_POLL_STATE_POLL_YIELD) - unsigned int bpoll_state; - spinlock_t bpoll_lock; /* lock for busy poll */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ - }; struct sge_eth_stats { /* Ethernet queue statistics */ @@ -1173,102 +1157,6 @@ static inline struct adapter *netdev2adap(const struct net_device *dev) return netdev2pinfo(dev)->adapter; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) -{ - spin_lock_init(&q->bpoll_lock); - q->bpoll_state = CXGB_POLL_STATE_IDLE; -} - -static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) -{ - bool rc = true; - - spin_lock(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_LOCKED) { - q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD; - rc = false; - } else { - q->bpoll_state = CXGB_POLL_STATE_NAPI; - } - spin_unlock(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) -{ - bool rc = false; - - spin_lock(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) - rc = true; - q->bpoll_state = CXGB_POLL_STATE_IDLE; - spin_unlock(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) -{ - bool rc = true; - - spin_lock_bh(&q->bpoll_lock); - if (q->bpoll_state & 
CXGB_POLL_LOCKED) { - q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD; - rc = false; - } else { - q->bpoll_state |= CXGB_POLL_STATE_POLL; - } - spin_unlock_bh(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) -{ - bool rc = false; - - spin_lock_bh(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) - rc = true; - q->bpoll_state = CXGB_POLL_STATE_IDLE; - spin_unlock_bh(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) -{ - return q->bpoll_state & CXGB_POLL_USER_PEND; -} -#else -static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) -{ -} - -static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) -{ - return true; -} - -static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* Return a version number to identify the type of adapter. The scheme is: * - bits 0..9: chip version * - bits 10..15: chip revision @@ -1325,7 +1213,6 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); -int cxgb_busy_poll(struct napi_struct *napi); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); extern int dbfifo_int_thresh; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 49e000ebd2b9..f4f569060689 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -744,14 +744,8 @@ static void quiesce_rx(struct adapter *adap) for (i = 0; i < adap->sge.ingr_sz; i++) { struct sge_rspq *q = adap->sge.ingr_map[i]; - if (q && q->handler) { + if (q && q->handler) napi_disable(&q->napi); - local_bh_disable(); - while (!cxgb_poll_lock_napi(q)) - mdelay(1); - local_bh_enable(); - } - } } @@ -782,10 +776,9 @@ static void enable_rx(struct adapter *adap) if (!q) continue; - if (q->handler) { - cxgb_busy_poll_init_lock(q); + if (q->handler) napi_enable(&q->napi); - } + /* 0-increment GTS to start the timer and enable interrupts */ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), SEINTARM_V(q->intr_params) | @@ -2763,9 +2756,6 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_fcoe_enable = cxgb_fcoe_enable, .ndo_fcoe_disable = cxgb_fcoe_disable, #endif /* CONFIG_CHELSIO_T4_FCOE */ -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = cxgb_busy_poll, -#endif .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 8098902c094a..36105b6837cb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -408,10 +408,9 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q) if (!q) return; - if (q->handler) { - cxgb_busy_poll_init_lock(q); + if (q->handler) napi_enable(&q->napi); - } + /* 0-increment GTS to start the timer and enable interrupts */ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), SEINTARM_V(q->intr_params) | @@ -420,13 +419,8 @@ static void enable_rx(struct adapter *adap, 
struct sge_rspq *q) static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) { - if (q && q->handler) { + if (q && q->handler) napi_disable(&q->napi); - local_bh_disable(); - while (!cxgb_poll_lock_napi(q)) - mdelay(1); - local_bh_enable(); - } } static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 09653ae0d2b1..f05f0d400324 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -43,9 +43,7 @@ #include #include #include -#ifdef CONFIG_NET_RX_BUSY_POLL #include -#endif /* CONFIG_NET_RX_BUSY_POLL */ #ifdef CONFIG_CHELSIO_T4_FCOE #include #endif /* CONFIG_CHELSIO_T4_FCOE */ @@ -2059,7 +2057,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); if ((pkt->l2info & htonl(RXF_TCP_F)) && - !(cxgb_poll_busy_polling(q)) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { do_gro(rxq, si, pkt); return 0; @@ -2290,38 +2287,6 @@ static int process_responses(struct sge_rspq *q, int budget) return budget - budget_left; } -#ifdef CONFIG_NET_RX_BUSY_POLL -int cxgb_busy_poll(struct napi_struct *napi) -{ - struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); - unsigned int params, work_done; - u32 val; - - if (!cxgb_poll_lock_poll(q)) - return LL_FLUSH_BUSY; - - work_done = process_responses(q, 4); - params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1); - q->next_intr_params = params; - val = CIDXINC_V(work_done) | SEINTARM_V(params); - - /* If we don't have access to the new User GTS (T5+), use the old - * doorbell mechanism; otherwise use the new BAR2 mechanism. - */ - if (unlikely(!q->bar2_addr)) - t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), - val | INGRESSQID_V((u32)q->cntxt_id)); - else { - writel(val | INGRESSQID_V(q->bar2_qid), - q->bar2_addr + SGE_UDB_GTS); - wmb(); - } - - cxgb_poll_unlock_poll(q); - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * napi_rx_handler - the NAPI handler for Rx processing * @napi: the napi instance @@ -2340,9 +2305,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) int work_done; u32 val; - if (!cxgb_poll_lock_napi(q)) - return budget; - work_done = process_responses(q, budget); if (likely(work_done < budget)) { int timer_index; @@ -2382,7 +2344,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) q->bar2_addr + SGE_UDB_GTS); wmb(); } - cxgb_poll_unlock_napi(q); return work_done; } -- cgit v1.2.3 From 38ab52e8e1e2dd65f2d349f82553335813b638d2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 20:40:08 -0800 Subject: tcp: clear pfmemalloc on outgoing skb Josef Bacik diagnosed following problem : I was seeing random disconnects while testing NBD over loopback. This turned out to be because NBD sets pfmemalloc on it's socket, however the receiving side is a user space application so does not have pfmemalloc set on its socket. This means that sk_filter_trim_cap will simply drop this packet, under the assumption that the other side will simply retransmit. Well we do retransmit, and then the packet is just dropped again for the same reason. It seems the better way to address this problem is to clear pfmemalloc in the TCP transmit path. pfmemalloc strict control really makes sense on the receive path. Signed-off-by: Eric Dumazet Acked-by: Josef Bacik Signed-off-by: David S. 
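For context, a simplified sketch of the receive-side test that was dropping these packets; the real logic lives in sk_filter_trim_cap() and is only approximated here:

	/* An skb built from memory reserves (skb->pfmemalloc set) is only
	 * delivered to sockets that may themselves use the reserves. NBD's
	 * user-space peer has no SOCK_MEMALLOC, so the looped-back packet was
	 * dropped every time, including after each retransmit.
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;		/* drop */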
Miller --- net/ipv4/tcp_output.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 956bea9a5394..ccf1ef4dcba4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -964,6 +964,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, */ skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); + /* If we had to use memory reserve to allocate this skb, + * this might cause drops if packet is looped back : + * Other socket might not have SOCK_MEMALLOC. + * Packets not looped back do not care about pfmemalloc. + */ + skb->pfmemalloc = 0; + skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); -- cgit v1.2.3 From 3d67576da15167b2669e4765ca9e383f6bcb4171 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Thu, 2 Feb 2017 23:46:21 -0500 Subject: bonding: Remove unnecessary returned value check The function bond_info_query alwarys returns 0. As such, in the function bond_do_ioctl, it is not necessary to check the returned value. So the interface type of the function bond_info_query is changed to void. The redundant check is removed. Signed-off-by: Zhu Yanjun Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 36919221b3f0..e6af04716cf7 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1993,11 +1993,10 @@ static int bond_release_and_destroy(struct net_device *bond_dev, return ret; } -static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) +static void bond_info_query(struct net_device *bond_dev, struct ifbond *info) { struct bonding *bond = netdev_priv(bond_dev); bond_fill_ifbond(bond, info); - return 0; } static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) @@ -3409,12 +3408,11 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) return -EFAULT; - res = bond_info_query(bond_dev, &k_binfo); - if (res == 0 && - copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) + bond_info_query(bond_dev, &k_binfo); + if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) return -EFAULT; - return res; + return 0; case BOND_SLAVE_INFO_QUERY_OLD: case SIOCBONDSLAVEINFOQUERY: u_sinfo = (struct ifslave __user *)ifr->ifr_data; -- cgit v1.2.3 From 2946fde9fd7f6c38beaaf581514b706d07c7aec0 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:51 +0100 Subject: mlxsw: item: Add 8bit item helpers Item heplers for 8bit values are needed, let's add them. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/item.h | 79 +++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 3c95e3ddd9c2..09f35de0a371 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/item.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015-2017 Jiri Pirko * Copyright (c) 2015 Ido Schimmel * * Redistribution and use in source and binary forms, with or without @@ -72,6 +72,40 @@ __mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index, typesize); } +static inline u8 __mlxsw_item_get8(const char *buf, + const struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8)); + u8 *b = (u8 *) buf; + u8 tmp; + + tmp = b[offset]; + tmp >>= item->shift; + tmp &= GENMASK(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item, + unsigned short index, u8 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, + sizeof(u8)); + u8 *b = (u8 *) buf; + u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift; + u8 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = b[offset]; + tmp &= ~mask; + tmp |= val; + b[offset] = tmp; +} + static inline u16 __mlxsw_item_get16(const char *buf, const struct mlxsw_item *item, unsigned short index) @@ -253,6 +287,47 @@ static inline void __mlxsw_item_bit_array_set(char *buf, * _iname: item name within the container */ +#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +{ \ + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\ +{ \ + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ + _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ +{ \ + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u8 val) \ +{ \ + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + #define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .offset = _offset, \ -- cgit v1.2.3 From d5e556c6a158893d9e50de3d4e7638f753ffd520 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:52 +0100 Subject: mlxsw: item: Add helpers for getting pointer into payload for char buffer item Sometimes it is handy to get a pointer to a char buffer item and use it direcly to write/read data. So add these helpers. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
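As a hedged illustration (the item name and offsets below are made up), every buffer item declared through MLXSW_ITEM_BUF() additionally gains a _data() accessor returning a pointer straight into the payload once the diff below is applied:

	/* Hypothetical 16-byte buffer item: */
	MLXSW_ITEM_BUF(reg, foo, bar, 0x10, 16);

	/* Besides mlxsw_reg_foo_bar_memcpy_to(), the macro now also emits
	 * mlxsw_reg_foo_bar_data(), so the buffer can be built in place
	 * instead of being staged locally and copied in afterwards:
	 */
	memcpy(mlxsw_reg_foo_bar_data(payload), src, 16);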
Miller --- drivers/net/ethernet/mellanox/mlxsw/item.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 09f35de0a371..28427f0758c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -225,6 +225,14 @@ static inline void __mlxsw_item_memcpy_to(char *buf, const char *src, memcpy(&buf[offset], src, item->size.bytes); } +static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); + + return &buf[offset]; +} + static inline u16 __mlxsw_item_bit_array_offset(const struct mlxsw_item *item, u16 index, u8 *shift) @@ -468,6 +476,11 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline char * \ +mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \ +{ \ + return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } #define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \ @@ -494,6 +507,12 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), index); \ +} \ +static inline char * \ +mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_data(buf, \ + &__ITEM_NAME(_type, _cname, _iname), index); \ } #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \ -- cgit v1.2.3 From 3279da4c8877357b1001b4a3359069d588053f27 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:53 +0100 Subject: mlxsw: reg: Add Policy-Engine ACL Register The PACL register is used for configuration of the ACL. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 47 +++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 9fb0316b3a30..18b2da4301c2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1,9 +1,9 @@ /* * drivers/net/ethernet/mellanox/mlxsw/reg.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. * Copyright (c) 2015-2016 Ido Schimmel * Copyright (c) 2015 Elad Raz - * Copyright (c) 2015-2016 Jiri Pirko + * Copyright (c) 2015-2017 Jiri Pirko * Copyright (c) 2016 Yotam Gigi * * Redistribution and use in source and binary forms, with or without @@ -1757,6 +1757,48 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* PACL - Policy-Engine ACL Register + * --------------------------------- + * This register is used for configuration of the ACL. + */ +#define MLXSW_REG_PACL_ID 0x3004 +#define MLXSW_REG_PACL_LEN 0x70 + +MLXSW_REG_DEFINE(pacl, MLXSW_REG_PACL_ID, MLXSW_REG_PACL_LEN); + +/* reg_pacl_v + * Valid. Setting the v bit makes the ACL valid. It should not be cleared + * while the ACL is bounded to either a port, VLAN or ACL rule. + * Access: RW + */ +MLXSW_ITEM32(reg, pacl, v, 0x00, 24, 1); + +/* reg_pacl_acl_id + * An identifier representing the ACL (managed by software) + * Range 0 .. 
cap_max_acl_regions - 1 + * Access: Index + */ +MLXSW_ITEM32(reg, pacl, acl_id, 0x08, 0, 16); + +#define MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN 16 + +/* reg_pacl_tcam_region_info + * Opaque object that represents a TCAM region. + * Obtained through PTAR register. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, pacl, tcam_region_info, 0x30, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id, + bool valid, const char *tcam_region_info) +{ + MLXSW_REG_ZERO(pacl, payload); + mlxsw_reg_pacl_acl_id_set(payload, acl_id); + mlxsw_reg_pacl_v_set(payload, valid); + mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -5434,6 +5476,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), + MLXSW_REG(pacl), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), -- cgit v1.2.3 From 10fabef5134b02aa5a6f8f894d9943095d9b2ce2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:54 +0100 Subject: mlxsw: reg: Add Policy-Engine ACL Group Table register The PAGT register is used for configuration of the ACL Group Table. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 54 +++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 18b2da4301c2..5f76fbc1e03f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1799,6 +1799,59 @@ static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id, mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info); } +/* PAGT - Policy-Engine ACL Group Table + * ------------------------------------ + * This register is used for configuration of the ACL Group Table. + */ +#define MLXSW_REG_PAGT_ID 0x3005 +#define MLXSW_REG_PAGT_BASE_LEN 0x30 +#define MLXSW_REG_PAGT_ACL_LEN 4 +#define MLXSW_REG_PAGT_ACL_MAX_NUM 16 +#define MLXSW_REG_PAGT_LEN (MLXSW_REG_PAGT_BASE_LEN + \ + MLXSW_REG_PAGT_ACL_MAX_NUM * MLXSW_REG_PAGT_ACL_LEN) + +MLXSW_REG_DEFINE(pagt, MLXSW_REG_PAGT_ID, MLXSW_REG_PAGT_LEN); + +/* reg_pagt_size + * Number of ACLs in the group. + * Size 0 invalidates a group. + * Range 0 .. cap_max_acl_group_size (hard coded to 16 for now) + * Total number of ACLs in all groups must be lower or equal + * to cap_max_acl_tot_groups + * Note: a group which is binded must not be invalidated + * Access: Index + */ +MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8); + +/* reg_pagt_acl_group_id + * An identifier (numbered from 0..cap_max_acl_groups-1) representing + * the ACL Group identifier (managed by software). 
+ * Access: Index + */ +MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16); + +/* reg_pagt_acl_id + * ACL identifier + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pagt, acl_id, 0x30, 0, 16, 0x04, 0x00, false); + +static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id) +{ + MLXSW_REG_ZERO(pagt, payload); + mlxsw_reg_pagt_acl_group_id_set(payload, acl_group_id); +} + +static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index, + u16 acl_id) +{ + u8 size = mlxsw_reg_pagt_size_get(payload); + + if (index >= size) + mlxsw_reg_pagt_size_set(payload, index + 1); + mlxsw_reg_pagt_acl_id_set(payload, index, acl_id); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -5477,6 +5530,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(sfmr), MLXSW_REG(spvmlr), MLXSW_REG(pacl), + MLXSW_REG(pagt), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), -- cgit v1.2.3 From d9c2661e1c8a98583a48f25b24d44ea9fa57fa87 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:55 +0100 Subject: mlxsw: reg: Add Policy-Engine TCAM Allocation Register The PTAR register is used for allocation of regions in the TCAM. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 106 ++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5f76fbc1e03f..444f0a309d30 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1852,6 +1852,111 @@ static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index, mlxsw_reg_pagt_acl_id_set(payload, index, acl_id); } +/* PTAR - Policy-Engine TCAM Allocation Register + * --------------------------------------------- + * This register is used for allocation of regions in the TCAM. + * Note: Query method is not supported on this register. + */ +#define MLXSW_REG_PTAR_ID 0x3006 +#define MLXSW_REG_PTAR_BASE_LEN 0x20 +#define MLXSW_REG_PTAR_KEY_ID_LEN 1 +#define MLXSW_REG_PTAR_KEY_ID_MAX_NUM 16 +#define MLXSW_REG_PTAR_LEN (MLXSW_REG_PTAR_BASE_LEN + \ + MLXSW_REG_PTAR_KEY_ID_MAX_NUM * MLXSW_REG_PTAR_KEY_ID_LEN) + +MLXSW_REG_DEFINE(ptar, MLXSW_REG_PTAR_ID, MLXSW_REG_PTAR_LEN); + +enum mlxsw_reg_ptar_op { + /* allocate a TCAM region */ + MLXSW_REG_PTAR_OP_ALLOC, + /* resize a TCAM region */ + MLXSW_REG_PTAR_OP_RESIZE, + /* deallocate TCAM region */ + MLXSW_REG_PTAR_OP_FREE, + /* test allocation */ + MLXSW_REG_PTAR_OP_TEST, +}; + +/* reg_ptar_op + * Access: OP + */ +MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4); + +/* reg_ptar_action_set_type + * Type of action set to be used on this region. + * For Spectrum, this is always type 2 - "flexible" + * Access: WO + */ +MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8); + +/* reg_ptar_key_type + * TCAM key type for the region. + * For Spectrum, this is always type 0x50 - "FLEX_KEY" + * Access: WO + */ +MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8); + +/* reg_ptar_region_size + * TCAM region size. When allocating/resizing this is the requested size, + * the response is the actual size. Note that actual size may be + * larger than requested. + * Allowed range 1 .. cap_max_rules-1 + * Reserved during op deallocate. + * Access: WO + */ +MLXSW_ITEM32(reg, ptar, region_size, 0x04, 0, 16); + +/* reg_ptar_region_id + * Region identifier + * Range 0 .. 
cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, ptar, region_id, 0x08, 0, 16); + +/* reg_ptar_tcam_region_info + * Opaque object that represents the TCAM region. + * Returned when allocating a region. + * Provided by software for ACL generation and region deallocation and resize. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptar, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +/* reg_ptar_flexible_key_id + * Identifier of the Flexible Key. + * Only valid if key_type == "FLEX_KEY" + * The key size will be rounded up to one of the following values: + * 9B, 18B, 36B, 54B. + * This field is reserved for in resize operation. + * Access: WO + */ +MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8, + MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false); + +static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op, + u16 region_size, u16 region_id, + const char *tcam_region_info) +{ + MLXSW_REG_ZERO(ptar, payload); + mlxsw_reg_ptar_op_set(payload, op); + mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */ + mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */ + mlxsw_reg_ptar_region_size_set(payload, region_size); + mlxsw_reg_ptar_region_id_set(payload, region_id); + mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info); +} + +static inline void mlxsw_reg_ptar_key_id_pack(char *payload, int index, + u16 key_id) +{ + mlxsw_reg_ptar_flexible_key_id_set(payload, index, key_id); +} + +static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info) +{ + mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -5531,6 +5636,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvmlr), MLXSW_REG(pacl), MLXSW_REG(pagt), + MLXSW_REG(ptar), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), -- cgit v1.2.3 From 0171cdec03bddcc00de50f318d253ff2da821c41 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:56 +0100 Subject: mlxsw: reg: Add Policy-Engine TCAM Entry Register Version 2 The PTCE-V2 register is used for accessing rules within a TCAM region. It is a new version of PTCE in order to support wider key, mask and action within a TCAM region. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 100 ++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 444f0a309d30..10082518cf97 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1957,6 +1957,105 @@ static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info) mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); } +/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 + * ----------------------------------------------------- + * This register is used for accessing rules within a TCAM region. + * It is a new version of PTCE in order to support wider key, + * mask and action within a TCAM region. This register is not supported + * by SwitchX and SwitchX-2. + */ +#define MLXSW_REG_PTCE2_ID 0x3017 +#define MLXSW_REG_PTCE2_LEN 0x1D8 + +MLXSW_REG_DEFINE(ptce2, MLXSW_REG_PTCE2_ID, MLXSW_REG_PTCE2_LEN); + +/* reg_ptce2_v + * Valid. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, ptce2, v, 0x00, 31, 1); + +/* reg_ptce2_a + * Activity. Set if a packet lookup has hit on the specific entry. + * To clear the "a" bit, use "clear activity" op or "clear on read" op. + * Access: RO + */ +MLXSW_ITEM32(reg, ptce2, a, 0x00, 30, 1); + +enum mlxsw_reg_ptce2_op { + /* Read operation. */ + MLXSW_REG_PTCE2_OP_QUERY_READ = 0, + /* clear on read operation. Used to read entry + * and clear Activity bit. + */ + MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ = 1, + /* Write operation. Used to write a new entry to the table. + * All R/W fields are relevant for new entry. Activity bit is set + * for new entries - Note write with v = 0 will delete the entry. + */ + MLXSW_REG_PTCE2_OP_WRITE_WRITE = 0, + /* Update action. Only action set will be updated. */ + MLXSW_REG_PTCE2_OP_WRITE_UPDATE = 1, + /* Clear activity. A bit is cleared for the entry. */ + MLXSW_REG_PTCE2_OP_WRITE_CLEAR_ACTIVITY = 2, +}; + +/* reg_ptce2_op + * Access: OP + */ +MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3); + +/* reg_ptce2_offset + * Access: Index + */ +MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16); + +/* reg_ptce2_tcam_region_info + * Opaque object that represents the TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96 + +/* reg_ptce2_flex_key_blocks + * ACL Key. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, + MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + +/* reg_ptce2_mask + * mask- in the same size as key. A bit that is set directs the TCAM + * to compare the corresponding bit in key. A bit that is clear directs + * the TCAM to ignore the corresponding bit in key. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, + MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + +#define MLXSW_REG_PTCE2_FLEX_ACTION_SET_LEN 0xA8 + +/* reg_ptce2_flex_action_set + * ACL action set. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, + MLXSW_REG_PTCE2_FLEX_ACTION_SET_LEN); + +static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, + enum mlxsw_reg_ptce2_op op, + const char *tcam_region_info, + u16 offset) +{ + MLXSW_REG_ZERO(ptce2, payload); + mlxsw_reg_ptce2_v_set(payload, valid); + mlxsw_reg_ptce2_op_set(payload, op); + mlxsw_reg_ptce2_offset_set(payload, offset); + mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -5637,6 +5736,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pacl), MLXSW_REG(pagt), MLXSW_REG(ptar), + MLXSW_REG(ptce2), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), -- cgit v1.2.3 From af7170eee6ee269fe1d4f5cbd276bafc2ce064b8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:57 +0100 Subject: mlxsw: reg: Add Policy-Engine Port Binding Table The PPBT is used for configuration of the Port Binding Table. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
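For illustration, a minimal sketch of how the packing helper added in the diff below might be used to bind an ACL group to the ingress side of a port; the port number, group id and the mlxsw_reg_write() call site are assumptions, not part of the patch:

	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	/* Bind ACL group 5 to the ingress ACL lookup of local port 1. */
	mlxsw_reg_ppbt_pack(ppbt_pl, MLXSW_REG_PXBT_E_IACL,
			    MLXSW_REG_PXBT_OP_BIND, 1, 5);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ppbt), ppbt_pl);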
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 63 +++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 10082518cf97..ce6d85a626bb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1757,6 +1757,68 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* PPBT - Policy-Engine Port Binding Table + * --------------------------------------- + * This register is used for configuration of the Port Binding Table. + */ +#define MLXSW_REG_PPBT_ID 0x3002 +#define MLXSW_REG_PPBT_LEN 0x14 + +MLXSW_REG_DEFINE(ppbt, MLXSW_REG_PPBT_ID, MLXSW_REG_PPBT_LEN); + +enum mlxsw_reg_pxbt_e { + MLXSW_REG_PXBT_E_IACL, + MLXSW_REG_PXBT_E_EACL, +}; + +/* reg_ppbt_e + * Access: Index + */ +MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1); + +enum mlxsw_reg_pxbt_op { + MLXSW_REG_PXBT_OP_BIND, + MLXSW_REG_PXBT_OP_UNBIND, +}; + +/* reg_ppbt_op + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3); + +/* reg_ppbt_local_port + * Local port. Not including CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8); + +/* reg_ppbt_g + * group - When set, the binding is of an ACL group. When cleared, + * the binding is of an ACL. + * Must be set to 1 for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, g, 0x10, 31, 1); + +/* reg_ppbt_acl_info + * ACL/ACL group identifier. If the g bit is set, this field should hold + * the acl_group_id, else it should hold the acl_id. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16); + +static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e, + enum mlxsw_reg_pxbt_op op, + u8 local_port, u16 acl_info) +{ + MLXSW_REG_ZERO(ppbt, payload); + mlxsw_reg_ppbt_e_set(payload, e); + mlxsw_reg_ppbt_op_set(payload, op); + mlxsw_reg_ppbt_local_port_set(payload, local_port); + mlxsw_reg_ppbt_g_set(payload, true); + mlxsw_reg_ppbt_acl_info_set(payload, acl_info); +} + /* PACL - Policy-Engine ACL Register * --------------------------------- * This register is used for configuration of the ACL. @@ -5733,6 +5795,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), + MLXSW_REG(ppbt), MLXSW_REG(pacl), MLXSW_REG(pagt), MLXSW_REG(ptar), -- cgit v1.2.3 From 937b682cc0bcebe19df7911f3697be7fd8c5dee6 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:58 +0100 Subject: mlxsw: reg: Add Policy-Engine Rules Copy Register The PRCR register is used for accessing rules within a TCAM region. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 77 +++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index ce6d85a626bb..555cb80ab97b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2019,6 +2019,82 @@ static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info) mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); } +/* PRCR - Policy-Engine Rules Copy Register + * ---------------------------------------- + * This register is used for accessing rules within a TCAM region. 
+ */ +#define MLXSW_REG_PRCR_ID 0x300D +#define MLXSW_REG_PRCR_LEN 0x40 + +MLXSW_REG_DEFINE(prcr, MLXSW_REG_PRCR_ID, MLXSW_REG_PRCR_LEN); + +enum mlxsw_reg_prcr_op { + /* Move rules. Moves the rules from "tcam_region_info" starting + * at offset "offset" to "dest_tcam_region_info" + * at offset "dest_offset." + */ + MLXSW_REG_PRCR_OP_MOVE, + /* Copy rules. Copies the rules from "tcam_region_info" starting + * at offset "offset" to "dest_tcam_region_info" + * at offset "dest_offset." + */ + MLXSW_REG_PRCR_OP_COPY, +}; + +/* reg_prcr_op + * Access: OP + */ +MLXSW_ITEM32(reg, prcr, op, 0x00, 28, 4); + +/* reg_prcr_offset + * Offset within the source region to copy/move from. + * Access: Index + */ +MLXSW_ITEM32(reg, prcr, offset, 0x00, 0, 16); + +/* reg_prcr_size + * The number of rules to copy/move. + * Access: WO + */ +MLXSW_ITEM32(reg, prcr, size, 0x04, 0, 16); + +/* reg_prcr_tcam_region_info + * Opaque object that represents the source TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, prcr, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +/* reg_prcr_dest_offset + * Offset within the source region to copy/move to. + * Access: Index + */ +MLXSW_ITEM32(reg, prcr, dest_offset, 0x20, 0, 16); + +/* reg_prcr_dest_tcam_region_info + * Opaque object that represents the destination TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, prcr, dest_tcam_region_info, 0x30, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op, + const char *src_tcam_region_info, + u16 src_offset, + const char *dest_tcam_region_info, + u16 dest_offset, u16 size) +{ + MLXSW_REG_ZERO(prcr, payload); + mlxsw_reg_prcr_op_set(payload, op); + mlxsw_reg_prcr_offset_set(payload, src_offset); + mlxsw_reg_prcr_size_set(payload, size); + mlxsw_reg_prcr_tcam_region_info_memcpy_to(payload, + src_tcam_region_info); + mlxsw_reg_prcr_dest_offset_set(payload, dest_offset); + mlxsw_reg_prcr_dest_tcam_region_info_memcpy_to(payload, + dest_tcam_region_info); +} + /* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 * ----------------------------------------------------- * This register is used for accessing rules within a TCAM region. @@ -5799,6 +5875,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pacl), MLXSW_REG(pagt), MLXSW_REG(ptar), + MLXSW_REG(prcr), MLXSW_REG(ptce2), MLXSW_REG(qpcr), MLXSW_REG(qtct), -- cgit v1.2.3 From d120649d86852f00d2c8afc943d1f480d6ed2703 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:28:59 +0100 Subject: mlxsw: reg: Add Policy-Engine Policy Based Switching Register The PPBS register retrieves and sets Policy Based Switching Table entries. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 555cb80ab97b..c503363ebc1f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2019,6 +2019,36 @@ static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info) mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); } +/* PPBS - Policy-Engine Policy Based Switching Register + * ---------------------------------------------------- + * This register retrieves and sets Policy Based Switching Table entries. 
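+ * On Spectrum an entry is addressed by a pointer into the KVD linear
+ * area (pbs_ptr) and resolves to the system port the packet should be
+ * forwarded to; forwarding flexible actions reference entries through
+ * the same pointer.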
+ */ +#define MLXSW_REG_PPBS_ID 0x300C +#define MLXSW_REG_PPBS_LEN 0x14 + +MLXSW_REG_DEFINE(ppbs, MLXSW_REG_PPBS_ID, MLXSW_REG_PPBS_LEN); + +/* reg_ppbs_pbs_ptr + * Index into the PBS table. + * For Spectrum, the index points to the KVD Linear. + * Access: Index + */ +MLXSW_ITEM32(reg, ppbs, pbs_ptr, 0x08, 0, 24); + +/* reg_ppbs_system_port + * Unique port identifier for the final destination of the packet. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbs, system_port, 0x10, 0, 16); + +static inline void mlxsw_reg_ppbs_pack(char *payload, u32 pbs_ptr, + u16 system_port) +{ + MLXSW_REG_ZERO(ppbs, payload); + mlxsw_reg_ppbs_pbs_ptr_set(payload, pbs_ptr); + mlxsw_reg_ppbs_system_port_set(payload, system_port); +} + /* PRCR - Policy-Engine Rules Copy Register * ---------------------------------------- * This register is used for accessing rules within a TCAM region. @@ -5875,6 +5905,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pacl), MLXSW_REG(pagt), MLXSW_REG(ptar), + MLXSW_REG(ppbs), MLXSW_REG(prcr), MLXSW_REG(ptce2), MLXSW_REG(qpcr), -- cgit v1.2.3 From e3426e12fee11cb7949b65428955127ca3dcb433 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:00 +0100 Subject: mlxsw: reg: Add Policy-Engine Extended Flexible Action Register PEFA register is used for accessing an extended flexible action entry in the central KVD Linear Database. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 39 ++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index c503363ebc1f..b50a312d89c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2125,6 +2125,40 @@ static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op, dest_tcam_region_info); } +/* PEFA - Policy-Engine Extended Flexible Action Register + * ------------------------------------------------------ + * This register is used for accessing an extended flexible action entry + * in the central KVD Linear Database. + */ +#define MLXSW_REG_PEFA_ID 0x300F +#define MLXSW_REG_PEFA_LEN 0xB0 + +MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); + +/* reg_pefa_index + * Index in the KVD Linear Centralized Database. + * Access: Index + */ +MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); + +#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8 + +/* reg_pefa_flex_action_set + * Action-set to perform when rule is matched. + * Must be zero padded if action set is shorter. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, + MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); + +static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, + const char *flex_action_set) +{ + MLXSW_REG_ZERO(pefa, payload); + mlxsw_reg_pefa_index_set(payload, index); + mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set); +} + /* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 * ----------------------------------------------------- * This register is used for accessing rules within a TCAM region. @@ -2203,14 +2237,12 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); -#define MLXSW_REG_PTCE2_FLEX_ACTION_SET_LEN 0xA8 - /* reg_ptce2_flex_action_set * ACL action set. 
* Access: RW */ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, - MLXSW_REG_PTCE2_FLEX_ACTION_SET_LEN); + MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, enum mlxsw_reg_ptce2_op op, @@ -5907,6 +5939,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(ptar), MLXSW_REG(ppbs), MLXSW_REG(prcr), + MLXSW_REG(pefa), MLXSW_REG(ptce2), MLXSW_REG(qpcr), MLXSW_REG(qtct), -- cgit v1.2.3 From 3f1a84e6962c7fd0703b9bb5db0355d1c28b201a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:01 +0100 Subject: mlxsw: core: Introduce flexible keys support Hardware supports matching on so called "flexible keys". The idea is to assemble an optimal key to use for matching according to the fields in packet (elements) requested by user. Certain sets of elements are combined into pre-defined blocks. There is a picker to find needed blocks. Keys consist of 1..n blocks. Alongside with that, an initial portion of elements is introduced in order to be able to offload basic cls_flower rules. Picked keys are cached so multiple rules could share them. There is an encode function provided that takes care of encoding key and mask values according to given key. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 2 +- .../ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 475 +++++++++++++++++++++ .../ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 238 +++++++++++ 3 files changed, 714 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index fe8dadba15ab..6a83768e445e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o -mlxsw_core-objs := core.o +mlxsw_core-objs := core.o core_acl_flex_keys.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c new file mode 100644 index 000000000000..b32a00972e83 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -0,0 +1,475 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include "item.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_afk { + struct list_head key_info_list; + unsigned int max_blocks; + const struct mlxsw_afk_block *blocks; + unsigned int blocks_count; +}; + +static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) +{ + int i; + int j; + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; + + for (j = 0; j < block->instances_count; j++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[j]; + if (elinst->type != elinst->info->type || + elinst->item.size.bits != + elinst->info->item.size.bits) + return false; + } + } + return true; +} + +struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, + const struct mlxsw_afk_block *blocks, + unsigned int blocks_count) +{ + struct mlxsw_afk *mlxsw_afk; + + mlxsw_afk = kzalloc(sizeof(*mlxsw_afk), GFP_KERNEL); + if (!mlxsw_afk) + return NULL; + INIT_LIST_HEAD(&mlxsw_afk->key_info_list); + mlxsw_afk->max_blocks = max_blocks; + mlxsw_afk->blocks = blocks; + mlxsw_afk->blocks_count = blocks_count; + WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk)); + return mlxsw_afk; +} +EXPORT_SYMBOL(mlxsw_afk_create); + +void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk) +{ + WARN_ON(!list_empty(&mlxsw_afk->key_info_list)); + kfree(mlxsw_afk); +} +EXPORT_SYMBOL(mlxsw_afk_destroy); + +struct mlxsw_afk_key_info { + struct list_head list; + unsigned int ref_count; + unsigned int blocks_count; + int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value + * is index inside "blocks" + */ + struct mlxsw_afk_element_usage elusage; + const struct mlxsw_afk_block *blocks[0]; +}; + +static bool +mlxsw_afk_key_info_elements_eq(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + return memcmp(&key_info->elusage, elusage, sizeof(*elusage)) == 0; +} + +static struct mlxsw_afk_key_info * +mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + + list_for_each_entry(key_info, &mlxsw_afk->key_info_list, list) { + if (mlxsw_afk_key_info_elements_eq(key_info, elusage)) + return key_info; + } + return NULL; +} + +struct mlxsw_afk_picker { + struct { + DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); + unsigned int total; + } hits[0]; +}; + +static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + enum mlxsw_afk_element element) +{ + int i; + int j; + + for (i = 
0; i < mlxsw_afk->blocks_count; i++) { + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; + + for (j = 0; j < block->instances_count; j++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[j]; + if (elinst->info->element == element) { + __set_bit(element, picker->hits[i].element); + picker->hits[i].total++; + } + } + } +} + +static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + int block_index) +{ + DECLARE_BITMAP(hits_element, MLXSW_AFK_ELEMENT_MAX); + int i; + int j; + + memcpy(&hits_element, &picker->hits[block_index].element, + sizeof(hits_element)); + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) { + if (__test_and_clear_bit(j, picker->hits[i].element)) + picker->hits[i].total--; + } + } +} + +static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker) +{ + int most_index = -EINVAL; /* Should never happen to return this */ + int most_hits = 0; + int i; + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + if (picker->hits[i].total > most_hits) { + most_hits = picker->hits[i].total; + most_index = i; + } + } + return most_index; +} + +static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + int block_index, + struct mlxsw_afk_key_info *key_info) +{ + enum mlxsw_afk_element element; + + if (key_info->blocks_count == mlxsw_afk->max_blocks) + return -EINVAL; + + for_each_set_bit(element, picker->hits[block_index].element, + MLXSW_AFK_ELEMENT_MAX) { + key_info->element_to_block[element] = key_info->blocks_count; + mlxsw_afk_element_usage_add(&key_info->elusage, element); + } + + key_info->blocks[key_info->blocks_count] = + &mlxsw_afk->blocks[block_index]; + key_info->blocks_count++; + return 0; +} + +static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_picker *picker; + enum mlxsw_afk_element element; + size_t alloc_size; + int err; + + alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count; + picker = kzalloc(alloc_size, GFP_KERNEL); + if (!picker) + return -ENOMEM; + + /* Since the same elements could be present in multiple blocks, + * we must find out optimal block list in order to make the + * block count as low as possible. + * + * First, we count hits. We go over all available blocks and count + * how many of requested elements are covered by each. + * + * Then in loop, we find block with most hits and add it to + * output key_info. Then we have to subtract this block hits so + * the next iteration will find most suitable block for + * the rest of requested elements. 
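+	 *
+	 * For example (illustrative only), if the requested elements are
+	 * {SRC_IP4, IP_PROTO, SRC_L4_PORT} and one block covers the first
+	 * two while another covers only SRC_L4_PORT, the first block wins
+	 * the first iteration and the second one the next, resulting in a
+	 * two-block key.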
+ */ + + mlxsw_afk_element_usage_for_each(element, elusage) + mlxsw_afk_picker_count_hits(mlxsw_afk, picker, element); + + do { + int block_index; + + block_index = mlxsw_afk_picker_most_hits_get(mlxsw_afk, picker); + if (block_index < 0) { + err = block_index; + goto out; + } + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, + block_index, key_info); + if (err) + goto out; + mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index); + } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage)); + + err = 0; +out: + kfree(picker); + return err; +} + +static struct mlxsw_afk_key_info * +mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + size_t alloc_size; + int err; + + alloc_size = sizeof(*key_info) + + sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks; + key_info = kzalloc(alloc_size, GFP_KERNEL); + if (!key_info) + return ERR_PTR(-ENOMEM); + err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage); + if (err) + goto err_picker; + list_add(&key_info->list, &mlxsw_afk->key_info_list); + key_info->ref_count = 1; + return key_info; + +err_picker: + kfree(key_info); + return ERR_PTR(err); +} + +static void mlxsw_afk_key_info_destroy(struct mlxsw_afk_key_info *key_info) +{ + list_del(&key_info->list); + kfree(key_info); +} + +struct mlxsw_afk_key_info * +mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + + key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage); + if (key_info) { + key_info->ref_count++; + return key_info; + } + return mlxsw_afk_key_info_create(mlxsw_afk, elusage); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_get); + +void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info) +{ + if (--key_info->ref_count) + return; + mlxsw_afk_key_info_destroy(key_info); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_put); + +bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + return mlxsw_afk_element_usage_subset(elusage, &key_info->elusage); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_subset); + +static const struct mlxsw_afk_element_inst * +mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block, + enum mlxsw_afk_element element) +{ + int i; + + for (i = 0; i < block->instances_count; i++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[i]; + if (elinst->info->element == element) + return elinst; + } + return NULL; +} + +static const struct mlxsw_afk_element_inst * +mlxsw_afk_key_info_elinst_get(struct mlxsw_afk_key_info *key_info, + enum mlxsw_afk_element element, + int *p_block_index) +{ + const struct mlxsw_afk_element_inst *elinst; + const struct mlxsw_afk_block *block; + int block_index; + + if (WARN_ON(!test_bit(element, key_info->elusage.usage))) + return NULL; + block_index = key_info->element_to_block[element]; + block = key_info->blocks[block_index]; + + elinst = mlxsw_afk_block_elinst_get(block, element); + if (WARN_ON(!elinst)) + return NULL; + + *p_block_index = block_index; + return elinst; +} + +u16 +mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info, + int block_index) +{ + return key_info->blocks[block_index]->encoding; +} +EXPORT_SYMBOL(mlxsw_afk_key_info_block_encoding_get); + +unsigned int +mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info) +{ + return key_info->blocks_count; +} +EXPORT_SYMBOL(mlxsw_afk_key_info_blocks_count_get); + +void 
mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value) +{ + const struct mlxsw_afk_element_info *elinfo = + &mlxsw_afk_element_infos[element]; + const struct mlxsw_item *storage_item = &elinfo->item; + + if (!mask_value) + return; + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_U32)) + return; + __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value); + __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value); + mlxsw_afk_element_usage_add(&values->elusage, element); +} +EXPORT_SYMBOL(mlxsw_afk_values_add_u32); + +void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + const char *key_value, const char *mask_value, + unsigned int len) +{ + const struct mlxsw_afk_element_info *elinfo = + &mlxsw_afk_element_infos[element]; + const struct mlxsw_item *storage_item = &elinfo->item; + + if (!memchr_inv(mask_value, 0, len)) /* If mask is zero */ + return; + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_BUF) || + WARN_ON(elinfo->item.size.bytes != len)) + return; + __mlxsw_item_memcpy_to(values->storage.key, key_value, + storage_item, 0); + __mlxsw_item_memcpy_to(values->storage.mask, mask_value, + storage_item, 0); + mlxsw_afk_element_usage_add(&values->elusage, element); +} +EXPORT_SYMBOL(mlxsw_afk_values_add_buf); + +static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + u32 value; + + value = __mlxsw_item_get32(storage, storage_item, 0); + __mlxsw_item_set32(output_indexed, output_item, 0, value); +} + +static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + char *storage_data = __mlxsw_item_data(storage, storage_item, 0); + char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); + size_t len = output_item->size.bytes; + + memcpy(output_data, storage_data, len); +} + +#define MLXSW_AFK_KEY_BLOCK_SIZE 16 + +static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + int block_index, char *storage, char *output) +{ + char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE; + const struct mlxsw_item *storage_item = &elinst->info->item; + const struct mlxsw_item *output_item = &elinst->item; + + if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) + mlxsw_afk_encode_u32(storage_item, output_item, + storage, output_indexed); + else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) + mlxsw_afk_encode_buf(storage_item, output_item, + storage, output_indexed); +} + +void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_values *values, + char *key, char *mask) +{ + const struct mlxsw_afk_element_inst *elinst; + enum mlxsw_afk_element element; + int block_index; + + mlxsw_afk_element_usage_for_each(element, &values->elusage) { + elinst = mlxsw_afk_key_info_elinst_get(key_info, element, + &block_index); + if (!elinst) + continue; + mlxsw_afk_encode_one(elinst, block_index, + values->storage.key, key); + mlxsw_afk_encode_one(elinst, block_index, + values->storage.mask, mask); + } +} +EXPORT_SYMBOL(mlxsw_afk_encode); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h new file mode 100644 index 000000000000..e4fcba7c2af2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ 
-0,0 +1,238 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H +#define _MLXSW_CORE_ACL_FLEX_KEYS_H + +#include +#include + +#include "item.h" + +enum mlxsw_afk_element { + MLXSW_AFK_ELEMENT_SRC_SYS_PORT, + MLXSW_AFK_ELEMENT_DMAC, + MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP4, + MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_SRC_IP6_HI, + MLXSW_AFK_ELEMENT_SRC_IP6_LO, + MLXSW_AFK_ELEMENT_DST_IP6_HI, + MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, + MLXSW_AFK_ELEMENT_MAX, +}; + +enum mlxsw_afk_element_type { + MLXSW_AFK_ELEMENT_TYPE_U32, + MLXSW_AFK_ELEMENT_TYPE_BUF, +}; + +struct mlxsw_afk_element_info { + enum mlxsw_afk_element element; /* element ID */ + enum mlxsw_afk_element_type type; + struct mlxsw_item item; /* element geometry in internal storage */ +}; + +#define MLXSW_AFK_ELEMENT_INFO(_type, _element, _offset, _shift, _size) \ + [MLXSW_AFK_ELEMENT_##_element] = { \ + .element = MLXSW_AFK_ELEMENT_##_element, \ + .type = _type, \ + .item = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _size}, \ + .name = #_element, \ + }, \ + } + +#define MLXSW_AFK_ELEMENT_INFO_U32(_element, _offset, _shift, _size) \ + MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_U32, \ + _element, _offset, _shift, _size) + +#define MLXSW_AFK_ELEMENT_INFO_BUF(_element, _offset, _size) \ + MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \ + _element, _offset, 0, _size) + +/* For the purpose of the driver, define a internal storage scratchpad + * that will be used to store key/mask values. 
For each defined element type + * define an internal storage geometry. + */ +static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { + MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), + MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), + MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), +}; + +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 + +struct mlxsw_afk_element_inst { /* element instance in actual block */ + const struct mlxsw_afk_element_info *info; + enum mlxsw_afk_element_type type; + struct mlxsw_item item; /* element geometry in block */ +}; + +#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, _shift, _size) \ + { \ + .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \ + .type = _type, \ + .item = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _size}, \ + .name = #_element, \ + }, \ + } + +#define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size) \ + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \ + _element, _offset, _shift, _size) + +#define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size) \ + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF, \ + _element, _offset, 0, _size) + +struct mlxsw_afk_block { + u16 encoding; /* block ID */ + struct mlxsw_afk_element_inst *instances; + unsigned int instances_count; +}; + +#define MLXSW_AFK_BLOCK(_encoding, _instances) \ + { \ + .encoding = _encoding, \ + .instances = _instances, \ + .instances_count = ARRAY_SIZE(_instances), \ + } + +struct mlxsw_afk_element_usage { + DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX); +}; + +#define mlxsw_afk_element_usage_for_each(element, elusage) \ + for_each_set_bit(element, (elusage)->usage, MLXSW_AFK_ELEMENT_MAX) + +static inline void +mlxsw_afk_element_usage_add(struct mlxsw_afk_element_usage *elusage, + enum mlxsw_afk_element element) +{ + __set_bit(element, elusage->usage); +} + +static inline void +mlxsw_afk_element_usage_zero(struct mlxsw_afk_element_usage *elusage) +{ + bitmap_zero(elusage->usage, MLXSW_AFK_ELEMENT_MAX); +} + +static inline void +mlxsw_afk_element_usage_fill(struct mlxsw_afk_element_usage *elusage, + const enum mlxsw_afk_element *elements, + unsigned int elements_count) +{ + int i; + + mlxsw_afk_element_usage_zero(elusage); + for (i = 0; i < elements_count; i++) + mlxsw_afk_element_usage_add(elusage, elements[i]); +} + +static inline bool +mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small, + struct mlxsw_afk_element_usage *elusage_big) +{ + int i; + + for (i = 0; i < MLXSW_AFK_ELEMENT_MAX; i++) + if (test_bit(i, elusage_small->usage) && + !test_bit(i, elusage_big->usage)) + return false; + return true; +} + +struct mlxsw_afk; + +struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, + const struct mlxsw_afk_block *blocks, + unsigned int blocks_count); +void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk); + +struct mlxsw_afk_key_info; + +struct mlxsw_afk_key_info * +mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage); 
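+
+/* A minimal usage sketch (illustrative only; "afk" is assumed to be a
+ * struct mlxsw_afk previously created with mlxsw_afk_create()):
+ *
+ *	struct mlxsw_afk_element_usage elusage;
+ *	struct mlxsw_afk_key_info *key_info;
+ *
+ *	mlxsw_afk_element_usage_zero(&elusage);
+ *	mlxsw_afk_element_usage_add(&elusage, MLXSW_AFK_ELEMENT_SRC_IP4);
+ *	mlxsw_afk_element_usage_add(&elusage, MLXSW_AFK_ELEMENT_IP_PROTO);
+ *	key_info = mlxsw_afk_key_info_get(afk, &elusage);
+ *	if (IS_ERR(key_info))
+ *		return PTR_ERR(key_info);
+ *	...
+ *	mlxsw_afk_key_info_put(key_info);
+ */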
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info); +bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage); + +u16 +mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info, + int block_index); +unsigned int +mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info); + +struct mlxsw_afk_element_values { + struct mlxsw_afk_element_usage elusage; + struct { + char key[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; + char mask[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; + } storage; +}; + +void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value); +void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + const char *key_value, const char *mask_value, + unsigned int len); +void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_values *values, + char *key, char *mask); + +#endif -- cgit v1.2.3 From 4cda7d8d7098e2b2c2c496ffb77b18f8b685e80d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:02 +0100 Subject: mlxsw: core: Introduce flexible actions support Each entry which is matched during ACL lookup points to an action set. This action set contains up to three separate actions. If more actions are needed to be chained, the extended set is created to hold them in KVD linear area. This patch implements handling of sets and encoding of actions. Currectly, only two actions are supported. Drop and forward. Forward action uses PBS pointer to KVD linear area, so the action code needs to take care of this as well. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- .../mellanox/mlxsw/core_acl_flex_actions.c | 685 +++++++++++++++++++++ .../mellanox/mlxsw/core_acl_flex_actions.h | 66 ++ 3 files changed, 753 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 6a83768e445e..c4c48baf0c3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o -mlxsw_core-objs := core.o core_acl_flex_keys.o +mlxsw_core-objs := core.o core_acl_flex_keys.o \ + core_acl_flex_actions.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c new file mode 100644 index 000000000000..34e2fefb0a25 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -0,0 +1,685 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#include "item.h" +#include "core_acl_flex_actions.h" + +enum mlxsw_afa_set_type { + MLXSW_AFA_SET_TYPE_NEXT, + MLXSW_AFA_SET_TYPE_GOTO, +}; + +/* afa_set_type + * Type of the record at the end of the action set. + */ +MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4); + +/* afa_set_next_action_set_ptr + * A pointer to the next action set in the KVD Centralized database. + */ +MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24); + +/* afa_set_goto_g + * group - When set, the binding is of an ACL group. When cleared, + * the binding is of an ACL. + * Must be set to 1 for Spectrum. + */ +MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1); + +enum mlxsw_afa_set_goto_binding_cmd { + /* continue go the next binding point */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, + /* jump to the next binding point no return */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, + /* terminate the acl binding */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4, +}; + +/* afa_set_goto_binding_cmd */ +MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3); + +/* afa_set_goto_next_binding + * ACL/ACL group identifier. If the g bit is set, this field should hold + * the acl_group_id, else it should hold the acl_id. + */ +MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16); + +/* afa_all_action_type + * Action Type. + */ +MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6); + +struct mlxsw_afa { + unsigned int max_acts_per_set; + const struct mlxsw_afa_ops *ops; + void *ops_priv; + struct rhashtable set_ht; + struct rhashtable fwd_entry_ht; +}; + +#define MLXSW_AFA_SET_LEN 0xA8 + +struct mlxsw_afa_set_ht_key { + char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */ + bool is_first; +}; + +/* Set structure holds one action set record. It contains up to three + * actions (depends on size of particular actions). The set is either + * put directly to a rule, or it is stored in KVD linear area. + * To prevent duplicate entries in KVD linear area, a hashtable is + * used to track sets that were previously inserted and may be shared. 
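+ * Sets of one block are chained together: every set that has a successor
+ * ends with a "next" record carrying the KVD linear index of the
+ * following set (see afa_set_next_action_set_ptr above).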
+ */ + +struct mlxsw_afa_set { + struct rhash_head ht_node; + struct mlxsw_afa_set_ht_key ht_key; + u32 kvdl_index; + bool shared; /* Inserted in hashtable (doesn't mean that + * kvdl_index is valid). + */ + unsigned int ref_count; + struct mlxsw_afa_set *next; /* Pointer to the next set. */ + struct mlxsw_afa_set *prev; /* Pointer to the previous set, + * note that set may have multiple + * sets from multiple blocks + * pointing at it. This is only + * usable until commit. + */ +}; + +static const struct rhashtable_params mlxsw_afa_set_ht_params = { + .key_len = sizeof(struct mlxsw_afa_set_ht_key), + .key_offset = offsetof(struct mlxsw_afa_set, ht_key), + .head_offset = offsetof(struct mlxsw_afa_set, ht_node), + .automatic_shrinking = true, +}; + +struct mlxsw_afa_fwd_entry_ht_key { + u8 local_port; +}; + +struct mlxsw_afa_fwd_entry { + struct rhash_head ht_node; + struct mlxsw_afa_fwd_entry_ht_key ht_key; + u32 kvdl_index; + unsigned int ref_count; +}; + +static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = { + .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key), + .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key), + .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node), + .automatic_shrinking = true, +}; + +struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, + const struct mlxsw_afa_ops *ops, + void *ops_priv) +{ + struct mlxsw_afa *mlxsw_afa; + int err; + + mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL); + if (!mlxsw_afa) + return ERR_PTR(-ENOMEM); + err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params); + if (err) + goto err_set_rhashtable_init; + err = rhashtable_init(&mlxsw_afa->fwd_entry_ht, + &mlxsw_afa_fwd_entry_ht_params); + if (err) + goto err_fwd_entry_rhashtable_init; + mlxsw_afa->max_acts_per_set = max_acts_per_set; + mlxsw_afa->ops = ops; + mlxsw_afa->ops_priv = ops_priv; + return mlxsw_afa; + +err_fwd_entry_rhashtable_init: + rhashtable_destroy(&mlxsw_afa->set_ht); +err_set_rhashtable_init: + kfree(mlxsw_afa); + return ERR_PTR(err); +} +EXPORT_SYMBOL(mlxsw_afa_create); + +void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa) +{ + rhashtable_destroy(&mlxsw_afa->fwd_entry_ht); + rhashtable_destroy(&mlxsw_afa->set_ht); + kfree(mlxsw_afa); +} +EXPORT_SYMBOL(mlxsw_afa_destroy); + +static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set, + enum mlxsw_afa_set_goto_binding_cmd cmd, + u16 group_id) +{ + char *actions = set->ht_key.enc_actions; + + mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO); + mlxsw_afa_set_goto_g_set(actions, true); + mlxsw_afa_set_goto_binding_cmd_set(actions, cmd); + mlxsw_afa_set_goto_next_binding_set(actions, group_id); +} + +static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set, + u32 next_set_kvdl_index) +{ + char *actions = set->ht_key.enc_actions; + + mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT); + mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index); +} + +static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first) +{ + struct mlxsw_afa_set *set; + + set = kzalloc(sizeof(*set), GFP_KERNEL); + if (!set) + return NULL; + /* Need to initialize the set to pass by default */ + mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); + set->ht_key.is_first = is_first; + set->ref_count = 1; + return set; +} + +static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set) +{ + kfree(set); +} + +static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + int err; + + err = 
rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + if (err) + return err; + err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv, + &set->kvdl_index, + set->ht_key.enc_actions, + set->ht_key.is_first); + if (err) + goto err_kvdl_set_add; + set->shared = true; + set->prev = NULL; + return 0; + +err_kvdl_set_add: + rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + return err; +} + +static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv, + set->kvdl_index, + set->ht_key.is_first); + rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + set->shared = false; +} + +static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + if (--set->ref_count) + return; + if (set->shared) + mlxsw_afa_set_unshare(mlxsw_afa, set); + mlxsw_afa_set_destroy(set); +} + +static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *orig_set) +{ + struct mlxsw_afa_set *set; + int err; + + /* There is a hashtable of sets maintained. If a set with the exact + * same encoding exists, we reuse it. Otherwise, the current set + * is shared by making it available to others using the hash table. + */ + set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key, + mlxsw_afa_set_ht_params); + if (set) { + set->ref_count++; + mlxsw_afa_set_put(mlxsw_afa, orig_set); + } else { + set = orig_set; + err = mlxsw_afa_set_share(mlxsw_afa, set); + if (err) + return ERR_PTR(err); + } + return set; +} + +/* Block structure holds a list of action sets. One action block + * represents one chain of actions executed upon match of a rule. + */ + +struct mlxsw_afa_block { + struct mlxsw_afa *afa; + bool finished; + struct mlxsw_afa_set *first_set; + struct mlxsw_afa_set *cur_set; + unsigned int cur_act_index; /* In current set. */ + struct list_head fwd_entry_ref_list; +}; + +struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) +{ + struct mlxsw_afa_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + INIT_LIST_HEAD(&block->fwd_entry_ref_list); + block->afa = mlxsw_afa; + + /* At least one action set is always present, so just create it here */ + block->first_set = mlxsw_afa_set_create(true); + if (!block->first_set) + goto err_first_set_create; + block->cur_set = block->first_set; + return block; + +err_first_set_create: + kfree(block); + return NULL; +} +EXPORT_SYMBOL(mlxsw_afa_block_create); + +static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block); + +void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_set *set = block->first_set; + struct mlxsw_afa_set *next_set; + + do { + next_set = set->next; + mlxsw_afa_set_put(block->afa, set); + set = next_set; + } while (set); + mlxsw_afa_fwd_entry_refs_destroy(block); + kfree(block); +} +EXPORT_SYMBOL(mlxsw_afa_block_destroy); + +int mlxsw_afa_block_commit(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_set *set = block->cur_set; + struct mlxsw_afa_set *prev_set; + int err; + + block->cur_set = NULL; + + /* Go over all linked sets starting from last + * and try to find existing set in the hash table. + * In case it is not there, assign a KVD linear index + * and insert it. 
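+	 * Walking from the last set backwards guarantees that a set's
+	 * kvdl_index is already known when the "next" pointer of its
+	 * predecessor is encoded.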
+ */ + do { + prev_set = set->prev; + set = mlxsw_afa_set_get(block->afa, set); + if (IS_ERR(set)) { + err = PTR_ERR(set); + goto rollback; + } + if (prev_set) { + prev_set->next = set; + mlxsw_afa_set_next_set(prev_set, set->kvdl_index); + set = prev_set; + } + } while (prev_set); + + block->first_set = set; + block->finished = true; + return 0; + +rollback: + while ((set = set->next)) + mlxsw_afa_set_put(block->afa, set); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_commit); + +char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) +{ + return block->first_set->ht_key.enc_actions; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_set); + +u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) +{ + return block->first_set->kvdl_index; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); + +void mlxsw_afa_block_continue(struct mlxsw_afa_block *block) +{ + if (WARN_ON(block->finished)) + return; + mlxsw_afa_set_goto_set(block->cur_set, + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0); + block->finished = true; +} +EXPORT_SYMBOL(mlxsw_afa_block_continue); + +void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) +{ + if (WARN_ON(block->finished)) + return; + mlxsw_afa_set_goto_set(block->cur_set, + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id); + block->finished = true; +} +EXPORT_SYMBOL(mlxsw_afa_block_jump); + +static struct mlxsw_afa_fwd_entry * +mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) +{ + struct mlxsw_afa_fwd_entry *fwd_entry; + int err; + + fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL); + if (!fwd_entry) + return ERR_PTR(-ENOMEM); + fwd_entry->ht_key.local_port = local_port; + fwd_entry->ref_count = 1; + + err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht, + &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); + if (err) + goto err_rhashtable_insert; + + err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv, + &fwd_entry->kvdl_index, + local_port); + if (err) + goto err_kvdl_fwd_entry_add; + return fwd_entry; + +err_kvdl_fwd_entry_add: + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); +err_rhashtable_insert: + kfree(fwd_entry); + return ERR_PTR(err); +} + +static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_fwd_entry *fwd_entry) +{ + mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv, + fwd_entry->kvdl_index); + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); + kfree(fwd_entry); +} + +static struct mlxsw_afa_fwd_entry * +mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port) +{ + struct mlxsw_afa_fwd_entry_ht_key ht_key = {0}; + struct mlxsw_afa_fwd_entry *fwd_entry; + + ht_key.local_port = local_port; + fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key, + mlxsw_afa_fwd_entry_ht_params); + if (fwd_entry) { + fwd_entry->ref_count++; + return fwd_entry; + } + return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port); +} + +static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_fwd_entry *fwd_entry) +{ + if (--fwd_entry->ref_count) + return; + mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry); +} + +struct mlxsw_afa_fwd_entry_ref { + struct list_head list; + struct mlxsw_afa_fwd_entry *fwd_entry; +}; + +static struct mlxsw_afa_fwd_entry_ref * +mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + struct mlxsw_afa_fwd_entry 
*fwd_entry; + int err; + + fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL); + if (!fwd_entry_ref) + return ERR_PTR(-ENOMEM); + fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port); + if (IS_ERR(fwd_entry)) { + err = PTR_ERR(fwd_entry); + goto err_fwd_entry_get; + } + fwd_entry_ref->fwd_entry = fwd_entry; + list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list); + return fwd_entry_ref; + +err_fwd_entry_get: + kfree(fwd_entry_ref); + return ERR_PTR(err); +} + +static void +mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) +{ + list_del(&fwd_entry_ref->list); + mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); + kfree(fwd_entry_ref); +} + +static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + struct mlxsw_afa_fwd_entry_ref *tmp; + + list_for_each_entry_safe(fwd_entry_ref, tmp, + &block->fwd_entry_ref_list, list) + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); +} + +#define MLXSW_AFA_ONE_ACTION_LEN 32 +#define MLXSW_AFA_PAYLOAD_OFFSET 4 + +static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, + u8 action_code, u8 action_size) +{ + char *oneact; + char *actions; + + if (WARN_ON(block->finished)) + return NULL; + if (block->cur_act_index + action_size > + block->afa->max_acts_per_set) { + struct mlxsw_afa_set *set; + + /* The appended action won't fit into the current action set, + * so create a new set. + */ + set = mlxsw_afa_set_create(false); + if (!set) + return NULL; + set->prev = block->cur_set; + block->cur_act_index = 0; + block->cur_set->next = set; + block->cur_set = set; + } + + actions = block->cur_set->ht_key.enc_actions; + oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN; + block->cur_act_index += action_size; + mlxsw_afa_all_action_type_set(oneact, action_code); + return oneact + MLXSW_AFA_PAYLOAD_OFFSET; +} + +/* Trap / Discard Action + * --------------------- + * The Trap / Discard action enables trapping / mirroring packets to the CPU + * as well as discarding packets. + * The ACL Trap / Discard separates the forward/discard control from CPU + * trap control. In addition, the Trap / Discard action enables activating + * SPAN (port mirroring). + */ + +#define MLXSW_AFA_TRAPDISC_CODE 0x03 +#define MLXSW_AFA_TRAPDISC_SIZE 1 + +enum mlxsw_afa_trapdisc_forward_action { + MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3, +}; + +/* afa_trapdisc_forward_action + * Forward Action. + */ +MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4); + +static inline void +mlxsw_afa_trapdisc_pack(char *payload, + enum mlxsw_afa_trapdisc_forward_action forward_action) +{ + mlxsw_afa_trapdisc_forward_action_set(payload, forward_action); +} + +int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_TRAPDISC_CODE, + MLXSW_AFA_TRAPDISC_SIZE); + + if (!act) + return -ENOBUFS; + mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_drop); + +/* Forwarding Action + * ----------------- + * Forwarding Action can be used to implement Policy Based Switching (PBS) + * as well as OpenFlow related "Output" action. 
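+ * Unless in_port is set (forward back to the ingress port), the action
+ * carries a PBS pointer to a KVD linear entry configured via the PPBS
+ * register.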
+ */ + +#define MLXSW_AFA_FORWARD_CODE 0x07 +#define MLXSW_AFA_FORWARD_SIZE 1 + +enum mlxsw_afa_forward_type { + /* PBS, Policy Based Switching */ + MLXSW_AFA_FORWARD_TYPE_PBS, + /* Output, OpenFlow output type */ + MLXSW_AFA_FORWARD_TYPE_OUTPUT, +}; + +/* afa_forward_type */ +MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2); + +/* afa_forward_pbs_ptr + * A pointer to the PBS entry configured by PPBS register. + * Reserved when in_port is set. + */ +MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24); + +/* afa_forward_in_port + * Packet is forwarded back to the ingress port. + */ +MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1); + +static inline void +mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, + u32 pbs_ptr, bool in_port) +{ + mlxsw_afa_forward_type_set(payload, type); + mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr); + mlxsw_afa_forward_in_port_set(payload, in_port); +} + +int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, + u8 local_port, bool in_port) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + u32 kvdl_index = 0; + char *act; + int err; + + if (!in_port) { + fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, + local_port); + if (IS_ERR(fwd_entry_ref)) + return PTR_ERR(fwd_entry_ref); + kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; + } + + act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, + MLXSW_AFA_FORWARD_SIZE); + if (!act) { + err = -ENOBUFS; + goto err_append_action; + } + mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_OUTPUT, + kvdl_index, in_port); + return 0; + +err_append_action: + if (!in_port) + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h new file mode 100644 index 000000000000..43f78dcfe394 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -0,0 +1,66 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H +#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H + +#include + +struct mlxsw_afa; +struct mlxsw_afa_block; + +struct mlxsw_afa_ops { + int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first); + void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); + int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); + void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); +}; + +struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, + const struct mlxsw_afa_ops *ops, + void *ops_priv); +void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa); +struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); +void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); +int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); +char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); +u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); +void mlxsw_afa_block_continue(struct mlxsw_afa_block *block); +void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); +int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); +int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, + u8 local_port, bool in_port); + +#endif -- cgit v1.2.3 From b876b9aaadce9c6bb47508c4b31efc474b70f179 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:03 +0100 Subject: mlxsw: spectrum: Introduce basic set of flexible key blocks Introduce basic set of Spectrum flexible key blocks. It contains blocks needed to carry all elements defined so far. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum_acl_flex_keys.h | 109 +++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h new file mode 100644 index 000000000000..82b81cf7f4a7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -0,0 +1,109 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H +#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H + +#include "core_acl_flex_keys.h" + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { + MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), +}; + +static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), + 
MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), + MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), + MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), + MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), + MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), + MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), + MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), + MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), +}; + +#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks) + +#endif -- cgit v1.2.3 From 8708ecf01d454607f15fe1b378c8b88de3b9c34b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:04 +0100 Subject: mlxsw: resources: Add ACL related resources Add couple of resource limits related to ACL. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/resources.h | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 3c2171dbdba4..bce8c2e00630 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/resources.h - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko + * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2017 Jiri Pirko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -48,6 +48,14 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_BUFFER_SIZE, + MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, + MLXSW_RES_ID_ACL_MAX_TCAM_RULES, + MLXSW_RES_ID_ACL_MAX_REGIONS, + MLXSW_RES_ID_ACL_MAX_GROUPS, + MLXSW_RES_ID_ACL_MAX_GROUP_SIZE, + MLXSW_RES_ID_ACL_FLEX_KEYS, + MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, + MLXSW_RES_ID_ACL_ACTIONS_PER_SET, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -72,6 +80,14 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ + [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, + [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, + [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, + [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904, + [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905, + [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, + [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, + [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, -- cgit v1.2.3 From b862815c3ee7b49ec20a9ab25da55a5f0bcbb95e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:05 +0100 Subject: list: introduce list_for_each_entry_from_reverse helper Similar to list_for_each_entry_continue and its reverse variant list_for_each_entry_continue_reverse, introduce reverse helper for list_for_each_entry_from. Signed-off-by: Jiri Pirko Acked-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/linux/list.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/include/linux/list.h b/include/linux/list.h index d1039ecaf94f..ae537fa46216 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -526,6 +526,19 @@ static inline void list_splice_tail_init(struct list_head *list, for (; &pos->member != (head); \ pos = list_next_entry(pos, member)) +/** + * list_for_each_entry_from_reverse - iterate backwards over list of given type + * from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate backwards over list of given type, continuing from current position. + */ +#define list_for_each_entry_from_reverse(pos, head, member) \ + for (; &pos->member != (head); \ + pos = list_prev_entry(pos, member)) + /** * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. -- cgit v1.2.3 From 44091d29f2075972aede47ef17e1e70db3d51190 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:06 +0100 Subject: lib: Introduce priority array area manager This introduces a infrastructure for management of linear priority areas. Priority order in an array matters, however order of items inside a priority group does not matter. As an initial implementation, L-sort algorithm is used. It is quite trivial. More advanced algorithm called P-sort will be introduced as a follow-up. The infrastructure is prepared for other algos. Alongside this, a testing module is introduced as well. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- MAINTAINERS | 8 + include/linux/parman.h | 76 ++++++++++ lib/Kconfig | 3 + lib/Kconfig.debug | 10 ++ lib/Makefile | 3 + lib/parman.c | 376 ++++++++++++++++++++++++++++++++++++++++++++++ lib/test_parman.c | 395 +++++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 871 insertions(+) create mode 100644 include/linux/parman.h create mode 100644 lib/parman.c create mode 100644 lib/test_parman.c diff --git a/MAINTAINERS b/MAINTAINERS index c15dc53f9d66..a9368bba9b37 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9382,6 +9382,14 @@ F: drivers/video/fbdev/sti* F: drivers/video/console/sti* F: drivers/video/logo/logo_parisc* +PARMAN +M: Jiri Pirko +L: netdev@vger.kernel.org +S: Supported +F: lib/parman.c +F: lib/test_parman.c +F: include/linux/parman.h + PC87360 HARDWARE MONITORING DRIVER M: Jim Cromie L: linux-hwmon@vger.kernel.org diff --git a/include/linux/parman.h b/include/linux/parman.h new file mode 100644 index 000000000000..3c8cccc7d4da --- /dev/null +++ b/include/linux/parman.h @@ -0,0 +1,76 @@ +/* + * include/linux/parman.h - Manager for linear priority array areas + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PARMAN_H +#define _PARMAN_H + +#include + +enum parman_algo_type { + PARMAN_ALGO_TYPE_LSORT, +}; + +struct parman_item { + struct list_head list; + unsigned long index; +}; + +struct parman_prio { + struct list_head list; + struct list_head item_list; + unsigned long priority; +}; + +struct parman_ops { + unsigned long base_count; + unsigned long resize_step; + int (*resize)(void *priv, unsigned long new_count); + void (*move)(void *priv, unsigned long from_index, + unsigned long to_index, unsigned long count); + enum parman_algo_type algo; +}; + +struct parman; + +struct parman *parman_create(const struct parman_ops *ops, void *priv); +void parman_destroy(struct parman *parman); +void parman_prio_init(struct parman *parman, struct parman_prio *prio, + unsigned long priority); +void parman_prio_fini(struct parman_prio *prio); +int parman_item_add(struct parman *parman, struct parman_prio *prio, + struct parman_item *item); +void parman_item_remove(struct parman *parman, struct parman_prio *prio, + struct parman_item *item); + +#endif diff --git a/lib/Kconfig b/lib/Kconfig index 260a80e313b9..5d644f180fe5 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -550,4 +550,7 @@ config STACKDEPOT config SBITMAP bool +config PARMAN + tristate "parman" + endmenu diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 15969abf00a6..433a788500be 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1826,6 +1826,16 @@ config TEST_HASH This is intended to help people writing architecture-specific optimized versions. If unsure, say N. +config TEST_PARMAN + tristate "Perform selftest on priority array manager" + default n + depends on PARMAN + help + Enable this option to test priority array manager on boot + (or module load). + + If unsure, say N. 
+ endmenu # runtime tests config PROVIDE_OHCI1394_DMA_INIT diff --git a/lib/Makefile b/lib/Makefile index 7b3008d58600..1c039a4f60e5 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o obj-$(CONFIG_TEST_UUID) += test_uuid.o +obj-$(CONFIG_TEST_PARMAN) += test_parman.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -230,3 +231,5 @@ obj-$(CONFIG_UBSAN) += ubsan.o UBSAN_SANITIZE_ubsan.o := n obj-$(CONFIG_SBITMAP) += sbitmap.o + +obj-$(CONFIG_PARMAN) += parman.o diff --git a/lib/parman.c b/lib/parman.c new file mode 100644 index 000000000000..c6e42a8db824 --- /dev/null +++ b/lib/parman.c @@ -0,0 +1,376 @@ +/* + * lib/parman.c - Manager for linear priority array areas + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +struct parman_algo { + int (*item_add)(struct parman *parman, struct parman_prio *prio, + struct parman_item *item); + void (*item_remove)(struct parman *parman, struct parman_prio *prio, + struct parman_item *item); +}; + +struct parman { + const struct parman_ops *ops; + void *priv; + const struct parman_algo *algo; + unsigned long count; + unsigned long limit_count; + struct list_head prio_list; +}; + +static int parman_enlarge(struct parman *parman) +{ + unsigned long new_count = parman->limit_count + + parman->ops->resize_step; + int err; + + err = parman->ops->resize(parman->priv, new_count); + if (err) + return err; + parman->limit_count = new_count; + return 0; +} + +static int parman_shrink(struct parman *parman) +{ + unsigned long new_count = parman->limit_count - + parman->ops->resize_step; + int err; + + if (new_count < parman->ops->base_count) + return 0; + err = parman->ops->resize(parman->priv, new_count); + if (err) + return err; + parman->limit_count = new_count; + return 0; +} + +static bool parman_prio_used(struct parman_prio *prio) + +{ + return !list_empty(&prio->item_list); +} + +static struct parman_item *parman_prio_first_item(struct parman_prio *prio) +{ + return list_first_entry(&prio->item_list, + typeof(struct parman_item), list); +} + +static unsigned long parman_prio_first_index(struct parman_prio *prio) +{ + return parman_prio_first_item(prio)->index; +} + +static struct parman_item *parman_prio_last_item(struct parman_prio *prio) +{ + return list_last_entry(&prio->item_list, + typeof(struct parman_item), list); +} + +static unsigned long parman_prio_last_index(struct parman_prio *prio) +{ + return parman_prio_last_item(prio)->index; +} + +static unsigned long parman_lsort_new_index_find(struct parman *parman, + struct parman_prio *prio) +{ + list_for_each_entry_from_reverse(prio, &parman->prio_list, list) { + if (!parman_prio_used(prio)) + continue; + return parman_prio_last_index(prio) + 1; + } + return 0; +} + +static void __parman_prio_move(struct parman *parman, struct parman_prio *prio, + struct parman_item *item, unsigned long to_index, + unsigned long count) +{ + parman->ops->move(parman->priv, item->index, to_index, count); +} + +static void parman_prio_shift_down(struct parman *parman, + struct parman_prio *prio) +{ + struct parman_item *item; + unsigned long to_index; + + if (!parman_prio_used(prio)) + return; + item = parman_prio_first_item(prio); + to_index = parman_prio_last_index(prio) + 1; + __parman_prio_move(parman, prio, item, to_index, 1); + list_move_tail(&item->list, &prio->item_list); + item->index = to_index; +} + +static void parman_prio_shift_up(struct parman *parman, + struct parman_prio *prio) +{ + struct parman_item *item; + unsigned long to_index; + + if (!parman_prio_used(prio)) + return; + item = parman_prio_last_item(prio); + to_index = parman_prio_first_index(prio) - 1; + __parman_prio_move(parman, prio, item, to_index, 1); + list_move(&item->list, &prio->item_list); + item->index = to_index; +} + +static void parman_prio_item_remove(struct parman *parman, + struct parman_prio *prio, + struct parman_item *item) +{ + struct parman_item *last_item; + unsigned long to_index; + + last_item = parman_prio_last_item(prio); + if (last_item == item) { + list_del(&item->list); + return; + } + to_index = item->index; + __parman_prio_move(parman, prio, last_item, to_index, 1); + list_del(&last_item->list); + list_replace(&item->list, &last_item->list); + 
last_item->index = to_index; +} + +static int parman_lsort_item_add(struct parman *parman, + struct parman_prio *prio, + struct parman_item *item) +{ + struct parman_prio *prio2; + unsigned long new_index; + int err; + + if (parman->count + 1 > parman->limit_count) { + err = parman_enlarge(parman); + if (err) + return err; + } + + new_index = parman_lsort_new_index_find(parman, prio); + list_for_each_entry_reverse(prio2, &parman->prio_list, list) { + if (prio2 == prio) + break; + parman_prio_shift_down(parman, prio2); + } + item->index = new_index; + list_add_tail(&item->list, &prio->item_list); + parman->count++; + return 0; +} + +static void parman_lsort_item_remove(struct parman *parman, + struct parman_prio *prio, + struct parman_item *item) +{ + parman_prio_item_remove(parman, prio, item); + list_for_each_entry_continue(prio, &parman->prio_list, list) + parman_prio_shift_up(parman, prio); + parman->count--; + if (parman->limit_count - parman->count >= parman->ops->resize_step) + parman_shrink(parman); +} + +static const struct parman_algo parman_lsort = { + .item_add = parman_lsort_item_add, + .item_remove = parman_lsort_item_remove, +}; + +static const struct parman_algo *parman_algos[] = { + &parman_lsort, +}; + +/** + * parman_create - creates a new parman instance + * @ops: caller-specific callbacks + * @priv: pointer to a private data passed to the ops + * + * Note: all locking must be provided by the caller. + * + * Each parman instance manages an array area with chunks of entries + * with the same priority. Consider following example: + * + * item 1 with prio 10 + * item 2 with prio 10 + * item 3 with prio 10 + * item 4 with prio 20 + * item 5 with prio 20 + * item 6 with prio 30 + * item 7 with prio 30 + * item 8 with prio 30 + * + * In this example, there are 3 priority chunks. The order of the priorities + * matters, however the order of items within a single priority chunk does not + * matter. So the same array could be ordered as follows: + * + * item 2 with prio 10 + * item 3 with prio 10 + * item 1 with prio 10 + * item 5 with prio 20 + * item 4 with prio 20 + * item 7 with prio 30 + * item 8 with prio 30 + * item 6 with prio 30 + * + * The goal of parman is to maintain the priority ordering. The caller + * provides @ops with callbacks parman uses to move the items + * and resize the array area. + * + * Returns a pointer to newly created parman instance in case of success, + * otherwise it returns NULL. + */ +struct parman *parman_create(const struct parman_ops *ops, void *priv) +{ + struct parman *parman; + + parman = kzalloc(sizeof(*parman), GFP_KERNEL); + if (!parman) + return NULL; + INIT_LIST_HEAD(&parman->prio_list); + parman->ops = ops; + parman->priv = priv; + parman->limit_count = ops->base_count; + parman->algo = parman_algos[ops->algo]; + return parman; +} +EXPORT_SYMBOL(parman_create); + +/** + * parman_destroy - destroys existing parman instance + * @parman: parman instance + * + * Note: all locking must be provided by the caller. + */ +void parman_destroy(struct parman *parman) +{ + WARN_ON(!list_empty(&parman->prio_list)); + kfree(parman); +} +EXPORT_SYMBOL(parman_destroy); + +/** + * parman_prio_init - initializes a parman priority chunk + * @parman: parman instance + * @prio: parman prio structure to be initialized + * @prority: desired priority of the chunk + * + * Note: all locking must be provided by the caller. + * + * Before caller could add an item with certain priority, he has to + * initialize a priority chunk for it using this function. 
+ */ +void parman_prio_init(struct parman *parman, struct parman_prio *prio, + unsigned long priority) +{ + struct parman_prio *prio2; + struct list_head *pos; + + INIT_LIST_HEAD(&prio->item_list); + prio->priority = priority; + + /* Position inside the list according to priority */ + list_for_each(pos, &parman->prio_list) { + prio2 = list_entry(pos, typeof(*prio2), list); + if (prio2->priority > prio->priority) + break; + } + list_add_tail(&prio->list, pos); +} +EXPORT_SYMBOL(parman_prio_init); + +/** + * parman_prio_fini - finalizes use of parman priority chunk + * @prio: parman prio structure + * + * Note: all locking must be provided by the caller. + */ +void parman_prio_fini(struct parman_prio *prio) +{ + WARN_ON(parman_prio_used(prio)); + list_del(&prio->list); +} +EXPORT_SYMBOL(parman_prio_fini); + +/** + * parman_item_add - adds a parman item under defined priority + * @parman: parman instance + * @prio: parman prio instance to add the item to + * @item: parman item instance + * + * Note: all locking must be provided by the caller. + * + * Adds item to a array managed by parman instance under the specified priority. + * + * Returns 0 in case of success, negative number to indicate an error. + */ +int parman_item_add(struct parman *parman, struct parman_prio *prio, + struct parman_item *item) +{ + return parman->algo->item_add(parman, prio, item); +} +EXPORT_SYMBOL(parman_item_add); + +/** + * parman_item_del - deletes parman item + * @parman: parman instance + * @prio: parman prio instance to delete the item from + * @item: parman item instance + * + * Note: all locking must be provided by the caller. + */ +void parman_item_remove(struct parman *parman, struct parman_prio *prio, + struct parman_item *item) +{ + parman->algo->item_remove(parman, prio, item); +} +EXPORT_SYMBOL(parman_item_remove); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko "); +MODULE_DESCRIPTION("Priority-based array manager"); diff --git a/lib/test_parman.c b/lib/test_parman.c new file mode 100644 index 000000000000..fe9f3a785804 --- /dev/null +++ b/lib/test_parman.c @@ -0,0 +1,395 @@ +/* + * lib/test_parman.c - Test module for parman + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
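To make the split of responsibilities concrete, a minimal caller-side sketch is shown below; it is illustrative only and not part of the patch. struct my_table, struct my_entry and the my_table_* helpers are invented for the example, and each struct parman_prio would still be set up with parman_prio_init() before items are added under it. lib/test_parman.c, which follows, exercises the same pattern against an array of pointers.

	#include <linux/errno.h>
	#include <linux/parman.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	#define MY_TABLE_BASE_COUNT	16
	#define MY_TABLE_RESIZE_STEP	16

	struct my_entry;			/* whatever the caller keeps per slot */

	struct my_table {
		struct parman *parman;
		struct my_entry **slots;	/* linear area managed by parman */
		unsigned long limit;		/* current number of slots */
	};

	/* Called back by parman when the area has to grow or shrink. */
	static int my_table_resize(void *priv, unsigned long new_count)
	{
		struct my_table *tbl = priv;
		struct my_entry **slots;

		slots = krealloc(tbl->slots, new_count * sizeof(*slots),
				 GFP_KERNEL);
		if (!slots)
			return -ENOMEM;
		if (new_count > tbl->limit)
			memset(&slots[tbl->limit], 0,
			       (new_count - tbl->limit) * sizeof(*slots));
		tbl->slots = slots;
		tbl->limit = new_count;
		return 0;
	}

	/* Called back by parman when items have to be shifted in the area. */
	static void my_table_move(void *priv, unsigned long from_index,
				  unsigned long to_index, unsigned long count)
	{
		struct my_table *tbl = priv;

		memmove(&tbl->slots[to_index], &tbl->slots[from_index],
			count * sizeof(*tbl->slots));
	}

	static const struct parman_ops my_table_parman_ops = {
		.base_count	= MY_TABLE_BASE_COUNT,
		.resize_step	= MY_TABLE_RESIZE_STEP,
		.resize		= my_table_resize,
		.move		= my_table_move,
		.algo		= PARMAN_ALGO_TYPE_LSORT,
	};

	/* tbl is assumed to be zero-initialized by the caller. */
	static int my_table_init(struct my_table *tbl)
	{
		int err;

		/* parman expects base_count slots to exist up front;
		 * resize is only called when growing beyond that.
		 */
		err = my_table_resize(tbl, MY_TABLE_BASE_COUNT);
		if (err)
			return err;
		tbl->parman = parman_create(&my_table_parman_ops, tbl);
		if (!tbl->parman) {
			kfree(tbl->slots);
			return -ENOMEM;
		}
		return 0;
	}

	static int my_table_insert(struct my_table *tbl,
				   struct parman_prio *prio,
				   struct parman_item *item,
				   struct my_entry *entry)
	{
		int err;

		/* parman picks the slot; it is returned in item->index */
		err = parman_item_add(tbl->parman, prio, item);
		if (err)
			return err;
		tbl->slots[item->index] = entry;
		return 0;
	}
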
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */ +#define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT) +#define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1) + +#define TEST_PARMAN_ITEM_SHIFT 13 /* defines a total number + * of items for testing + */ +#define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT) +#define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1) + +#define TEST_PARMAN_BASE_SHIFT 8 +#define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT) +#define TEST_PARMAN_RESIZE_STEP_SHIFT 7 +#define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT) + +#define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT) +#define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT) +#define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1) + +#define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256) + +struct test_parman_prio { + struct parman_prio parman_prio; + unsigned long priority; +}; + +struct test_parman_item { + struct parman_item parman_item; + struct test_parman_prio *prio; + bool used; +}; + +struct test_parman { + struct parman *parman; + struct test_parman_item **prio_array; + unsigned long prio_array_limit; + struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT]; + struct test_parman_item items[TEST_PARMAN_ITEM_COUNT]; + struct rnd_state rnd; + unsigned long run_budget; + unsigned long bulk_budget; + bool bulk_noop; + unsigned int used_items; +}; + +#define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count)) + +static int test_parman_resize(void *priv, unsigned long new_count) +{ + struct test_parman *test_parman = priv; + struct test_parman_item **prio_array; + unsigned long old_count; + + prio_array = krealloc(test_parman->prio_array, + ITEM_PTRS_SIZE(new_count), GFP_KERNEL); + if (new_count == 0) + return 0; + if (!prio_array) + return -ENOMEM; + old_count = test_parman->prio_array_limit; + if (new_count > old_count) + memset(&prio_array[old_count], 0, + ITEM_PTRS_SIZE(new_count - old_count)); + test_parman->prio_array = prio_array; + test_parman->prio_array_limit = new_count; + return 0; +} + +static void test_parman_move(void *priv, unsigned long from_index, + unsigned long to_index, unsigned long count) +{ + struct test_parman *test_parman = priv; + struct test_parman_item **prio_array = test_parman->prio_array; + + memmove(&prio_array[to_index], &prio_array[from_index], + ITEM_PTRS_SIZE(count)); + memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count)); +} + +static const struct parman_ops test_parman_lsort_ops = { + .base_count = TEST_PARMAN_BASE_COUNT, + .resize_step = TEST_PARMAN_RESIZE_STEP_COUNT, + .resize = test_parman_resize, + .move = test_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +static void test_parman_rnd_init(struct test_parman *test_parman) +{ + prandom_seed_state(&test_parman->rnd, 
3141592653589793238ULL); +} + +static u32 test_parman_rnd_get(struct test_parman *test_parman) +{ + return prandom_u32_state(&test_parman->rnd); +} + +static unsigned long test_parman_priority_gen(struct test_parman *test_parman) +{ + unsigned long priority; + int i; + +again: + priority = test_parman_rnd_get(test_parman); + if (priority == 0) + goto again; + + for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { + struct test_parman_prio *prio = &test_parman->prios[i]; + + if (prio->priority == 0) + break; + if (prio->priority == priority) + goto again; + } + return priority; +} + +static void test_parman_prios_init(struct test_parman *test_parman) +{ + int i; + + for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { + struct test_parman_prio *prio = &test_parman->prios[i]; + + /* Assign random uniqueue priority to each prio structure */ + prio->priority = test_parman_priority_gen(test_parman); + parman_prio_init(test_parman->parman, &prio->parman_prio, + prio->priority); + } +} + +static void test_parman_prios_fini(struct test_parman *test_parman) +{ + int i; + + for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { + struct test_parman_prio *prio = &test_parman->prios[i]; + + parman_prio_fini(&prio->parman_prio); + } +} + +static void test_parman_items_init(struct test_parman *test_parman) +{ + int i; + + for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { + struct test_parman_item *item = &test_parman->items[i]; + unsigned int prio_index = test_parman_rnd_get(test_parman) & + TEST_PARMAN_PRIO_MASK; + + /* Assign random prio to each item structure */ + item->prio = &test_parman->prios[prio_index]; + } +} + +static void test_parman_items_fini(struct test_parman *test_parman) +{ + int i; + + for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { + struct test_parman_item *item = &test_parman->items[i]; + + if (!item->used) + continue; + parman_item_remove(test_parman->parman, + &item->prio->parman_prio, + &item->parman_item); + } +} + +static struct test_parman *test_parman_create(const struct parman_ops *ops) +{ + struct test_parman *test_parman; + int err; + + test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL); + if (!test_parman) + return ERR_PTR(-ENOMEM); + err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT); + if (err) + goto err_resize; + test_parman->parman = parman_create(ops, test_parman); + if (!test_parman->parman) { + err = -ENOMEM; + goto err_parman_create; + } + test_parman_rnd_init(test_parman); + test_parman_prios_init(test_parman); + test_parman_items_init(test_parman); + test_parman->run_budget = TEST_PARMAN_RUN_BUDGET; + return test_parman; + +err_parman_create: + test_parman_resize(test_parman, 0); +err_resize: + kfree(test_parman); + return ERR_PTR(err); +} + +static void test_parman_destroy(struct test_parman *test_parman) +{ + test_parman_items_fini(test_parman); + test_parman_prios_fini(test_parman); + parman_destroy(test_parman->parman); + test_parman_resize(test_parman, 0); + kfree(test_parman); +} + +static bool test_parman_run_check_budgets(struct test_parman *test_parman) +{ + if (test_parman->run_budget-- == 0) + return false; + if (test_parman->bulk_budget-- != 0) + return true; + + test_parman->bulk_budget = test_parman_rnd_get(test_parman) & + TEST_PARMAN_BULK_MAX_MASK; + test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1; + return true; +} + +static int test_parman_run(struct test_parman *test_parman) +{ + unsigned int i = test_parman_rnd_get(test_parman); + int err; + + while (test_parman_run_check_budgets(test_parman)) { + unsigned int item_index = i++ & 
TEST_PARMAN_ITEM_MASK; + struct test_parman_item *item = &test_parman->items[item_index]; + + if (test_parman->bulk_noop) + continue; + + if (!item->used) { + err = parman_item_add(test_parman->parman, + &item->prio->parman_prio, + &item->parman_item); + if (err) + return err; + test_parman->prio_array[item->parman_item.index] = item; + test_parman->used_items++; + } else { + test_parman->prio_array[item->parman_item.index] = NULL; + parman_item_remove(test_parman->parman, + &item->prio->parman_prio, + &item->parman_item); + test_parman->used_items--; + } + item->used = !item->used; + } + return 0; +} + +static int test_parman_check_array(struct test_parman *test_parman, + bool gaps_allowed) +{ + unsigned int last_unused_items = 0; + unsigned long last_priority = 0; + unsigned int used_items = 0; + int i; + + if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) { + pr_err("Array limit is lower than the base count (%lu < %lu)\n", + test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT); + return -EINVAL; + } + + for (i = 0; i < test_parman->prio_array_limit; i++) { + struct test_parman_item *item = test_parman->prio_array[i]; + + if (!item) { + last_unused_items++; + continue; + } + if (last_unused_items && !gaps_allowed) { + pr_err("Gap found in array even though they are forbidden\n"); + return -EINVAL; + } + + last_unused_items = 0; + used_items++; + + if (item->prio->priority < last_priority) { + pr_err("Item belongs under higher priority then the last one (current: %lu, previous: %lu)\n", + item->prio->priority, last_priority); + return -EINVAL; + } + last_priority = item->prio->priority; + + if (item->parman_item.index != i) { + pr_err("Item has different index in compare to where it actualy is (%lu != %d)\n", + item->parman_item.index, i); + return -EINVAL; + } + } + + if (used_items != test_parman->used_items) { + pr_err("Number of used items in array does not match (%u != %u)\n", + used_items, test_parman->used_items); + return -EINVAL; + } + + if (last_unused_items >= TEST_PARMAN_RESIZE_STEP_COUNT) { + pr_err("Number of unused item at the end of array is bigger than resize step (%u >= %lu)\n", + last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT); + return -EINVAL; + } + + pr_info("Priority array check successful\n"); + + return 0; +} + +static int test_parman_lsort(void) +{ + struct test_parman *test_parman; + int err; + + test_parman = test_parman_create(&test_parman_lsort_ops); + if (IS_ERR(test_parman)) + return PTR_ERR(test_parman); + + err = test_parman_run(test_parman); + if (err) + goto out; + + err = test_parman_check_array(test_parman, false); + if (err) + goto out; +out: + test_parman_destroy(test_parman); + return err; +} + +static int __init test_parman_init(void) +{ + return test_parman_lsort(); +} + +static void __exit test_parman_exit(void) +{ +} + +module_init(test_parman_init); +module_exit(test_parman_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko "); +MODULE_DESCRIPTION("Test module for parman"); -- cgit v1.2.3 From 22a677661f5624539d394f681276171f92d714df Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:07 +0100 Subject: mlxsw: spectrum: Introduce ACL core with simple TCAM implementation Add ACL core infrastructure for Spectrum ASIC. This infra provides an abstraction layer over specific HW implementations. There are two basic objects used. One is "rule" and the second is "ruleset" which serves as a container of multiple rules. 
In general, within one ruleset the rules are allowed to have multiple priorities and masks. Each ruleset is bound to either ingress or egress a of port netdevice. The initial TCAM implementation is very simple and limited. It utilizes parman lsort manager to take care of TCAM region layout. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1 + drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 17 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 100 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 572 +++++++++++ .../ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 1084 ++++++++++++++++++++ 6 files changed, 1769 insertions(+), 8 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 16f44b9aa076..76a7574c3c7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -73,6 +73,7 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q + select PARMAN default m ---help--- This driver supports Mellanox Technologies Spectrum Ethernet diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index c4c48baf0c3f..1459716aa06d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -14,7 +14,8 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum_kvdl.o + spectrum_kvdl.o spectrum_acl.o \ + spectrum_acl_tcam.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 467aa5288935..b1d77e13d1c2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015-2017 Jiri Pirko * Copyright (c) 2015 Ido Schimmel * Copyright (c) 2015 Elad Raz * @@ -138,8 +138,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); -static bool mlxsw_sp_port_dev_check(const struct net_device *dev); - static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -3203,6 +3201,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_span_init; } + err = mlxsw_sp_acl_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); + goto err_acl_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -3212,6 +3216,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + mlxsw_sp_acl_fini(mlxsw_sp); +err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: mlxsw_sp_router_fini(mlxsw_sp); @@ -3232,6 +3238,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -3297,7 +3304,7 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; -static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +bool mlxsw_sp_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index bc3efe1629c4..cd9b4b2d0980 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015-2017 Jiri Pirko * Copyright (c) 2015 Ido Schimmel * Copyright (c) 2015 Elad Raz * @@ -50,6 +50,8 @@ #include "port.h" #include "core.h" +#include "core_acl_flex_keys.h" +#include "core_acl_flex_actions.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID #define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */ @@ -262,6 +264,8 @@ struct mlxsw_sp_router { bool aborted; }; +struct mlxsw_sp_acl; + struct mlxsw_sp { struct { struct list_head list; @@ -291,6 +295,7 @@ struct mlxsw_sp { u8 port_to_module[MLXSW_PORT_MAX_PORTS]; struct mlxsw_sp_sb sb; struct mlxsw_sp_router router; + struct mlxsw_sp_acl *acl; struct { DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); } kvdl; @@ -373,6 +378,7 @@ struct mlxsw_sp_port { struct mlxsw_sp_port_sample *sample; }; +bool mlxsw_sp_port_dev_check(const struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); @@ -602,4 +608,94 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); +struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); + +struct mlxsw_sp_acl_rule_info { + unsigned int priority; + struct mlxsw_afk_element_values values; + struct mlxsw_afa_block *act_block; +}; + +enum mlxsw_sp_acl_profile { + MLXSW_SP_ACL_PROFILE_FLOWER, +}; + +struct mlxsw_sp_acl_profile_ops { + size_t ruleset_priv_size; + int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, + void *priv, void *ruleset_priv); + void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct net_device *dev, bool ingress); + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + size_t rule_priv_size; + int (*rule_add)(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); +}; + +struct mlxsw_sp_acl_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + const struct mlxsw_sp_acl_profile_ops * + (*profile_ops)(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile); +}; + +struct mlxsw_sp_acl_ruleset; + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, bool ingress, + enum mlxsw_sp_acl_profile profile); +void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset); + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl); +void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei); +int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, + unsigned int priority); +void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value); +void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + const char *key_value, + const char *mask_value, unsigned int len); +void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id); +int mlxsw_sp_acl_rulei_act_drop(struct 
mlxsw_sp_acl_rule_info *rulei); +int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct net_device *out_dev); + +struct mlxsw_sp_acl_rule; + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie); +void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie); +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); + +int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); + +extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c new file mode 100644 index 000000000000..8a18b3aa70dc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -0,0 +1,572 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
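To make the intended flow of this API concrete, here is a hypothetical consumer of the declarations above; it is illustrative only and not part of the patch. my_port_add_drop_rule() and its parameters are invented, and the MLXSW_AFK_ELEMENT_ETHERTYPE element name is assumed from the flex-key blocks introduced earlier. A ruleset is looked up (or created and bound) for the given netdevice and direction, the rule is described through its rule_info, the action block is committed, and the rule is then programmed through the profile ops.

	#include <linux/err.h>
	#include <linux/netdevice.h>

	#include "spectrum.h"

	/* Hypothetical consumer: drop frames of a given ethertype on a port. */
	static int my_port_add_drop_rule(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *dev, bool ingress,
					 unsigned long cookie, u16 ethertype)
	{
		struct mlxsw_sp_acl_ruleset *ruleset;
		struct mlxsw_sp_acl_rule_info *rulei;
		struct mlxsw_sp_acl_rule *rule;
		int err;

		ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
						   MLXSW_SP_ACL_PROFILE_FLOWER);
		if (IS_ERR(ruleset))
			return PTR_ERR(ruleset);

		rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			goto err_rule_create;
		}
		rulei = mlxsw_sp_acl_rule_rulei(rule);

		/* Describe the match and actions, then encode the action block. */
		mlxsw_sp_acl_rulei_priority(rulei, 1);
		mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_ETHERTYPE,
					       ethertype, 0xffff);
		err = mlxsw_sp_acl_rulei_act_drop(rulei);
		if (err)
			goto err_rule_cfg;
		err = mlxsw_sp_acl_rulei_commit(rulei);
		if (err)
			goto err_rule_cfg;

		/* Hand the rule to the profile ops (the TCAM backend). */
		err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
		if (err)
			goto err_rule_cfg;

		/* The rule took its own ruleset reference in rule_create. */
		mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
		return 0;

	err_rule_cfg:
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	err_rule_create:
		mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
		return err;
	}

Removal would later find the rule by its cookie with mlxsw_sp_acl_rule_lookup(), then call mlxsw_sp_acl_rule_del() followed by mlxsw_sp_acl_rule_destroy().
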
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "core.h" +#include "resources.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" +#include "core_acl_flex_actions.h" +#include "spectrum_acl_flex_keys.h" + +struct mlxsw_sp_acl { + struct mlxsw_afk *afk; + struct mlxsw_afa *afa; + const struct mlxsw_sp_acl_ops *ops; + struct rhashtable ruleset_ht; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) +{ + return acl->afk; +} + +struct mlxsw_sp_acl_ruleset_ht_key { + struct net_device *dev; /* dev this ruleset is bound to */ + bool ingress; + const struct mlxsw_sp_acl_profile_ops *ops; +}; + +struct mlxsw_sp_acl_ruleset { + struct rhash_head ht_node; /* Member of acl HT */ + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + struct rhashtable rule_ht; + unsigned int ref_count; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_sp_acl_rule { + struct rhash_head ht_node; /* Member of rule HT */ + unsigned long cookie; /* HT key */ + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule_info *rulei; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key), + .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key), + .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node), + .automatic_shrinking = true, +}; + +static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie), + .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node), + .automatic_shrinking = true, +}; + +static struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_acl_profile_ops *ops) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset *ruleset; + size_t alloc_size; + int err; + + alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size; + ruleset = kzalloc(alloc_size, GFP_KERNEL); + if (!ruleset) + return ERR_PTR(-ENOMEM); + ruleset->ref_count = 1; + ruleset->ht_key.ops = ops; + + err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params); + if (err) + goto err_rhashtable_init; + + err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv); + if (err) + goto err_ops_ruleset_add; + + return ruleset; + +err_ops_ruleset_add: + rhashtable_destroy(&ruleset->rule_ht); +err_rhashtable_init: + kfree(ruleset); + return ERR_PTR(err); +} + +static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + ops->ruleset_del(mlxsw_sp, ruleset->priv); + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset); +} + +static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + struct net_device *dev, bool ingress) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + int err; + + ruleset->ht_key.dev = dev; + ruleset->ht_key.ingress = ingress; + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); + if (err) + return err; + err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); + if (err) + goto err_ops_ruleset_bind; + return 0; + 
+err_ops_ruleset_bind: + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); + return err; +} + +static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + + ops->ruleset_unbind(mlxsw_sp, ruleset->priv); + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); +} + +static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset) +{ + ruleset->ref_count++; +} + +static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + if (--ruleset->ref_count) + return; + mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset); + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); +} + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, bool ingress, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + struct mlxsw_sp_acl_ruleset *ruleset; + int err; + + ops = acl->ops->profile_ops(mlxsw_sp, profile); + if (!ops) + return ERR_PTR(-EINVAL); + + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.dev = dev; + ht_key.ingress = ingress; + ht_key.ops = ops; + ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + mlxsw_sp_acl_ruleset_ht_params); + if (ruleset) { + mlxsw_sp_acl_ruleset_ref_inc(ruleset); + return ruleset; + } + ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops); + if (IS_ERR(ruleset)) + return ruleset; + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress); + if (err) + goto err_ruleset_bind; + return ruleset; + +err_ruleset_bind: + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); +} + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) +{ + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); + if (!rulei) + return NULL; + rulei->act_block = mlxsw_afa_block_create(acl->afa); + if (IS_ERR(rulei->act_block)) { + err = PTR_ERR(rulei->act_block); + goto err_afa_block_create; + } + return rulei; + +err_afa_block_create: + kfree(rulei); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei) +{ + mlxsw_afa_block_destroy(rulei->act_block); + kfree(rulei); +} + +int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_commit(rulei->act_block); +} + +void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, + unsigned int priority) +{ + rulei->priority = priority; +} + +void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value) +{ + mlxsw_afk_values_add_u32(&rulei->values, element, + key_value, mask_value); +} + +void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + const char *key_value, + const char *mask_value, unsigned int len) +{ + mlxsw_afk_values_add_buf(&rulei->values, element, + key_value, mask_value, len); +} + +void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei) +{ + 
mlxsw_afa_block_continue(rulei->act_block); +} + +void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id) +{ + mlxsw_afa_block_jump(rulei->act_block, group_id); +} + +int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_append_drop(rulei->act_block); +} + +int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct net_device *out_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + u8 local_port; + bool in_port; + + if (out_dev) { + if (!mlxsw_sp_port_dev_check(out_dev)) + return -EINVAL; + mlxsw_sp_port = netdev_priv(out_dev); + if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) + return -EINVAL; + local_port = mlxsw_sp_port->local_port; + in_port = false; + } else { + /* If out_dev is NULL, the called wants to + * set forward to ingress port. + */ + local_port = 0; + in_port = true; + } + return mlxsw_afa_block_append_fwd(rulei->act_block, + local_port, in_port); +} + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_rule *rule; + int err; + + mlxsw_sp_acl_ruleset_ref_inc(ruleset); + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); + if (!rule) { + err = -ENOMEM; + goto err_alloc; + } + rule->cookie = cookie; + rule->ruleset = ruleset; + + rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rule->rulei)) { + err = PTR_ERR(rule->rulei); + goto err_rulei_create; + } + return rule; + +err_rulei_create: + kfree(rule); +err_alloc: + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + + mlxsw_sp_acl_rulei_destroy(rule->rulei); + kfree(rule); + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); +} + +int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + int err; + + err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei); + if (err) + return err; + + err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, + mlxsw_sp_acl_rule_ht_params); + if (err) + goto err_rhashtable_insert; + + return 0; + +err_rhashtable_insert: + ops->rule_del(mlxsw_sp, rule->priv); + return err; +} + +void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + mlxsw_sp_acl_rule_ht_params); + ops->rule_del(mlxsw_sp, rule->priv); +} + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie) +{ + return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie, + mlxsw_sp_acl_rule_ht_params); +} + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule) +{ + return rule->rulei; +} + +#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 + +static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + u32 kvdl_index; + 
int ret; + int err; + + /* The first action set of a TCAM entry is stored directly in TCAM, + * not KVD linear area. + */ + if (is_first) + return 0; + + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE); + if (ret < 0) + return ret; + kvdl_index = ret; + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_pefa_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, + bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + if (is_first) + return; + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, + u8 local_port) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char ppbs_pl[MLXSW_REG_PPBS_LEN]; + u32 kvdl_index; + int ret; + int err; + + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1); + if (ret < 0) + return ret; + kvdl_index = ret; + mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); + if (err) + goto err_ppbs_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_ppbs_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { + .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, +}; + +int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; + struct mlxsw_sp_acl *acl; + int err; + + acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL); + if (!acl) + return -ENOMEM; + mlxsw_sp->acl = acl; + + acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_FLEX_KEYS), + mlxsw_sp_afk_blocks, + MLXSW_SP_AFK_BLOCKS_COUNT); + if (!acl->afk) { + err = -ENOMEM; + goto err_afk_create; + } + + acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_ACTIONS_PER_SET), + &mlxsw_sp_act_afa_ops, mlxsw_sp); + if (IS_ERR(acl->afa)) { + err = PTR_ERR(acl->afa); + goto err_afa_create; + } + + err = rhashtable_init(&acl->ruleset_ht, + &mlxsw_sp_acl_ruleset_ht_params); + if (err) + goto err_rhashtable_init; + + err = acl_ops->init(mlxsw_sp, acl->priv); + if (err) + goto err_acl_ops_init; + + acl->ops = acl_ops; + return 0; + +err_acl_ops_init: + rhashtable_destroy(&acl->ruleset_ht); +err_rhashtable_init: + mlxsw_afa_destroy(acl->afa); +err_afa_create: + mlxsw_afk_destroy(acl->afk); +err_afk_create: + kfree(acl); + return err; +} + +void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; + + acl_ops->fini(mlxsw_sp, acl->priv); + rhashtable_destroy(&acl->ruleset_ht); + mlxsw_afa_destroy(acl->afa); + mlxsw_afk_destroy(acl->afk); + kfree(acl); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c new file mode 100644 index 000000000000..a0a968e47ae6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -0,0 +1,1084 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c + * 
Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "core.h" +#include "resources.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_sp_acl_tcam { + unsigned long *used_regions; /* bit array */ + unsigned int max_regions; + unsigned long *used_groups; /* bit array */ + unsigned int max_groups; + unsigned int max_group_size; +}; + +static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp_acl_tcam *tcam = priv; + u64 max_tcam_regions; + u64 max_regions; + u64 max_groups; + size_t alloc_size; + int err; + + max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_TCAM_REGIONS); + max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); + + /* Use 1:1 mapping between ACL region and TCAM region */ + if (max_tcam_regions < max_regions) + max_regions = max_tcam_regions; + + alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions); + tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL); + if (!tcam->used_regions) + return -ENOMEM; + tcam->max_regions = max_regions; + + max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS); + alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups); + tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL); + if (!tcam->used_groups) { + err = -ENOMEM; + goto err_alloc_used_groups; + } + tcam->max_groups = max_groups; + tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_GROUP_SIZE); + return 0; + +err_alloc_used_groups: + kfree(tcam->used_regions); + return err; +} + +static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp_acl_tcam *tcam = priv; + + kfree(tcam->used_groups); + kfree(tcam->used_regions); +} + +static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam, + u16 *p_id) +{ + u16 id; + + id = find_first_zero_bit(tcam->used_regions, tcam->max_regions); + if (id < tcam->max_regions) { + __set_bit(id, tcam->used_regions); + *p_id = id; + return 0; + } + return -ENOBUFS; +} + +static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam, + u16 id) +{ + __clear_bit(id, tcam->used_regions); +} + +static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam, + u16 *p_id) +{ + u16 id; + + id = find_first_zero_bit(tcam->used_groups, tcam->max_groups); + if (id < tcam->max_groups) { + __set_bit(id, tcam->used_groups); + *p_id = id; + return 0; + } + return -ENOBUFS; +} + +static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam, + u16 id) +{ + __clear_bit(id, tcam->used_groups); +} + +struct mlxsw_sp_acl_tcam_pattern { + const enum mlxsw_afk_element *elements; + unsigned int elements_count; +}; + +struct mlxsw_sp_acl_tcam_group { + struct mlxsw_sp_acl_tcam *tcam; + u16 id; + struct list_head region_list; + unsigned int region_count; + struct rhashtable chunk_ht; + struct { + u16 local_port; + bool ingress; + } bound; + struct mlxsw_sp_acl_tcam_group_ops *ops; + const struct mlxsw_sp_acl_tcam_pattern *patterns; + unsigned int patterns_count; +}; + +struct mlxsw_sp_acl_tcam_region { + struct list_head list; /* Member of a TCAM group */ + struct list_head chunk_list; /* List of chunks under this region */ + struct parman *parman; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_acl_tcam_group *group; + u16 id; /* ACL ID and region ID - they are same */ + char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; + struct mlxsw_afk_key_info *key_info; + struct { + struct parman_prio 
parman_prio; + struct parman_item parman_item; + struct mlxsw_sp_acl_rule_info *rulei; + } catchall; +}; + +struct mlxsw_sp_acl_tcam_chunk { + struct list_head list; /* Member of a TCAM region */ + struct rhash_head ht_node; /* Member of a chunk HT */ + unsigned int priority; /* Priority within the region and group */ + struct parman_prio parman_prio; + struct mlxsw_sp_acl_tcam_group *group; + struct mlxsw_sp_acl_tcam_region *region; + unsigned int ref_count; +}; + +struct mlxsw_sp_acl_tcam_entry { + struct parman_item parman_item; + struct mlxsw_sp_acl_tcam_chunk *chunk; +}; + +static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { + .key_len = sizeof(unsigned int), + .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority), + .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node), + .automatic_shrinking = true, +}; + +static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + struct mlxsw_sp_acl_tcam_region *region; + char pagt_pl[MLXSW_REG_PAGT_LEN]; + int acl_index = 0; + + mlxsw_reg_pagt_pack(pagt_pl, group->id); + list_for_each_entry(region, &group->region_list, list) + mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id); + mlxsw_reg_pagt_size_set(pagt_pl, acl_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl); +} + +static int +mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_sp_acl_tcam_group *group, + const struct mlxsw_sp_acl_tcam_pattern *patterns, + unsigned int patterns_count) +{ + int err; + + group->tcam = tcam; + group->patterns = patterns; + group->patterns_count = patterns_count; + INIT_LIST_HEAD(&group->region_list); + err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); + if (err) + return err; + + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + goto err_group_update; + + err = rhashtable_init(&group->chunk_ht, + &mlxsw_sp_acl_tcam_chunk_ht_params); + if (err) + goto err_rhashtable_init; + + return 0; + +err_rhashtable_init: +err_group_update: + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + return err; +} + +static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + struct mlxsw_sp_acl_tcam *tcam = group->tcam; + + rhashtable_destroy(&group->chunk_ht); + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + WARN_ON(!list_empty(&group->region_list)); +} + +static int +mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct net_device *dev, bool ingress) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + char ppbt_pl[MLXSW_REG_PPBT_LEN]; + + if (!mlxsw_sp_port_dev_check(dev)) + return -EINVAL; + + mlxsw_sp_port = netdev_priv(dev); + group->bound.local_port = mlxsw_sp_port->local_port; + group->bound.ingress = ingress; + mlxsw_reg_ppbt_pack(ppbt_pl, + group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_BIND, group->bound.local_port, + group->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); +} + +static void +mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + char ppbt_pl[MLXSW_REG_PPBT_LEN]; + + mlxsw_reg_ppbt_pack(ppbt_pl, + group->bound.ingress ? 
MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port, + group->id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); +} + +static unsigned int +mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + if (list_empty(®ion->chunk_list)) + return 0; + /* As a priority of a region, return priority of the first chunk */ + chunk = list_first_entry(®ion->chunk_list, typeof(*chunk), list); + return chunk->priority; +} + +static unsigned int +mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + if (list_empty(®ion->chunk_list)) + return 0; + chunk = list_last_entry(®ion->chunk_list, typeof(*chunk), list); + return chunk->priority; +} + +static void +mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_region *region2; + struct list_head *pos; + + /* Position the region inside the list according to priority */ + list_for_each(pos, &group->region_list) { + region2 = list_entry(pos, typeof(*region2), list); + if (mlxsw_sp_acl_tcam_region_prio(region2) > + mlxsw_sp_acl_tcam_region_prio(region)) + break; + } + list_add_tail(®ion->list, pos); + group->region_count++; +} + +static void +mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + group->region_count--; + list_del(®ion->list); +} + +static int +mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + int err; + + if (group->region_count == group->tcam->max_group_size) + return -ENOBUFS; + + mlxsw_sp_acl_tcam_group_list_add(group, region); + + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + goto err_group_update; + region->group = group; + + return 0; + +err_group_update: + mlxsw_sp_acl_tcam_group_list_del(group, region); + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + return err; +} + +static void +mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_group *group = region->group; + + mlxsw_sp_acl_tcam_group_list_del(group, region); + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); +} + +static struct mlxsw_sp_acl_tcam_region * +mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + bool *p_need_split) +{ + struct mlxsw_sp_acl_tcam_region *region, *region2; + struct list_head *pos; + bool issubset; + + list_for_each(pos, &group->region_list) { + region = list_entry(pos, typeof(*region), list); + + /* First, check if the requested priority does not rather belong + * under some of the next regions. + */ + if (pos->next != &group->region_list) { /* not last */ + region2 = list_entry(pos->next, typeof(*region2), list); + if (priority >= mlxsw_sp_acl_tcam_region_prio(region2)) + continue; + } + + issubset = mlxsw_afk_key_info_subset(region->key_info, elusage); + + /* If requested element usage would not fit and the priority + * is lower than the currently inspected region we cannot + * use this region, so return NULL to indicate new region has + * to be created. 
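 * A hypothetical example: the region's first chunk sits at priority 10
 * and the new rule has priority 5 but needs elements this region's key
 * does not provide; the search stops here and the caller creates a
 * fresh region for it.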
+ */ + if (!issubset && + priority < mlxsw_sp_acl_tcam_region_prio(region)) + return NULL; + + /* If requested element usage would not fit and the priority + * is higher than the currently inspected region we cannot + * use this region. There is still some hope that the next + * region would be the fit. So let it be processed and + * eventually break at the check right above this. + */ + if (!issubset && + priority > mlxsw_sp_acl_tcam_region_max_prio(region)) + continue; + + /* Indicate if the region needs to be split in order to add + * the requested priority. Split is needed when requested + * element usage won't fit into the found region. + */ + *p_need_split = !issubset; + return region; + } + return NULL; /* New region has to be created. */ +} + +static void +mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_afk_element_usage *out) +{ + const struct mlxsw_sp_acl_tcam_pattern *pattern; + int i; + + for (i = 0; i < group->patterns_count; i++) { + pattern = &group->patterns[i]; + mlxsw_afk_element_usage_fill(out, pattern->elements, + pattern->elements_count); + if (mlxsw_afk_element_usage_subset(elusage, out)) + return; + } + memcpy(out, elusage, sizeof(*out)); +} + +#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 + +static int +mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_afk_key_info *key_info = region->key_info; + char ptar_pl[MLXSW_REG_PTAR_LEN]; + unsigned int encodings_count; + int i; + int err; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC, + MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + region->id, region->tcam_region_info); + encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info); + for (i = 0; i < encodings_count; i++) { + u16 encoding; + + encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i); + mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding); + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); + if (err) + return err; + mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info); + return 0; +} + +static void +mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id, + region->tcam_region_info); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static int +mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 new_size) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, + new_size, region->id, region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static int +mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char pacl_pl[MLXSW_REG_PACL_LEN]; + + mlxsw_reg_pacl_pack(pacl_pl, region->id, true, + region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); +} + +static void +mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char pacl_pl[MLXSW_REG_PACL_LEN]; + + mlxsw_reg_pacl_pack(pacl_pl, region->id, false, + region->tcam_region_info); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); +} + +static int +mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + 
struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset, + struct mlxsw_sp_acl_rule_info *rulei) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + char *act_set; + char *mask; + char *key; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); + mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); + mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); + + /* Only the first action set belongs here, the rest is in KVD */ + act_set = mlxsw_afa_block_first_set(rulei->act_block); + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static void +mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + + mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (-1UL) + +static int +mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct parman_prio *parman_prio = ®ion->catchall.parman_prio; + struct parman_item *parman_item = ®ion->catchall.parman_item; + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + parman_prio_init(region->parman, parman_prio, + MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); + err = parman_item_add(region->parman, parman_prio, parman_item); + if (err) + goto err_parman_item_add; + + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rulei)) { + err = PTR_ERR(rulei); + goto err_rulei_create; + } + + mlxsw_sp_acl_rulei_act_continue(rulei); + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, + parman_item->index, rulei); + region->catchall.rulei = rulei; + if (err) + goto err_rule_insert; + + return 0; + +err_rule_insert: +err_rulei_commit: + mlxsw_sp_acl_rulei_destroy(rulei); +err_rulei_create: + parman_item_remove(region->parman, parman_prio, parman_item); +err_parman_item_add: + parman_prio_fini(parman_prio); + return err; +} + +static void +mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct parman_prio *parman_prio = ®ion->catchall.parman_prio; + struct parman_item *parman_item = ®ion->catchall.parman_item; + struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; + + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, + parman_item->index); + mlxsw_sp_acl_rulei_destroy(rulei); + parman_item_remove(region->parman, parman_prio, parman_item); + parman_prio_fini(parman_prio); +} + +static void +mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 src_offset, u16 dst_offset, u16 size) +{ + char prcr_pl[MLXSW_REG_PRCR_LEN]; + + mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, + region->tcam_region_info, src_offset, + region->tcam_region_info, dst_offset, size); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); +} + +static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp_acl_tcam_region *region = priv; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if 
(new_count > max_tcam_rules) + return -EINVAL; + return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count); +} + +static void mlxsw_sp_acl_tcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp_acl_tcam_region *region = priv; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + + mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region, + from_index, to_index, count); +} + +static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = { + .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp_acl_tcam_region_parman_resize, + .move = mlxsw_sp_acl_tcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +static struct mlxsw_sp_acl_tcam_region * +mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_tcam_region *region; + int err; + + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(®ion->chunk_list); + region->mlxsw_sp = mlxsw_sp; + + region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops, + region); + if (!region->parman) { + err = -ENOMEM; + goto err_parman_create; + } + + region->key_info = mlxsw_afk_key_info_get(afk, elusage); + if (IS_ERR(region->key_info)) { + err = PTR_ERR(region->key_info); + goto err_key_info_get; + } + + err = mlxsw_sp_acl_tcam_region_id_get(tcam, ®ion->id); + if (err) + goto err_region_id_get; + + err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); + if (err) + goto err_tcam_region_alloc; + + err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region); + if (err) + goto err_tcam_region_enable; + + err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region); + if (err) + goto err_tcam_region_catchall_add; + + return region; + +err_tcam_region_catchall_add: + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); +err_tcam_region_enable: + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); +err_tcam_region_alloc: + mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); +err_region_id_get: + mlxsw_afk_key_info_put(region->key_info); +err_key_info_get: + parman_destroy(region->parman); +err_parman_create: + kfree(region); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); + mlxsw_afk_key_info_put(region->key_info); + parman_destroy(region->parman); + kfree(region); +} + +static int +mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_region *region; + bool region_created = false; + bool need_split; + int err; + + region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage, + &need_split); + if (region && need_split) { + /* According to priority, the chunk should belong to an + * existing region. However, this chunk needs elements + * that region does not contain. We need to split the existing + * region into two and create a new region for this chunk + * in between. 
This is not supported now. + */ + return -EOPNOTSUPP; + } + if (!region) { + struct mlxsw_afk_element_usage region_elusage; + + mlxsw_sp_acl_tcam_group_use_patterns(group, elusage, + ®ion_elusage); + region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam, + ®ion_elusage); + if (IS_ERR(region)) + return PTR_ERR(region); + region_created = true; + } + + chunk->region = region; + list_add_tail(&chunk->list, ®ion->chunk_list); + + if (!region_created) + return 0; + + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region); + if (err) + goto err_group_region_attach; + + return 0; + +err_group_region_attach: + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + return err; +} + +static void +mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + list_del(&chunk->list); + if (list_empty(®ion->chunk_list)) { + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + } +} + +static struct mlxsw_sp_acl_tcam_chunk * +mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + int err; + + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) + return ERR_PTR(-EINVAL); + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + return ERR_PTR(-ENOMEM); + chunk->priority = priority; + chunk->group = group; + chunk->ref_count = 1; + + err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority, + elusage, chunk); + if (err) + goto err_chunk_assoc; + + parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority); + + err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, + mlxsw_sp_acl_tcam_chunk_ht_params); + if (err) + goto err_rhashtable_insert; + + return chunk; + +err_rhashtable_insert: + parman_prio_fini(&chunk->parman_prio); + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); +err_chunk_assoc: + kfree(chunk); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_group *group = chunk->group; + + rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, + mlxsw_sp_acl_tcam_chunk_ht_params); + parman_prio_fini(&chunk->parman_prio); + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); + kfree(chunk); +} + +static struct mlxsw_sp_acl_tcam_chunk * +mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority, + mlxsw_sp_acl_tcam_chunk_ht_params); + if (chunk) { + if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info, + elusage))) + return ERR_PTR(-EINVAL); + chunk->ref_count++; + return chunk; + } + return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group, + priority, elusage); +} + +static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + if (--chunk->ref_count) + return; + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); +} + +static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_entry *entry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + 
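/* Adding an entry: look up or create the chunk that matches the rule's
 * priority, reserve a slot in the chunk's region through parman, then
 * program the key, mask and first action set at that slot; each step is
 * unwound if a later one fails.
 */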
struct mlxsw_sp_acl_tcam_region *region; + int err; + + chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority, + &rulei->values.elusage); + if (IS_ERR(chunk)) + return PTR_ERR(chunk); + + region = chunk->region; + err = parman_item_add(region->parman, &chunk->parman_prio, + &entry->parman_item); + if (err) + goto err_parman_item_add; + + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, + entry->parman_item.index, + rulei); + if (err) + goto err_rule_insert; + entry->chunk = chunk; + + return 0; + +err_rule_insert: + parman_item_remove(region->parman, &chunk->parman_prio, + &entry->parman_item); +err_parman_item_add: + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); + return err; +} + +static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_entry *entry) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, + entry->parman_item.index); + parman_item_remove(region->parman, &chunk->parman_prio, + &entry->parman_item); + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); +} + +static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { + MLXSW_AFK_ELEMENT_SRC_SYS_PORT, + MLXSW_AFK_ELEMENT_DMAC, + MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP4, + MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, +}; + +static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP6_HI, + MLXSW_AFK_ELEMENT_SRC_IP6_LO, + MLXSW_AFK_ELEMENT_DST_IP6_HI, + MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, +}; + +static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = { + { + .elements = mlxsw_sp_acl_tcam_pattern_ipv4, + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4), + }, + { + .elements = mlxsw_sp_acl_tcam_pattern_ipv6, + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6), + }, +}; + +#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \ + ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns) + +struct mlxsw_sp_acl_tcam_flower_ruleset { + struct mlxsw_sp_acl_tcam_group group; +}; + +struct mlxsw_sp_acl_tcam_flower_rule { + struct mlxsw_sp_acl_tcam_entry entry; +}; + +static int +mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, + void *priv, void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam *tcam = priv; + + return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, + mlxsw_sp_acl_tcam_patterns, + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT); +} + +static void +mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); +} + +static int +mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, + struct net_device *dev, bool ingress) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group, + dev, ingress); +} + +static void +mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + 
mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group); +} + +static int +mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, + &rule->entry, rulei); +} + +static void +mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) +{ + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); +} + +static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { + .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset), + .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add, + .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del, + .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, + .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, + .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), + .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, + .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, +}; + +static const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops_arr[] = { + [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, +}; + +static const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + + if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr))) + return NULL; + ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile]; + if (WARN_ON(!ops)) + return NULL; + return ops; +} + +const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp_acl_tcam), + .init = mlxsw_sp_acl_tcam_init, + .fini = mlxsw_sp_acl_tcam_fini, + .profile_ops = mlxsw_sp_acl_tcam_profile_ops, +}; -- cgit v1.2.3 From 69ca05ce9dec2cc95070df7f1f10ea6c9c12d237 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:08 +0100 Subject: sched: cls_flower: expose priority to offloading netdevice The driver that offloads flower rules needs to know with which priority user inserted the rules. So add this information into offload struct. Signed-off-by: Jiri Pirko Acked-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 1 + net/sched/cls_flower.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index b43077e47d35..dabb00af46a0 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -481,6 +481,7 @@ enum tc_fl_command { struct tc_cls_flower_offload { enum tc_fl_command command; + u32 prio; unsigned long cookie; struct flow_dissector *dissector; struct fl_flow_key *mask; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 23c4d224dcb1..0826c8ec3a76 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -229,6 +229,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) return; offload.command = TC_CLSFLOWER_DESTROY; + offload.prio = tp->prio; offload.cookie = (unsigned long)f; tc->type = TC_SETUP_CLSFLOWER; @@ -260,6 +261,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, } offload.command = TC_CLSFLOWER_REPLACE; + offload.prio = tp->prio; offload.cookie = (unsigned long)f; offload.dissector = dissector; offload.mask = mask; @@ -287,6 +289,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) return; offload.command = TC_CLSFLOWER_STATS; + offload.prio = tp->prio; offload.cookie = (unsigned long)f; offload.exts = &f->exts; -- cgit v1.2.3 From 7aa0f5aa9030aa97e9d83d834fc977664cf1b066 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Feb 2017 10:29:09 +0100 Subject: mlxsw: spectrum: Implement TC flower offload Extend the existing setup_tc ndo call and allow to offload cls_flower rules. Only limited set of dissector keys and actions are supported now. Use previously introduced ACL infrastructure to offload cls_flower rules to be processed in the HW. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 4 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 15 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 6 + .../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 309 +++++++++++++++++++++ 4 files changed, 331 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 1459716aa06d..6b6c30deee83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -14,8 +14,8 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum_kvdl.o spectrum_acl.o \ - spectrum_acl_tcam.o + spectrum_kvdl.o spectrum_acl_tcam.o \ + spectrum_acl.o spectrum_flower.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index b1d77e13d1c2..8a52c860794b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1355,7 +1355,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); - if (tc->type == TC_SETUP_MATCHALL) { + switch (tc->type) { + case TC_SETUP_MATCHALL: switch (tc->cls_mall->command) { case TC_CLSMATCHALL_REPLACE: return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, @@ -1369,6 +1370,18 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, default: return -EINVAL; } + case TC_SETUP_CLSFLOWER: + switch (tc->cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, + proto, tc->cls_flower); + case TC_CLSFLOWER_DESTROY: + mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, + tc->cls_flower); + return 0; + default: + return -EOPNOTSUPP; + } } return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index cd9b4b2d0980..4d251e06bf31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -47,6 +47,7 @@ #include #include #include +#include #include "port.h" #include "core.h" @@ -698,4 +699,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; +int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + __be16 protocol, struct tc_cls_flower_offload *f); +void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c new file mode 100644 index 000000000000..35b147a47eb5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -0,0 +1,309 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + struct mlxsw_sp_acl_rule_info *rulei, + struct tcf_exts *exts) +{ + const struct tc_action *a; + LIST_HEAD(actions); + int err; + + if (tc_no_actions(exts)) + return 0; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + if (is_tcf_gact_shot(a)) { + err = mlxsw_sp_acl_rulei_act_drop(rulei); + if (err) + return err; + } else if (is_tcf_mirred_egress_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + struct net_device *out_dev; + + out_dev = __dev_get_by_index(dev_net(dev), ifindex); + if (out_dev == dev) + out_dev = NULL; + + err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, + out_dev); + if (err) + return err; + } else { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); + return -EOPNOTSUPP; + } + } + return 0; +} + +static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4, + ntohl(key->src), ntohl(mask->src)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4, + ntohl(key->dst), ntohl(mask->dst)); +} + +static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + size_t addr_half_size = 
sizeof(key->src) / 2; + + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI, + &key->src.s6_addr[0], + &mask->src.s6_addr[0], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO, + &key->src.s6_addr[addr_half_size], + &mask->src.s6_addr[addr_half_size], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI, + &key->dst.s6_addr[0], + &mask->dst.s6_addr[0], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO, + &key->dst.s6_addr[addr_half_size], + &mask->dst.s6_addr[addr_half_size], + addr_half_size); +} + +static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f, + u8 ip_proto) +{ + struct flow_dissector_key_ports *key, *mask; + + if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) + return 0; + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n"); + return -EINVAL; + } + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT, + ntohs(key->dst), ntohs(mask->dst)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT, + ntohs(key->src), ntohs(mask->src)); + return 0; +} + +static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + u16 addr_type = 0; + u8 ip_proto = 0; + int err; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); + return -EOPNOTSUPP; + } + + mlxsw_sp_acl_rulei_priority(rulei, f->prio); + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->key); + addr_type = key->addr_type; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + ip_proto = key->ip_proto; + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_ETHERTYPE, + ntohs(key->n_proto), + ntohs(mask->n_proto)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_IP_PROTO, + key->ip_proto, mask->ip_proto); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_DMAC, + key->dst, mask->dst, + sizeof(key->dst)); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC, + key->src, mask->src, + sizeof(key->src)); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + mlxsw_sp_flower_parse_ipv4(rulei, 
f); + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + mlxsw_sp_flower_parse_ipv6(rulei, f); + + err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto); + if (err) + return err; + + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); +} + +int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + __be16 protocol, struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_acl_rule_info *rulei; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + int err; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_rule_create; + } + + rulei = mlxsw_sp_acl_rule_rulei(rule); + err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f); + if (err) + goto err_flower_parse; + + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + + err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule); + if (err) + goto err_rule_add; + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return 0; + +err_rule_add: +err_rulei_commit: +err_flower_parse: + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); +err_rule_create: + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return err; +} + +void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, + ingress, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (WARN_ON(IS_ERR(ruleset))) + return; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); + if (!WARN_ON(!rule)) { + mlxsw_sp_acl_rule_del(mlxsw_sp, rule); + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); + } + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); +} -- cgit v1.2.3 From 3ffc1af576550ec61d35668485954e49da29d168 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 16:26:39 -0800 Subject: ixgbe: get rid of custom busy polling code In linux-4.5, busy polling was implemented in core NAPI stack, meaning that all custom implementation can be removed from drivers. Not only we remove lot's of code, we also remove one lock operation in fast path, and allow GRO to do its job. Signed-off-by: Eric Dumazet Cc: Jeff Kirsher Acked-by: Alexander Duyck Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 125 ----------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 40 -------- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 5 - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 59 +---------- 4 files changed, 5 insertions(+), 224 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index aeedc812ee27..e83444c34cf9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -55,9 +55,6 @@ #include -#ifdef CONFIG_NET_RX_BUSY_POLL -#define BP_EXTENDED_STATS -#endif /* common prefix used by pr_<> macros */ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -201,11 +198,6 @@ struct ixgbe_rx_buffer { struct ixgbe_queue_stats { u64 packets; u64 bytes; -#ifdef BP_EXTENDED_STATS - u64 yields; - u64 misses; - u64 cleaned; -#endif /* BP_EXTENDED_STATS */ }; struct ixgbe_tx_queue_stats { @@ -399,127 +391,10 @@ struct ixgbe_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t state; -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum ixgbe_qv_state_t { - IXGBE_QV_STATE_IDLE = 0, - IXGBE_QV_STATE_NAPI, - IXGBE_QV_STATE_POLL, - IXGBE_QV_STATE_DISABLE -}; - -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from the device poll routine to get ownership of a q_vector */ -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_NAPI); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; -#endif - - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true is someone tried to get the qv while napi had it */ -static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); - - /* flush any outstanding Rx frames */ - if (q_vector->napi.gro_list) - napi_gro_flush(&q_vector->napi, false); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from ixgbe_low_latency_poll() */ -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_POLL); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->rx.ring->stats.yields++; -#endif - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true if someone tried to get the qv while it was locked */ -static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; -} - -/* false if QV is currently owned */ -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_DISABLE); - - return rc == 
IXGBE_QV_STATE_IDLE; -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ -} - -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -#endif /* CONFIG_NET_RX_BUSY_POLL */ - #ifdef CONFIG_IXGBE_HWMON #define IXGBE_HWMON_TYPE_LOC 0 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 17589068da78..ee28e54a4f75 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1179,12 +1179,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; -#endif continue; } @@ -1194,12 +1188,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; @@ -1207,12 +1195,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; -#endif continue; } @@ -1222,12 +1204,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif } for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { @@ -1264,28 +1240,12 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "tx_queue_%u_bp_napi_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "rx_queue_%u_bp_poll_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { sprintf(p, "tx_pb_%u_pxon", i); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 10d29678d65e..1b8be7d813bd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -853,11 +853,6 @@ static int 
ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); -#ifdef CONFIG_NET_RX_BUSY_POLL - /* initialize busy poll */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); - -#endif /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3b3b52b62a5f..86135c00d4b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1720,11 +1720,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { - skb_mark_napi_id(skb, &q_vector->napi); - if (ixgbe_qv_busy_polling(q_vector)) - netif_receive_skb(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -2201,40 +2197,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, return total_rx_packets; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int ixgbe_low_latency_recv(struct napi_struct *napi) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int found = 0; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return LL_FLUSH_FAILED; - - if (!ixgbe_qv_lock_poll(q_vector)) - return LL_FLUSH_BUSY; - - ixgbe_for_each_ring(ring, q_vector->rx) { - found = ixgbe_clean_rx_irq(q_vector, ring, 4); -#ifdef BP_EXTENDED_STATS - if (found) - ring->stats.cleaned += found; - else - ring->stats.misses++; -#endif - if (found) - break; - } - - ixgbe_qv_unlock_poll(q_vector); - - return found; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -2878,8 +2840,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) clean_complete = false; } - /* Exit if we are called by netpoll or busy polling is active */ - if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) + /* Exit if we are called by netpoll */ + if (budget <= 0) return budget; /* attempt to distribute budget to each queue fairly, but don't allow @@ -2898,7 +2860,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget) clean_complete = false; } - ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -4581,23 +4542,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { - ixgbe_qv_init_lock(adapter->q_vector[q_idx]); + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_enable(&adapter->q_vector[q_idx]->napi); - } } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_disable(&adapter->q_vector[q_idx]->napi); - while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { - pr_info("QV %d locked\n", q_idx); - usleep_range(1000, 20000); - } - } } static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) @@ -9352,9 +9306,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - 
.ndo_busy_poll = ixgbe_low_latency_recv, -#endif #ifdef IXGBE_FCOE .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, -- cgit v1.2.3 From 508aac6dee025f93eab1e806d20762ea6327b43d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 16:59:18 -0800 Subject: ixgbevf: get rid of custom busy polling code In linux-4.5, busy polling was implemented in core NAPI stack, meaning that all custom implementation can be removed from drivers. Not only we remove lot's of code, we also remove one lock operation in fast path, and allow GRO to do its job. Signed-off-by: Eric Dumazet Cc: Jeff Kirsher Acked-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 38 -------- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 113 ---------------------- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 64 ------------ 3 files changed, 215 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 508e72c5f1c2..1f6c0ecd50bb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -432,11 +432,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, if (!ring) { data[i++] = 0; data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif continue; } @@ -446,12 +441,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i + 1] = ring->stats.misses; - data[i + 2] = ring->stats.cleaned; - i += 3; -#endif } /* populate Rx queue data */ @@ -460,11 +449,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, if (!ring) { data[i++] = 0; data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif continue; } @@ -474,12 +458,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i + 1] = ring->stats.misses; - data[i + 2] = ring->stats.cleaned; - i += 3; -#endif } } @@ -507,28 +485,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "tx_queue_%u_bp_napi_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < adapter->num_rx_queues; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "rx_queue_%u_bp_poll_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } break; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 3fe6504eeab1..a8cbc2dda0dd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -37,11 +37,6 @@ #include "vf.h" -#ifdef CONFIG_NET_RX_BUSY_POLL -#include -#define 
BP_EXTENDED_STATS -#endif - #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) @@ -73,11 +68,6 @@ struct ixgbevf_rx_buffer { struct ixgbevf_stats { u64 packets; u64 bytes; -#ifdef BP_EXTENDED_STATS - u64 yields; - u64 misses; - u64 cleaned; -#endif }; struct ixgbevf_tx_queue_stats { @@ -217,109 +207,6 @@ struct ixgbevf_q_vector { #endif /* CONFIG_NET_RX_BUSY_POLL */ }; -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) -{ - spin_lock_init(&q_vector->lock); - q_vector->state = IXGBEVF_QV_STATE_IDLE; -} - -/* called from the device poll routine to get ownership of a q_vector */ -static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBEVF_QV_LOCKED) { - WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); - q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; - rc = false; -#ifdef BP_EXTENDED_STATS - q_vector->tx.ring->stats.yields++; -#endif - } else { - /* we don't care if someone yielded */ - q_vector->state = IXGBEVF_QV_STATE_NAPI; - } - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* returns true is someone tried to get the qv while napi had it */ -static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) -{ - int rc = false; - - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | - IXGBEVF_QV_STATE_NAPI_YIELD)); - - if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) - rc = true; - /* reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* called from ixgbevf_low_latency_poll() */ -static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if ((q_vector->state & IXGBEVF_QV_LOCKED)) { - q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; - rc = false; -#ifdef BP_EXTENDED_STATS - q_vector->rx.ring->stats.yields++; -#endif - } else { - /* preserve yield marks */ - q_vector->state |= IXGBEVF_QV_STATE_POLL; - } - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* returns true if someone tried to get the qv while it was locked */ -static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) -{ - int rc = false; - - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); - - if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) - rc = true; - /* reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) -{ - WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); - return q_vector->state & IXGBEVF_QV_USER_PEND; -} - -/* false if QV is currently owned */ -static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBEVF_QV_OWNED) - rc = false; - q_vector->state |= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* microsecond values for various ITR rates shifted by 2 to fit itr register * with the first 3 bits reserved 0 */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 
b06863560c7d..80bab261a0ec 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -457,16 +457,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb) { -#ifdef CONFIG_NET_RX_BUSY_POLL - skb_mark_napi_id(skb, &q_vector->napi); - - if (ixgbevf_qv_busy_polling(q_vector)) { - netif_receive_skb(skb); - /* exit early if we busy polled */ - return; - } -#endif /* CONFIG_NET_RX_BUSY_POLL */ - napi_gro_receive(&q_vector->napi, skb); } @@ -1031,10 +1021,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (budget <= 0) return budget; -#ifdef CONFIG_NET_RX_BUSY_POLL - if (!ixgbevf_qv_lock_napi(q_vector)) - return budget; -#endif /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling @@ -1052,10 +1038,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) clean_complete = false; } -#ifdef CONFIG_NET_RX_BUSY_POLL - ixgbevf_qv_unlock_napi(q_vector); -#endif - /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -1090,40 +1072,6 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int ixgbevf_busy_poll_recv(struct napi_struct *napi) -{ - struct ixgbevf_q_vector *q_vector = - container_of(napi, struct ixgbevf_q_vector, napi); - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *ring; - int found = 0; - - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - return LL_FLUSH_FAILED; - - if (!ixgbevf_qv_lock_poll(q_vector)) - return LL_FLUSH_BUSY; - - ixgbevf_for_each_ring(ring, q_vector->rx) { - found = ixgbevf_clean_rx_irq(q_vector, ring, 4); -#ifdef BP_EXTENDED_STATS - if (found) - ring->stats.cleaned += found; - else - ring->stats.misses++; -#endif - if (found) - break; - } - - ixgbevf_qv_unlock_poll(q_vector); - - return found; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * ixgbevf_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -1960,9 +1908,6 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; -#ifdef CONFIG_NET_RX_BUSY_POLL - ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); -#endif napi_enable(&q_vector->napi); } } @@ -1976,12 +1921,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_disable(&q_vector->napi); -#ifdef CONFIG_NET_RX_BUSY_POLL - while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { - pr_info("QV %d locked\n", q_idx); - usleep_range(1000, 20000); - } -#endif /* CONFIG_NET_RX_BUSY_POLL */ } } @@ -3978,9 +3917,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ixgbevf_busy_poll_recv, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbevf_netpoll, #endif -- cgit v1.2.3 From 7a655c6324a8968ea2f027bf3660c87c42ac3de4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 3 Feb 2017 17:28:21 -0500 Subject: enic: Remove local ndo_busy_poll() implementation. 
We do polling generically these days. Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/enic_main.c | 66 ++---------------------- drivers/net/ethernet/cisco/enic/vnic_rq.h | 78 ----------------------------- 2 files changed, 5 insertions(+), 139 deletions(-) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 91e42bea151c..c009f6ddabf7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -43,10 +43,8 @@ #ifdef CONFIG_RFS_ACCEL #include #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -#include -#endif #include +#include #include "cq_enet_desc.h" #include "vnic_dev.h" @@ -1191,8 +1189,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); skb_mark_napi_id(skb, &enic->napi[rq->index]); - if (enic_poll_busy_polling(rq) || - !(netdev->features & NETIF_F_GRO)) + if (!(netdev->features & NETIF_F_GRO)) netif_receive_skb(skb); else napi_gro_receive(&enic->napi[q_number], skb); @@ -1296,15 +1293,6 @@ static int enic_poll(struct napi_struct *napi, int budget) wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, enic_wq_service, NULL); - if (!enic_poll_lock_napi(&enic->rq[cq_rq])) { - if (wq_work_done > 0) - vnic_intr_return_credits(&enic->intr[intr], - wq_work_done, - 0 /* dont unmask intr */, - 0 /* dont reset intr timer */); - return budget; - } - if (budget > 0) rq_work_done = vnic_cq_service(&enic->cq[cq_rq], rq_work_to_do, enic_rq_service, NULL); @@ -1323,7 +1311,6 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); - enic_poll_unlock_napi(&enic->rq[cq_rq], napi); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. 
@@ -1390,34 +1377,6 @@ static void enic_set_rx_cpu_rmap(struct enic *enic) #endif /* CONFIG_RFS_ACCEL */ -#ifdef CONFIG_NET_RX_BUSY_POLL -static int enic_busy_poll(struct napi_struct *napi) -{ - struct net_device *netdev = napi->dev; - struct enic *enic = netdev_priv(netdev); - unsigned int rq = (napi - &enic->napi[0]); - unsigned int cq = enic_cq_rq(enic, rq); - unsigned int intr = enic_msix_rq_intr(enic, rq); - unsigned int work_to_do = -1; /* clean all pkts possible */ - unsigned int work_done; - - if (!enic_poll_lock_poll(&enic->rq[rq])) - return LL_FLUSH_BUSY; - work_done = vnic_cq_service(&enic->cq[cq], work_to_do, - enic_rq_service, NULL); - - if (work_done > 0) - vnic_intr_return_credits(&enic->intr[intr], - work_done, 0, 0); - vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); - if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_poll(&enic->rq[rq]); - - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - static int enic_poll_msix_wq(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; @@ -1459,8 +1418,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) unsigned int work_done = 0; int err; - if (!enic_poll_lock_napi(&enic->rq[rq])) - return budget; /* Service RQ */ @@ -1493,7 +1450,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_napi(&enic->rq[rq], napi); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, @@ -1751,10 +1707,9 @@ static int enic_open(struct net_device *netdev) netif_tx_wake_all_queues(netdev); - for (i = 0; i < enic->rq_count; i++) { - enic_busy_poll_init_lock(&enic->rq[i]); + for (i = 0; i < enic->rq_count; i++) napi_enable(&enic->napi[i]); - } + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) for (i = 0; i < enic->wq_count; i++) napi_enable(&enic->napi[enic_cq_wq(enic, i)]); @@ -1798,13 +1753,8 @@ static int enic_stop(struct net_device *netdev) enic_dev_disable(enic); - for (i = 0; i < enic->rq_count; i++) { + for (i = 0; i < enic->rq_count; i++) napi_disable(&enic->napi[i]); - local_bh_disable(); - while (!enic_poll_lock_napi(&enic->rq[i])) - mdelay(1); - local_bh_enable(); - } netif_carrier_off(netdev); netif_tx_disable(netdev); @@ -2335,9 +2285,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = enic_busy_poll, -#endif }; static const struct net_device_ops enic_netdev_ops = { @@ -2361,9 +2308,6 @@ static const struct net_device_ops enic_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = enic_busy_poll, -#endif }; static void enic_dev_deinit(struct enic *enic) diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index b9c82f143d7e..0413103ebe94 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -92,9 +92,6 @@ struct vnic_rq { struct vnic_rq_buf *to_clean; void *os_buf_head; unsigned int pkts_outstanding; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t bpoll_state; -#endif /* CONFIG_NET_RX_BUSY_POLL */ }; static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) @@ -207,81 +204,6 @@ static inline int vnic_rq_fill(struct vnic_rq *rq, return 0; } -#ifdef CONFIG_NET_RX_BUSY_POLL 
-static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) -{ - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_lock_napi(struct vnic_rq *rq) -{ - int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, - ENIC_POLL_STATE_NAPI); - - return (rc == ENIC_POLL_STATE_IDLE); -} - -static inline void enic_poll_unlock_napi(struct vnic_rq *rq, - struct napi_struct *napi) -{ - WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI); - napi_gro_flush(napi, false); - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_lock_poll(struct vnic_rq *rq) -{ - int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, - ENIC_POLL_STATE_POLL); - - return (rc == ENIC_POLL_STATE_IDLE); -} - - -static inline void enic_poll_unlock_poll(struct vnic_rq *rq) -{ - WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL); - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_busy_polling(struct vnic_rq *rq) -{ - return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL; -} - -#else - -static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) -{ -} - -static inline bool enic_poll_lock_napi(struct vnic_rq *rq) -{ - return true; -} - -static inline bool enic_poll_unlock_napi(struct vnic_rq *rq, - struct napi_struct *napi) -{ - return false; -} - -static inline bool enic_poll_lock_poll(struct vnic_rq *rq) -{ - return false; -} - -static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) -{ - return false; -} - -static inline bool enic_poll_ll_polling(struct vnic_rq *rq) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - void vnic_rq_free(struct vnic_rq *rq); int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size); -- cgit v1.2.3 From 79e7fff47b7bb4124ef970a13eac4fdeddd1fc25 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Feb 2017 18:43:28 -0800 Subject: net: remove support for per driver ndo_busy_poll() We added generic support for busy polling in NAPI layer in linux-4.5 No network driver uses ndo_busy_poll() anymore, we can get rid of the pointer in struct net_device_ops, and its use in sk_busy_loop() Saves NETIF_F_BUSY_POLL features bit. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdev_features.h | 2 -- include/linux/netdevice.h | 3 --- net/core/dev.c | 15 --------------- net/core/ethtool.c | 1 - 4 files changed, 21 deletions(-) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9c6c8ef2e9e7..9a0419594e84 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -71,7 +71,6 @@ enum { NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ - NETIF_F_BUSY_POLL_BIT, /* Busy poll */ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ @@ -134,7 +133,6 @@ enum { #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) -#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) #define NETIF_F_HW_TC __NETIF_F(HW_TC) #define for_each_netdev_feature(mask_addr, bit) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f3878fbe7786..6f18b509fb2f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1184,9 +1184,6 @@ struct net_device_ops { int (*ndo_netpoll_setup)(struct net_device *dev, struct netpoll_info *info); void (*ndo_netpoll_cleanup)(struct net_device *dev); -#endif -#ifdef CONFIG_NET_RX_BUSY_POLL - int (*ndo_busy_poll)(struct napi_struct *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); diff --git a/net/core/dev.c b/net/core/dev.c index 727b6fda0e8c..4cde8bfb9bab 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4978,7 +4978,6 @@ bool sk_busy_loop(struct sock *sk, int nonblock) { unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0; int (*napi_poll)(struct napi_struct *napi, int budget); - int (*busy_poll)(struct napi_struct *dev); void *have_poll_lock = NULL; struct napi_struct *napi; int rc; @@ -4993,17 +4992,10 @@ restart: if (!napi) goto out; - /* Note: ndo_busy_poll method is optional in linux-4.5 */ - busy_poll = napi->dev->netdev_ops->ndo_busy_poll; - preempt_disable(); for (;;) { rc = 0; local_bh_disable(); - if (busy_poll) { - rc = busy_poll(napi); - goto count; - } if (!napi_poll) { unsigned long val = READ_ONCE(napi->state); @@ -6956,13 +6948,6 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, features &= ~dev->gso_partial_features; } -#ifdef CONFIG_NET_RX_BUSY_POLL - if (dev->netdev_ops->ndo_busy_poll) - features |= NETIF_F_BUSY_POLL; - else -#endif - features &= ~NETIF_F_BUSY_POLL; - return features; } diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 6b3eee0834a0..d5f412b3093d 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -102,7 +102,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_RXFCS_BIT] = "rx-fcs", [NETIF_F_RXALL_BIT] = "rx-all", [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", - [NETIF_F_BUSY_POLL_BIT] = "busy-poll", [NETIF_F_HW_TC_BIT] = "hw-tc-offload", }; -- cgit v1.2.3 From 6e7bc478c9a006c701c14476ec9d389a484b4864 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 3 Feb 2017 14:29:42 -0800 Subject: net: skb_needs_check() accepts CHECKSUM_NONE for tx My recent change missed fact that UFO would perform a complete UDP checksum before segmenting in frags. In this case skb->ip_summed is set to CHECKSUM_NONE. 
We need to add this valid case to skb_needs_check() Fixes: b2504a5dbef3 ("net: reduce skb_warn_bad_offload() noise") Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Signed-off-by: David S. Miller --- net/core/dev.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 4cde8bfb9bab..42ba0379575a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2637,9 +2637,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment); static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) - return skb->ip_summed != CHECKSUM_PARTIAL; - else - return skb->ip_summed == CHECKSUM_NONE; + return skb->ip_summed != CHECKSUM_PARTIAL && + skb->ip_summed != CHECKSUM_NONE; + + return skb->ip_summed == CHECKSUM_NONE; } /** -- cgit v1.2.3 From 4d6308aac3258ecad8444811a16951a919adc131 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 07:49:21 -0800 Subject: virtio_net: exploit napi_complete_done() return value Since commit 364b6055738b ("net: busy-poll: return busypolling status to drivers"), napi_complete_done() returns a boolean that can be used by drivers to conditionally rearm interrupts. This patch changes virtio_net to use this boolean to avoid a bit of overhead for busy-poll users. Jason reports about 1.1% improvement for 1 byte TCP_RR (burst 100). Signed-off-by: Eric Dumazet Acked-by: Jason Wang Cc: Michael S. Tsirkin Cc: Willem de Bruijn Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 0382827829d9..29982c7f6080 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1012,11 +1012,12 @@ static int virtnet_poll(struct napi_struct *napi, int budget) /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); - napi_complete_done(napi, received); - if (unlikely(virtqueue_poll(rq->vq, r)) && - napi_schedule_prep(napi)) { - virtqueue_disable_cb(rq->vq); - __napi_schedule(napi); + if (napi_complete_done(napi, received)) { + if (unlikely(virtqueue_poll(rq->vq, r)) && + napi_schedule_prep(napi)) { + virtqueue_disable_cb(rq->vq); + __napi_schedule(napi); + } } } -- cgit v1.2.3 From 0ae8133586ad1c9be894411aaf8b17bb58c8efe5 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 2 Feb 2017 12:37:08 -0800 Subject: net: ipv6: Allow shorthand delete of all nexthops in multipath route IPv4 allows multipath routes to be deleted using just the prefix and length. For example: $ ip ro ls vrf red unreachable default metric 8192 1.1.1.0/24 nexthop via 10.100.1.254 dev eth1 weight 1 nexthop via 10.11.200.2 dev eth11.200 weight 1 10.11.200.0/24 dev eth11.200 proto kernel scope link src 10.11.200.3 10.100.1.0/24 dev eth1 proto kernel scope link src 10.100.1.3 $ ip ro del 1.1.1.0/24 vrf red $ ip ro ls vrf red unreachable default metric 8192 10.11.200.0/24 dev eth11.200 proto kernel scope link src 10.11.200.3 10.100.1.0/24 dev eth1 proto kernel scope link src 10.100.1.3 The same notation does not work with IPv6 because of how multipath routes are implemented for IPv6. For IPv6 only the first nexthop of a multipath route is deleted if the request contains only a prefix and length. This leads to unnecessary complexity in userspace dealing with IPv6 multipath routes. This patch allows all nexthops to be deleted without specifying each one in the delete request. 
Internally, this is done by walking the sibling list of the route matching the specifications given (prefix, length, metric, protocol, etc). $ ip -6 ro ls vrf red 2001:db8:1::/120 dev eth1 proto kernel metric 256 pref medium 2001:db8:2::/120 dev eth2 proto kernel metric 256 pref medium 2001:db8:200::/120 via 2001:db8:1::2 dev eth1 metric 1024 pref medium 2001:db8:200::/120 via 2001:db8:2::2 dev eth2 metric 1024 pref medium ... $ ip -6 ro del vrf red 2001:db8:200::/120 $ ip -6 ro ls vrf red 2001:db8:1::/120 dev eth1 proto kernel metric 256 pref medium 2001:db8:2::/120 dev eth2 proto kernel metric 256 pref medium ... Because IPv6 allows individual nexthops to be deleted without deleting the entire route, the ip6_route_multipath_del and non-multipath code path (ip6_route_del) have to be discriminated so that all nexthops are only deleted for the latter case. This is done by making the existing fc_type in fib6_config a u16 and then adding a new u16 field with fc_delete_all_nh as the first bit. Suggested-by: Dinesh Dutt Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 4 +++- net/ipv6/route.c | 38 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index a74e2aa40ef4..c979c878df1c 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -37,7 +37,9 @@ struct fib6_config { int fc_ifindex; u32 fc_flags; u32 fc_protocol; - u32 fc_type; /* only 8 bits are used */ + u16 fc_type; /* only 8 bits are used */ + u16 fc_delete_all_nh : 1, + __unused : 15; struct in6_addr fc_dst; struct in6_addr fc_src; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 91eb3f7782dd..635b7fdef2eb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2143,6 +2143,34 @@ int ip6_del_rt(struct rt6_info *rt) return __ip6_del_rt(rt, &info); } +static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) +{ + struct nl_info *info = &cfg->fc_nlinfo; + struct fib6_table *table; + int err; + + table = rt->rt6i_table; + write_lock_bh(&table->tb6_lock); + + if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) { + struct rt6_info *sibling, *next_sibling; + + list_for_each_entry_safe(sibling, next_sibling, + &rt->rt6i_siblings, + rt6i_siblings) { + err = fib6_del(sibling, info); + if (err) + goto out; + } + } + + err = fib6_del(rt, info); +out: + write_unlock_bh(&table->tb6_lock); + ip6_rt_put(rt); + return err; +} + static int ip6_route_del(struct fib6_config *cfg) { struct fib6_table *table; @@ -2179,7 +2207,11 @@ static int ip6_route_del(struct fib6_config *cfg) dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); - return __ip6_del_rt(rt, &cfg->fc_nlinfo); + /* if gateway was specified only delete the one hop */ + if (cfg->fc_flags & RTF_GATEWAY) + return __ip6_del_rt(rt, &cfg->fc_nlinfo); + + return __ip6_del_rt_siblings(rt, cfg); } } read_unlock_bh(&table->tb6_lock); @@ -3142,8 +3174,10 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) if (cfg.fc_mp) return ip6_route_multipath_del(&cfg); - else + else { + cfg.fc_delete_all_nh = 1; return ip6_route_del(&cfg); + } } static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) -- cgit v1.2.3 From beb1afac518dec5a15dc92ba8f0ca016dcf457b4 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 2 Feb 2017 12:37:09 -0800 Subject: net: ipv6: Add support to dump multipath routes via RTA_MULTIPATH attribute IPv6 returns multipath routes as a series of individual routes making their display and 
handling by userspace different and more complicated than IPv4, putting the burden on the user to see that a route is part of a multipath route and internally creating a multipath route if desired (e.g., libnl does this as of commit 29b71371e764). This patch addresses this difference, allowing multipath routes to be returned using the RTA_MULTIPATH attribute. The end result is that IPv6 multipath routes can be treated and displayed in a format similar to IPv4: $ ip -6 ro ls vrf red 2001:db8:1::/120 dev eth1 proto kernel metric 256 pref medium 2001:db8:2::/120 dev eth2 proto kernel metric 256 pref medium 2001:db8:200::/120 metric 1024 nexthop via 2001:db8:1::2 dev eth1 weight 1 nexthop via 2001:db8:2::2 dev eth2 weight 1 Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/ip6_fib.c | 10 +++++ net/ipv6/route.c | 112 +++++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 105 insertions(+), 17 deletions(-) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index febde6c112bf..1bf5e22fb95d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -318,6 +318,16 @@ static int fib6_dump_node(struct fib6_walker *w) w->leaf = rt; return 1; } + + /* Multipath routes are dumped in one route with the + * RTA_MULTIPATH attribute. Jump 'rt' to point to the + * last sibling of this route (no need to dump the + * sibling routes again) + */ + if (rt->rt6i_nsiblings) + rt = list_last_entry(&rt->rt6i_siblings, + struct rt6_info, + rt6i_siblings); } w->leaf = NULL; return 0; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 635b7fdef2eb..c740d9e249a6 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3195,8 +3195,20 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) return ip6_route_add(&cfg); } -static inline size_t rt6_nlmsg_size(struct rt6_info *rt) +static size_t rt6_nlmsg_size(struct rt6_info *rt) { + int nexthop_len = 0; + + if (rt->rt6i_nsiblings) { + nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ + + NLA_ALIGN(sizeof(struct rtnexthop)) + + nla_total_size(16) /* RTA_GATEWAY */ + + nla_total_size(4) /* RTA_OIF */ + + lwtunnel_get_encap_size(rt->dst.lwtstate); + + nexthop_len *= rt->rt6i_nsiblings; + } + return NLMSG_ALIGN(sizeof(struct rtmsg)) + nla_total_size(16) /* RTA_SRC */ + nla_total_size(16) /* RTA_DST */ @@ -3210,7 +3222,62 @@ static inline size_t rt6_nlmsg_size(struct rt6_info *rt) + nla_total_size(sizeof(struct rta_cacheinfo)) + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ + nla_total_size(1) /* RTA_PREF */ - + lwtunnel_get_encap_size(rt->dst.lwtstate); + + lwtunnel_get_encap_size(rt->dst.lwtstate) + + nexthop_len; +} + +static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, + unsigned int *flags) +{ + if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { + *flags |= RTNH_F_LINKDOWN; + if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) + *flags |= RTNH_F_DEAD; + } + + if (rt->rt6i_flags & RTF_GATEWAY) { + if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) + goto nla_put_failure; + } + + if (rt->dst.dev && + nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) + goto nla_put_failure; + + if (rt->dst.lwtstate && + lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) +{ + struct rtnexthop *rtnh; + unsigned int flags = 0; + + rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); + if (!rtnh) + goto nla_put_failure; + + rtnh->rtnh_hops = 
0; + rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0; + + if (rt6_nexthop_info(skb, rt, &flags) < 0) + goto nla_put_failure; + + rtnh->rtnh_flags = flags; + + /* length of rtnetlink header + attributes */ + rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; + + return 0; + +nla_put_failure: + return -EMSGSIZE; } static int rt6_fill_node(struct net *net, @@ -3264,11 +3331,6 @@ static int rt6_fill_node(struct net *net, else rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; - if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { - rtm->rtm_flags |= RTNH_F_LINKDOWN; - if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) - rtm->rtm_flags |= RTNH_F_DEAD; - } rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->rt6i_protocol; if (rt->rt6i_flags & RTF_DYNAMIC) @@ -3332,17 +3394,35 @@ static int rt6_fill_node(struct net *net, if (rtnetlink_put_metrics(skb, metrics) < 0) goto nla_put_failure; - if (rt->rt6i_flags & RTF_GATEWAY) { - if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) - goto nla_put_failure; - } - - if (rt->dst.dev && - nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) - goto nla_put_failure; if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) goto nla_put_failure; + /* For multipath routes, walk the siblings list and add + * each as a nexthop within RTA_MULTIPATH. + */ + if (rt->rt6i_nsiblings) { + struct rt6_info *sibling, *next_sibling; + struct nlattr *mp; + + mp = nla_nest_start(skb, RTA_MULTIPATH); + if (!mp) + goto nla_put_failure; + + if (rt6_add_nexthop(skb, rt) < 0) + goto nla_put_failure; + + list_for_each_entry_safe(sibling, next_sibling, + &rt->rt6i_siblings, rt6i_siblings) { + if (rt6_add_nexthop(skb, sibling) < 0) + goto nla_put_failure; + } + + nla_nest_end(skb, mp); + } else { + if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0) + goto nla_put_failure; + } + expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0; if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) @@ -3351,8 +3431,6 @@ static int rt6_fill_node(struct net *net, if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) goto nla_put_failure; - if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) - goto nla_put_failure; nlmsg_end(skb, nlh); return 0; -- cgit v1.2.3 From 3b1137fe74829e021f483756a648cbb87c8a1b4a Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 2 Feb 2017 12:37:10 -0800 Subject: net: ipv6: Change notifications for multipath add to RTA_MULTIPATH Change ip6_route_multipath_add to send one notifciation with the full route encoded with RTA_MULTIPATH instead of a series of individual routes. This is done by adding a skip_notify flag to the nl_info struct. The flag is used to skip sending of the notification in the fib code that actually inserts the route. Once the full route has been added, a notification is generated with all nexthops. ip6_route_multipath_add handles 3 use cases: new routes, route replace, and route append. The multipath notification generated needs to be consistent with the order of the nexthops and it should be consistent with the order in a FIB dump which means the route with the first nexthop needs to be used as the route reference. For the first 2 cases (new and replace), a reference to the route used to send the notification is obtained by saving the first route added. For the append case, the last route added is used to loop back to its first sibling route which is the first nexthop in the multipath route. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/net/netlink.h | 1 + net/ipv6/ip6_fib.c | 6 ++++-- net/ipv6/route.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 54 insertions(+), 3 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index d3938f11ae52..b239fcd33d80 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -229,6 +229,7 @@ struct nl_info { struct nlmsghdr *nlh; struct net *nl_net; u32 portid; + bool skip_notify; }; int netlink_rcv_skb(struct sk_buff *skb, diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1bf5e22fb95d..99c68ce6ef78 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -881,7 +881,8 @@ add: *ins = rt; rt->rt6i_node = fn; atomic_inc(&rt->rt6i_ref); - inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); + if (!info->skip_notify) + inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); info->nl_net->ipv6.rt6_stats->fib_rt_entries++; if (!(fn->fn_flags & RTN_RTINFO)) { @@ -907,7 +908,8 @@ add: rt->rt6i_node = fn; rt->dst.rt6_next = iter->dst.rt6_next; atomic_inc(&rt->rt6i_ref); - inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); + if (!info->skip_notify) + inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); if (!(fn->fn_flags & RTN_RTINFO)) { info->nl_net->ipv6.rt6_stats->fib_route_nodes++; fn->fn_flags |= RTN_RTINFO; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c740d9e249a6..cb3366d5e165 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3023,13 +3023,37 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list, return 0; } +static void ip6_route_mpath_notify(struct rt6_info *rt, + struct rt6_info *rt_last, + struct nl_info *info, + __u16 nlflags) +{ + /* if this is an APPEND route, then rt points to the first route + * inserted and rt_last points to last route inserted. Userspace + * wants a consistent dump of the route which starts at the first + * nexthop. Since sibling routes are always added at the end of + * the list, find the first sibling of the last route appended + */ + if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) { + rt = list_first_entry(&rt_last->rt6i_siblings, + struct rt6_info, + rt6i_siblings); + } + + if (rt) + inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); +} + static int ip6_route_multipath_add(struct fib6_config *cfg) { + struct rt6_info *rt_notif = NULL, *rt_last = NULL; + struct nl_info *info = &cfg->fc_nlinfo; struct fib6_config r_cfg; struct rtnexthop *rtnh; struct rt6_info *rt; struct rt6_nh *err_nh; struct rt6_nh *nh, *nh_safe; + __u16 nlflags; int remaining; int attrlen; int err = 1; @@ -3038,6 +3062,10 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); LIST_HEAD(rt6_nh_list); + nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE; + if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) + nlflags |= NLM_F_APPEND; + remaining = cfg->fc_mp_len; rtnh = (struct rtnexthop *)cfg->fc_mp; @@ -3080,9 +3108,20 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) rtnh = rtnh_next(rtnh, &remaining); } + /* for add and replace send one notification with all nexthops. 
+ * Skip the notification in fib6_add_rt2node and send one with + * the full route when done + */ + info->skip_notify = 1; + err_nh = NULL; list_for_each_entry(nh, &rt6_nh_list, next) { - err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc); + rt_last = nh->rt6_info; + err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc); + /* save reference to first route for notification */ + if (!rt_notif && !err) + rt_notif = nh->rt6_info; + /* nh->rt6_info is used or freed at this point, reset to NULL*/ nh->rt6_info = NULL; if (err) { @@ -3104,9 +3143,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) nhn++; } + /* success ... tell user about new route */ + ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); goto cleanup; add_errout: + /* send notification for routes that were added so that + * the delete notifications sent by ip6_route_del are + * coherent + */ + if (rt_notif) + ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); + /* Delete routes that were already added */ list_for_each_entry(nh, &rt6_nh_list, next) { if (err_nh == nh) -- cgit v1.2.3 From 16a16cd35ee29d9bea54dd60e55d9c1cc685a37d Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 2 Feb 2017 12:37:11 -0800 Subject: net: ipv6: Change notifications for multipath delete to RTA_MULTIPATH If an entire multipath route is deleted using prefix and len (without any nexthops), send a single RTM_DELROUTE notification with the full route using RTA_MULTIPATH. This is done by generating the skb before the route delete when all of the sibling routes are still present but sending it after the route has been removed from the FIB. The skip_notify flag is used to tell the lower fib code not to send notifications for the individual nexthop routes. If a route is deleted using RTA_MULTIPATH for any nexthops or a single nexthop entry is deleted, then the nexthops are deleted one at a time with notifications sent as each hop is deleted. This is necessary given that IPv6 allows individual hops within a route to be deleted. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/ip6_fib.c | 3 ++- net/ipv6/route.c | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 99c68ce6ef78..e4266746e4a2 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1454,7 +1454,8 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, fib6_purge_rt(rt, fn, net); - inet6_rt_notify(RTM_DELROUTE, rt, info, 0); + if (!info->skip_notify) + inet6_rt_notify(RTM_DELROUTE, rt, info, 0); rt6_release(rt); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index cb3366d5e165..194261acb87d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -98,6 +98,12 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static void rt6_dst_from_metrics_check(struct rt6_info *rt); static int rt6_score_route(struct rt6_info *rt, int oif, int strict); +static size_t rt6_nlmsg_size(struct rt6_info *rt); +static int rt6_fill_node(struct net *net, + struct sk_buff *skb, struct rt6_info *rt, + struct in6_addr *dst, struct in6_addr *src, + int iif, int type, u32 portid, u32 seq, + unsigned int flags); #ifdef CONFIG_IPV6_ROUTE_INFO static struct rt6_info *rt6_add_route_info(struct net *net, @@ -2146,6 +2152,7 @@ int ip6_del_rt(struct rt6_info *rt) static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) { struct nl_info *info = &cfg->fc_nlinfo; + struct sk_buff *skb = NULL; struct fib6_table *table; int err; @@ -2155,6 +2162,20 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) { struct rt6_info *sibling, *next_sibling; + /* prefer to send a single notification with all hops */ + skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); + if (skb) { + u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; + + if (rt6_fill_node(info->nl_net, skb, rt, + NULL, NULL, 0, RTM_DELROUTE, + info->portid, seq, 0) < 0) { + kfree_skb(skb); + skb = NULL; + } else + info->skip_notify = 1; + } + list_for_each_entry_safe(sibling, next_sibling, &rt->rt6i_siblings, rt6i_siblings) { @@ -2168,6 +2189,11 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) out: write_unlock_bh(&table->tb6_lock); ip6_rt_put(rt); + + if (skb) { + rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV6_ROUTE, + info->nlh, gfp_any()); + } return err; } -- cgit v1.2.3 From 7d4d5065ecb0cea9c5815d5e0df5fb586c5ee9b5 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 2 Feb 2017 12:37:12 -0800 Subject: net: ipv6: Use compressed IPv6 addresses showing route replace error ip6_print_replace_route_err logs an error if a route replace fails with IPv6 addresses in the full format. e.g,: IPv6: IPV6: multipath route replace failed (check consistency of installed routes): 2001:0db8:0200:0000:0000:0000:0000:0000 nexthop 2001:0db8:0001:0000:0000:0000:0000:0016 ifi 0 Change the message to dump the addresses in the compressed format. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 194261acb87d..8ffa24cc8899 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3010,7 +3010,7 @@ static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) struct rt6_nh *nh; list_for_each_entry(nh, rt6_nh_list, next) { - pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n", + pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, nh->r_cfg.fc_ifindex); } -- cgit v1.2.3 From 617f01211bafed65b5cb2fa5b671e839c7f2af86 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:51 -0800 Subject: 8139too: use napi_complete_done() Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/8139too.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 5ad59c6d29a4..89631753e799 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2135,14 +2135,10 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) if (likely(RTL_R16(IntrStatus) & RxAckBits)) work_done += rtl8139_rx(dev, tp, budget); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - /* - * Order is important since data can get interrupted - * again when we think we are done. - */ + spin_lock_irqsave(&tp->lock, flags); - __napi_complete(napi); RTL_W16_F(IntrMask, rtl8139_intr_mask); spin_unlock_irqrestore(&tp->lock, flags); } -- cgit v1.2.3 From ab1e7e1d26669365e49e8312f96f7a5fa0b703a9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:52 -0800 Subject: 8139cp: use napi_complete_done() Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. 4) Eventually get rid of napi_gro_flush() in the future. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/8139cp.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 0b3cd58093d5..672f6b696069 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -465,10 +465,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) struct cp_private *cp = container_of(napi, struct cp_private, napi); struct net_device *dev = cp->dev; unsigned int rx_tail = cp->rx_tail; - int rx; + int rx = 0; - rx = 0; -rx_status_loop: cpw16(IntrStatus, cp_rx_intr_mask); while (rx < budget) { @@ -556,15 +554,10 @@ rx_next: /* if we did not reach work limit, then we're done with * this round of polling */ - if (rx < budget) { + if (rx < budget && napi_complete_done(napi, rx)) { unsigned long flags; - if (cpr16(IntrStatus) & cp_rx_intr_mask) - goto rx_status_loop; - - napi_gro_flush(napi, false); spin_lock_irqsave(&cp->lock, flags); - __napi_complete(napi); cpw16_f(IntrMask, cp_intr_mask); spin_unlock_irqrestore(&cp->lock, flags); } -- cgit v1.2.3 From 1fa8c5f33a7edb67cef8e44f22fa3afeb7d18e89 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:53 -0800 Subject: epic100: use napi_complete_done() Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. 4) get rid of baroque code and ease maintenance. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/smsc/epic100.c | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 55a95e1d69d6..5f2737189c72 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -264,7 +264,6 @@ struct epic_private { spinlock_t lock; /* Group with Tx control cache line. */ spinlock_t napi_lock; struct napi_struct napi; - unsigned int reschedule_in_poll; unsigned int cur_tx, dirty_tx; unsigned int cur_rx, dirty_rx; @@ -400,7 +399,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&ep->lock); spin_lock_init(&ep->napi_lock); - ep->reschedule_in_poll = 0; /* Bring the chip out of low-power mode. 
*/ ew32(GENCTL, 0x4200); @@ -1086,13 +1084,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) handled = 1; - if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { + if (status & EpicNapiEvent) { spin_lock(&ep->napi_lock); if (napi_schedule_prep(&ep->napi)) { epic_napi_irq_off(dev, ep); __napi_schedule(&ep->napi); - } else - ep->reschedule_in_poll++; + } spin_unlock(&ep->napi_lock); } status &= ~EpicNapiEvent; @@ -1248,37 +1245,23 @@ static int epic_poll(struct napi_struct *napi, int budget) { struct epic_private *ep = container_of(napi, struct epic_private, napi); struct net_device *dev = ep->mii.dev; - int work_done = 0; void __iomem *ioaddr = ep->ioaddr; - -rx_action: + int work_done; epic_tx(dev, ep); - work_done += epic_rx(dev, budget); + work_done = epic_rx(dev, budget); epic_rx_err(dev, ep); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - int more; - - /* A bit baroque but it avoids a (space hungry) spin_unlock */ spin_lock_irqsave(&ep->napi_lock, flags); - more = ep->reschedule_in_poll; - if (!more) { - __napi_complete(napi); - ew32(INTSTAT, EpicNapiEvent); - epic_napi_irq_on(dev, ep); - } else - ep->reschedule_in_poll--; - + ew32(INTSTAT, EpicNapiEvent); + epic_napi_irq_on(dev, ep); spin_unlock_irqrestore(&ep->napi_lock, flags); - - if (more) - goto rx_action; } return work_done; -- cgit v1.2.3 From c46e9907d4dcc2062f742d4e37e41a81a162fe7b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:54 -0800 Subject: amd8111e: add GRO support Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. 4) get rid of baroque code and ease maintenance. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 164 ++++++++++++++++-------------------- 1 file changed, 72 insertions(+), 92 deletions(-) diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 9595f1bc535b..7b5df562f30f 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) void __iomem *mmio = lp->mmio; struct sk_buff *skb,*new_skb; int min_pkt_len, status; - unsigned int intr0; int num_rx_pkt = 0; short pkt_len; #if AMD8111E_VLAN_TAG_USED short vtag; #endif - int rx_pkt_limit = budget; - unsigned long flags; - if (rx_pkt_limit <= 0) - goto rx_not_empty; + while (num_rx_pkt < budget) { + status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); + if (status & OWN_BIT) + break; - do{ - /* process receive packets until we use the quota. - * If we own the next entry, it's a new packet. Send it up. + /* There is a tricky error noted by John Murphy, + * to Russ Nelson: Even with + * full-sized * buffers it's possible for a + * jabber packet to use two buffers, with only + * the last correctly noting the error. */ - while(1) { - status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); - if (status & OWN_BIT) - break; - - /* There is a tricky error noted by John Murphy, - * to Russ Nelson: Even with - * full-sized * buffers it's possible for a - * jabber packet to use two buffers, with only - * the last correctly noting the error. 
- */ - if(status & ERR_BIT) { - /* resetting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - /* check for STP and ENP */ - if(!((status & STP_BIT) && (status & ENP_BIT))){ - /* resetting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; + if (status & ERR_BIT) { + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + /* check for STP and ENP */ + if (!((status & STP_BIT) && (status & ENP_BIT))){ + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; #if AMD8111E_VLAN_TAG_USED - vtag = status & TT_MASK; - /*MAC will strip vlan tag*/ - if (vtag != 0) - min_pkt_len =MIN_PKT_LEN - 4; + vtag = status & TT_MASK; + /* MAC will strip vlan tag */ + if (vtag != 0) + min_pkt_len = MIN_PKT_LEN - 4; else #endif - min_pkt_len =MIN_PKT_LEN; + min_pkt_len = MIN_PKT_LEN; - if (pkt_len < min_pkt_len) { - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } - if(--rx_pkt_limit < 0) - goto rx_not_empty; - new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); - if (!new_skb) { - /* if allocation fail, - * ignore that pkt and go to next one - */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } + if (pkt_len < min_pkt_len) { + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } + new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); + if (!new_skb) { + /* if allocation fail, + * ignore that pkt and go to next one + */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } - skb_reserve(new_skb, 2); - skb = lp->rx_skbuff[rx_index]; - pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], - lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len); - lp->rx_skbuff[rx_index] = new_skb; - lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, - new_skb->data, - lp->rx_buff_len-2, - PCI_DMA_FROMDEVICE); + skb_reserve(new_skb, 2); + skb = lp->rx_skbuff[rx_index]; + pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], + lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[rx_index] = new_skb; + lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, + new_skb->data, + lp->rx_buff_len-2, + PCI_DMA_FROMDEVICE); - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, dev); #if AMD8111E_VLAN_TAG_USED - if (vtag == TT_VLAN_TAGGED){ - u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - } -#endif - netif_receive_skb(skb); - /*COAL update rx coalescing parameters*/ - lp->coal_conf.rx_packets++; - lp->coal_conf.rx_bytes += pkt_len; - num_rx_pkt++; - - err_next_pkt: - lp->rx_ring[rx_index].buff_phy_addr - = cpu_to_le32(lp->rx_dma_addr[rx_index]); - lp->rx_ring[rx_index].buff_count = - cpu_to_le16(lp->rx_buff_len-2); - wmb(); - lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); - rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + if (vtag == TT_VLAN_TAGGED){ + u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - /* Check the interrupt status register for more packets in the - * mean time. Process them since we have not used up our quota. 
- */ - intr0 = readl(mmio + INT0); - /*Ack receive packets */ - writel(intr0 & RINT0,mmio + INT0); +#endif + napi_gro_receive(napi, skb); + /* COAL update rx coalescing parameters */ + lp->coal_conf.rx_packets++; + lp->coal_conf.rx_bytes += pkt_len; + num_rx_pkt++; + +err_next_pkt: + lp->rx_ring[rx_index].buff_phy_addr + = cpu_to_le32(lp->rx_dma_addr[rx_index]); + lp->rx_ring[rx_index].buff_count = + cpu_to_le16(lp->rx_buff_len-2); + wmb(); + lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); + rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + } - } while(intr0 & RINT0); + if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) { + unsigned long flags; - if (rx_pkt_limit > 0) { /* Receive descriptor is empty now */ spin_lock_irqsave(&lp->lock, flags); - __napi_complete(napi); writel(VAL0|RINTEN0, mmio + INTEN0); writel(VAL2 | RDMD0, mmio + CMD0); spin_unlock_irqrestore(&lp->lock, flags); } -rx_not_empty: return num_rx_pkt; } -- cgit v1.2.3 From 5b2ec6f2be512485cb9fde7eb1119279af524c28 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:55 -0800 Subject: pcnet32: use napi_complete_done() Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/pcnet32.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 41e58cca8fee..a8a22c0d688a 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1350,13 +1350,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) pcnet32_restart(dev, CSR0_START); netif_wake_queue(dev); } - spin_unlock_irqrestore(&lp->lock, flags); - - if (work_done < budget) { - spin_lock_irqsave(&lp->lock, flags); - - __napi_complete(napi); + if (work_done < budget && napi_complete_done(napi, work_done)) { /* clear interrupt masks */ val = lp->a->read_csr(ioaddr, CSR3); val &= 0x00ff; @@ -1364,9 +1359,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) /* Set interrupt enable. */ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); - - spin_unlock_irqrestore(&lp->lock, flags); } + + spin_unlock_irqrestore(&lp->lock, flags); return work_done; } -- cgit v1.2.3 From a3961789727c3b7cc3ae5f83c5e41005c6e2d1ec Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:56 -0800 Subject: ep93xx_eth: add GRO support Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. 4) get rid of baroque code and ease maintenance. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cirrus/ep93xx_eth.c | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 396c88678eab..7a7c02f1f8b9 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d pr_info("mdio write timed out\n"); } -static int ep93xx_rx(struct net_device *dev, int processed, int budget) +static int ep93xx_rx(struct net_device *dev, int budget) { struct ep93xx_priv *ep = netdev_priv(dev); + int processed = 0; while (processed < budget) { int entry; @@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget) skb_put(skb, length); skb->protocol = eth_type_trans(skb, dev); - netif_receive_skb(skb); + napi_gro_receive(&ep->napi, skb); dev->stats.rx_packets++; dev->stats.rx_bytes += length; @@ -310,35 +311,17 @@ err: return processed; } -static int ep93xx_have_more_rx(struct ep93xx_priv *ep) -{ - struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; - return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); -} - static int ep93xx_poll(struct napi_struct *napi, int budget) { struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); struct net_device *dev = ep->dev; - int rx = 0; - -poll_some_more: - rx = ep93xx_rx(dev, rx, budget); - if (rx < budget) { - int more = 0; + int rx; + rx = ep93xx_rx(dev, budget); + if (rx < budget && napi_complete_done(napi, rx)) { spin_lock_irq(&ep->rx_lock); - __napi_complete(napi); wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); - if (ep93xx_have_more_rx(ep)) { - wrl(ep, REG_INTEN, REG_INTEN_TX); - wrl(ep, REG_INTSTSP, REG_INTSTS_RX); - more = 1; - } spin_unlock_irq(&ep->rx_lock); - - if (more && napi_reschedule(napi)) - goto poll_some_more; } if (rx) { -- cgit v1.2.3 From 135844ef9feff93263319c18ead863a011381b57 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:57 -0800 Subject: skge: use napi_complete_done() Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API and get rid of napi_gro_flush() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/skge.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 81106b73e468..edb95271a4f2 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3201,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev) } } -static int skge_poll(struct napi_struct *napi, int to_do) +static int skge_poll(struct napi_struct *napi, int budget) { struct skge_port *skge = container_of(napi, struct skge_port, napi); struct net_device *dev = skge->netdev; @@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do) skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); - for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { + for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) { struct skge_rx_desc *rd = e->desc; struct sk_buff *skb; u32 control; @@ -3236,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do) wmb(); skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); - if (work_done < to_do) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - napi_gro_flush(napi, false); spin_lock_irqsave(&hw->hw_lock, flags); - __napi_complete(napi); hw->intr_mask |= napimask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); -- cgit v1.2.3 From 7ea4007757e56bd2fb589820ba7676d2dfb0b47a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:58 -0800 Subject: ks8695net: add GRO support Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. Note that rx_lock seems to be useless, NAPI logic should not need this extra care. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/micrel/ks8695net.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index 20cb85bc0c5f..d2106159473f 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) /* Relinquish the SKB to the network layer */ skb_put(skb, pktlen); skb->protocol = eth_type_trans(skb, ndev); - netif_receive_skb(skb); + napi_gro_receive(&ksp->napi, skb); /* Record stats */ ndev->stats.rx_packets++; @@ -561,18 +561,17 @@ rx_finished: static int ks8695_poll(struct napi_struct *napi, int budget) { struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi); - unsigned long work_done; - unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN); unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); + int work_done; work_done = ks8695_rx(ksp, budget); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; + spin_lock_irqsave(&ksp->rx_lock, flags); - __napi_complete(napi); - /*enable rx interrupt*/ + /* enable rx interrupt */ writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN); spin_unlock_irqrestore(&ksp->rx_lock, flags); } -- cgit v1.2.3 From 0eb7b85c9669fce5ebf22915ee5f93b6d721256e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:24:59 -0800 Subject: qla3xxx: add GRO support Use napi_complete_done() instead of __napi_complete() to : 1) Get support of gro_flush_timeout if opt-in 2) Not rearm interrupts for busy-polling users. 3) use standard NAPI API. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qla3xxx.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 5c100ab86c00..ea38236f1ced 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2025,7 +2025,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, qdev->ndev); - netif_receive_skb(skb); + napi_gro_receive(&qdev->napi, skb); lrg_buf_cb2->skb = NULL; if (qdev->device_id == QL3022_DEVICE_ID) @@ -2095,7 +2095,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, } skb2->protocol = eth_type_trans(skb2, qdev->ndev); - netif_receive_skb(skb2); + napi_gro_receive(&qdev->napi, skb2); ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; lrg_buf_cb2->skb = NULL; @@ -2105,8 +2105,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } -static int ql_tx_rx_clean(struct ql3_adapter *qdev, - int *tx_cleaned, int *rx_cleaned, int work_to_do) +static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) { struct net_rsp_iocb *net_rsp; struct net_device *ndev = qdev->ndev; @@ -2114,7 +2113,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, /* While there are entries in the completion queue. 
*/ while ((le32_to_cpu(*(qdev->prsp_producer_index)) != - qdev->rsp_consumer_index) && (work_done < work_to_do)) { + qdev->rsp_consumer_index) && (work_done < budget)) { net_rsp = qdev->rsp_current; rmb(); @@ -2130,21 +2129,20 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, case OPCODE_OB_MAC_IOCB_FN2: ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) net_rsp); - (*tx_cleaned)++; break; case OPCODE_IB_MAC_IOCB: case OPCODE_IB_3032_MAC_IOCB: ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) net_rsp); - (*rx_cleaned)++; + work_done++; break; case OPCODE_IB_IP_IOCB: case OPCODE_IB_3032_IP_IOCB: ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) net_rsp); - (*rx_cleaned)++; + work_done++; break; default: { u32 *tmp = (u32 *)net_rsp; @@ -2169,7 +2167,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, qdev->rsp_current++; } - work_done = *tx_cleaned + *rx_cleaned; } return work_done; @@ -2178,25 +2175,25 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, static int ql_poll(struct napi_struct *napi, int budget) { struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); - int rx_cleaned = 0, tx_cleaned = 0; - unsigned long hw_flags; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + int work_done; - ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); + work_done = ql_tx_rx_clean(qdev, budget); - if (tx_cleaned + rx_cleaned != budget) { - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - __napi_complete(napi); + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&qdev->hw_lock, flags); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, &port_regs->CommonRegs.rspQConsumerIndex); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); ql_enable_interrupts(qdev); } - return tx_cleaned + rx_cleaned; + return work_done; } static irqreturn_t ql3xxx_isr(int irq, void *dev_id) -- cgit v1.2.3 From 3d1a6333d90167f2b196f2cc0c2a988a10eb76c5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:25:00 -0800 Subject: ibm/emac: use napi_complete_done() Use napi_complete_done() instead of __napi_complete() We plan to remove __napi_complete() to reduce NAPI complexity. Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/emac/mal.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index aaf6fec566b5..cd3227b088b7 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget) int n; if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) continue; - n = mc->ops->poll_rx(mc->dev, budget); + n = mc->ops->poll_rx(mc->dev, budget - received); if (n) { received += n; - budget -= n; - if (budget <= 0) - goto more_work; // XXX What if this is the last one ? 
+ if (received >= budget) + return budget; } } - /* We need to disable IRQs to protect from RXDE IRQ here */ - spin_lock_irqsave(&mal->lock, flags); - __napi_complete(napi); - mal_enable_eob_irq(mal); - spin_unlock_irqrestore(&mal->lock, flags); + if (napi_complete_done(napi, received)) { + /* We need to disable IRQs to protect from RXDE IRQ here */ + spin_lock_irqsave(&mal->lock, flags); + mal_enable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + } /* Check for "rotting" packet(s) */ list_for_each(l, &mal->poll_list) { -- cgit v1.2.3 From 32e19300a4f6705e79354cdadac3b2ef3cd0c309 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:25:01 -0800 Subject: aeroflex/greth: use napi_complete_done() We plan to remove __napi_complete() soon, this driver is the last user. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/aeroflex/greth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 93def92f9997..9f7422ada704 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1008,7 +1008,7 @@ restart_txrx_poll: spin_unlock_irqrestore(&greth->devlock, flags); goto restart_txrx_poll; } else { - __napi_complete(napi); + napi_complete_done(napi, work_done); spin_unlock_irqrestore(&greth->devlock, flags); } } -- cgit v1.2.3 From 02c1602ee7b3e3d062c3eacd374d6a6e3a2ebb73 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 4 Feb 2017 15:25:02 -0800 Subject: net: remove __napi_complete() All __napi_complete() callers have been converted to use the more standard napi_complete_done(), we can now remove this NAPI method for good. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/netdevice.h | 1 - net/core/dev.c | 24 +++--------------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6f18b509fb2f..014fbe256d55 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -463,7 +463,6 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } -bool __napi_complete(struct napi_struct *n); bool napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete diff --git a/net/core/dev.c b/net/core/dev.c index 42ba0379575a..404d2e6d5d32 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4883,23 +4883,6 @@ void __napi_schedule_irqoff(struct napi_struct *n) } EXPORT_SYMBOL(__napi_schedule_irqoff); -bool __napi_complete(struct napi_struct *n) -{ - BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); - - /* Some drivers call us directly, instead of calling - * napi_complete_done(). 
- */ - if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state))) - return false; - - list_del_init(&n->poll_list); - smp_mb__before_atomic(); - clear_bit(NAPI_STATE_SCHED, &n->state); - return true; -} -EXPORT_SYMBOL(__napi_complete); - bool napi_complete_done(struct napi_struct *n, int work_done) { unsigned long flags; @@ -4926,14 +4909,13 @@ bool napi_complete_done(struct napi_struct *n, int work_done) else napi_gro_flush(n, false); } - if (likely(list_empty(&n->poll_list))) { - WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); - } else { + if (unlikely(!list_empty(&n->poll_list))) { /* If n->poll_list is not empty, we need to mask irqs */ local_irq_save(flags); - __napi_complete(n); + list_del_init(&n->poll_list); local_irq_restore(flags); } + WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); return true; } EXPORT_SYMBOL(napi_complete_done); -- cgit v1.2.3 From a61d5ce9cc56e2e41bbb1ad62ca7a16d7e7567bd Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 8 Dec 2016 12:58:45 +0200 Subject: net/mlx5: Fix static checker warnings For some reason, sparse doesn't like using an expression of type (!x) with a bitwise | and &. In order to mitigate that, we use a local variable. This removes the following sparse complaints on the core driver (and similar ones on the IB driver too): drivers/net/ethernet/mellanox/mlx5/core/srq.c:83:9: warning: dubious: !x & y drivers/net/ethernet/mellanox/mlx5/core/srq.c:96:9: warning: dubious: !x & y drivers/net/ethernet/mellanox/mlx5/core/port.c:59:9: warning: dubious: !x & y drivers/net/ethernet/mellanox/mlx5/core/vport.c:561:9: warning: dubious: !x & y Signed-off-by: Or Gerlitz Signed-off-by: Matan Barak Reported-by: Bart Van Assche Signed-off-by: Saeed Mahameed --- include/linux/mlx5/device.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 7b6cd67a263f..dd9a263ed368 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -67,10 +67,11 @@ /* insert a value to a struct */ #define MLX5_SET(typ, p, fld, v) do { \ + u32 _v = v; \ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ - (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ + (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \ << __mlx5_dw_bit_off(typ, fld))); \ } while (0) -- cgit v1.2.3 From d254586c34538c0014280806c5d4795697cf21e5 Mon Sep 17 00:00:00 2001 From: David Jander Date: Fri, 10 Oct 2014 17:30:10 +0200 Subject: can: rx-offload: Add support for HW fifo based irq offloading Some CAN controllers have a usable FIFO already but can still benefit from off-loading the CAN controller FIFO. The CAN frames of the FIFO are read and put into a skb queue during interrupt and then transmitted in a NAPI context. 
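To make the intended usage concrete, a driver is expected to hook into this helper roughly as sketched below. The foo_* structure and register helpers are hypothetical placeholders; only the can_rx_offload_* calls and the mailbox_read callback signature come from this patch. The NAPI poll added here then dequeues the queued skbs and passes them to netif_receive_skb().

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct foo_priv {
	struct can_priv can;		/* must stay first for the CAN core */
	struct can_rx_offload offload;
	void __iomem *regs;
};

/* hypothetical glue: read one frame from the controller's HW FIFO */
static unsigned int foo_mailbox_read(struct can_rx_offload *offload,
				     struct can_frame *cf, unsigned int n)
{
	struct foo_priv *priv = container_of(offload, struct foo_priv, offload);

	if (!foo_hw_frame_pending(priv))	/* assumed register helper */
		return 0;			/* FIFO empty, stop */

	foo_hw_read_frame(priv, cf);		/* assumed register helper */
	return 1;				/* one frame consumed */
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	/* drain the shallow HW FIFO while still in hard IRQ context */
	can_rx_offload_irq_offload_fifo(&priv->offload);

	return IRQ_HANDLED;
}

/* in probe: register the callback, then the offload/NAPI instance */
	priv->offload.mailbox_read = foo_mailbox_read;
	err = can_rx_offload_add_fifo(dev, &priv->offload, 32 /* weight */);

/* in ndo_open, once the controller is up: */
	can_rx_offload_enable(&priv->offload);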
Signed-off-by: David Jander Signed-off-by: Marc Kleine-Budde --- drivers/net/can/Makefile | 3 +- drivers/net/can/rx-offload.c | 156 +++++++++++++++++++++++++++++++++++++++++ include/linux/can/rx-offload.h | 51 ++++++++++++++ 3 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 drivers/net/can/rx-offload.c create mode 100644 include/linux/can/rx-offload.h diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 7a85495dbb0c..0da4f2f5c7e3 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -6,7 +6,8 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o obj-$(CONFIG_CAN_SLCAN) += slcan.o obj-$(CONFIG_CAN_DEV) += can-dev.o -can-dev-y := dev.o +can-dev-y += dev.o +can-dev-y += rx-offload.o can-dev-$(CONFIG_CAN_LEDS) += led.o diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c new file mode 100644 index 000000000000..7267c61762c7 --- /dev/null +++ b/drivers/net/can/rx-offload.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include + +static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) +{ + struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + int work_done = 0; + + while ((work_done < quota) && + (skb = skb_dequeue(&offload->skb_queue))) { + struct can_frame *cf = (struct can_frame *)skb->data; + + work_done++; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + } + + if (work_done < quota) { + napi_complete_done(napi, work_done); + + /* Check if there was another interrupt */ + if (!skb_queue_empty(&offload->skb_queue)) + napi_reschedule(&offload->napi); + } + + can_led_event(offload->dev, CAN_LED_EVENT_RX); + + return work_done; +} + +static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) +{ + struct sk_buff *skb = NULL; + struct can_frame *cf; + int ret; + + /* If queue is full or skb not available, read to discard mailbox */ + if (likely(skb_queue_len(&offload->skb_queue) <= + offload->skb_queue_len_max)) + skb = alloc_can_skb(offload->dev, &cf); + + if (!skb) { + struct can_frame cf_overflow; + + ret = offload->mailbox_read(offload, &cf_overflow, n); + if (ret) + offload->dev->stats.rx_dropped++; + + return NULL; + } + + ret = offload->mailbox_read(offload, cf, n); + if (!ret) { + kfree_skb(skb); + return NULL; + } + + return skb; +} + +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) +{ + struct sk_buff *skb; + int received = 0; + + while ((skb = can_rx_offload_offload_one(offload, 0))) { + skb_queue_tail(&offload->skb_queue, skb); + received++; + } + + if (received) + can_rx_offload_schedule(offload); + + return received; +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); + +int 
can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) +{ + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) + return -ENOMEM; + + skb_queue_tail(&offload->skb_queue, skb); + can_rx_offload_schedule(offload); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); + +static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) +{ + offload->dev = dev; + + /* Limit queue len to 4x the weight (rounted to next power of two) */ + offload->skb_queue_len_max = 2 << fls(weight); + offload->skb_queue_len_max *= 4; + skb_queue_head_init(&offload->skb_queue); + + can_rx_offload_reset(offload); + netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); + + dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", + __func__, offload->skb_queue_len_max); + + return 0; +} + +int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) +{ + if (!offload->mailbox_read) + return -EINVAL; + + return can_rx_offload_init_queue(dev, offload, weight); +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo); + +void can_rx_offload_enable(struct can_rx_offload *offload) +{ + can_rx_offload_reset(offload); + napi_enable(&offload->napi); +} +EXPORT_SYMBOL_GPL(can_rx_offload_enable); + +void can_rx_offload_del(struct can_rx_offload *offload) +{ + netif_napi_del(&offload->napi); + skb_queue_purge(&offload->skb_queue); +} +EXPORT_SYMBOL_GPL(can_rx_offload_del); + +void can_rx_offload_reset(struct can_rx_offload *offload) +{ +} +EXPORT_SYMBOL_GPL(can_rx_offload_reset); diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h new file mode 100644 index 000000000000..cadfe9869600 --- /dev/null +++ b/include/linux/can/rx-offload.h @@ -0,0 +1,51 @@ +/* + * linux/can/rx-offload.h + * + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CAN_RX_OFFLOAD_H +#define _CAN_RX_OFFLOAD_H + +#include +#include + +struct can_rx_offload { + struct net_device *dev; + + unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, unsigned int mb); + + struct sk_buff_head skb_queue; + u32 skb_queue_len_max; + + struct napi_struct napi; +}; + +int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); +int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); +void can_rx_offload_reset(struct can_rx_offload *offload); +void can_rx_offload_del(struct can_rx_offload *offload); +void can_rx_offload_enable(struct can_rx_offload *offload); + +static inline void can_rx_offload_schedule(struct can_rx_offload *offload) +{ + napi_schedule(&offload->napi); +} + +static inline void can_rx_offload_disable(struct can_rx_offload *offload) +{ + napi_disable(&offload->napi); +} + +#endif /* !_CAN_RX_OFFLOAD_H */ -- cgit v1.2.3 From 3abbac0b5dd3e3b3dfb30a4885cdde5c20e1cb83 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 23 Sep 2014 15:28:21 +0200 Subject: can: rx-offload: Add support for timestamp based irq offloading Some CAN controllers don't implement a FIFO in hardware, but fill their mailboxes in a particular order (from lowest to highest or highest to lowest). This makes problems to read the frames in the correct order from the hardware, as new frames might be filled into just read (low) mailboxes. This gets worse, when following new frames are received into not read (higher) mailboxes. On the bright side some these CAN controllers put a timestamp on each received CAN frame. This patch adds support to offload CAN frames in interrupt context, order them by timestamp and then transmitted in a NAPI context. 
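For the timestamp variant the driver additionally tells the helper which mailbox range to scan and returns the per-frame timestamp from its mailbox_read callback; the frames are then sorted by that timestamp before NAPI delivers them. A rough sketch with hypothetical foo_* helpers (only the rx-offload interfaces are the ones added by this patch):

/* hypothetical glue: read mailbox n and report its RX timestamp */
static unsigned int foo_mailbox_read_ts(struct can_rx_offload *offload,
					struct can_frame *cf,
					u32 *timestamp, unsigned int n)
{
	struct foo_priv *priv = container_of(offload, struct foo_priv, offload);

	if (!foo_hw_mb_full(priv, n))		/* assumed register helper */
		return 0;

	*timestamp = foo_hw_mb_timestamp(priv, n);	/* assumed helper */
	foo_hw_read_mb(priv, cf, n);			/* assumed helper */
	return 1;
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u64 pending = foo_hw_pending_mailboxes(priv);	/* one bit per mailbox */

	can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
	return IRQ_HANDLED;
}

/* in probe: e.g. mailboxes 0..63, scanned from mb_first towards mb_last */
	priv->offload.mailbox_read = foo_mailbox_read_ts;
	priv->offload.mb_first = 0;
	priv->offload.mb_last = 63;
	err = can_rx_offload_add_timestamp(dev, &priv->offload);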
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/rx-offload.c | 137 ++++++++++++++++++++++++++++++++++++++++- include/linux/can/rx-offload.h | 10 ++- 2 files changed, 144 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index 7267c61762c7..f394f77d7528 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -18,6 +18,33 @@ #include #include +struct can_rx_offload_cb { + u32 timestamp; +}; + +static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); + + return (struct can_rx_offload_cb *)skb->cb; +} + +static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b) +{ + if (offload->inc) + return a <= b; + else + return a >= b; +} + +static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) +{ + if (offload->inc) + return (*val)++; + else + return (*val)--; +} + static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) { struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); @@ -49,9 +76,50 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) return work_done; } +static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, + int (*compare)(struct sk_buff *a, struct sk_buff *b)) +{ + struct sk_buff *pos, *insert = (struct sk_buff *)head; + + skb_queue_reverse_walk(head, pos) { + const struct can_rx_offload_cb *cb_pos, *cb_new; + + cb_pos = can_rx_offload_get_cb(pos); + cb_new = can_rx_offload_get_cb(new); + + netdev_dbg(new->dev, + "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n", + __func__, + cb_pos->timestamp, cb_new->timestamp, + cb_new->timestamp - cb_pos->timestamp, + skb_queue_len(head)); + + if (compare(pos, new) < 0) + continue; + insert = pos; + break; + } + + __skb_queue_after(head, insert, new); +} + +static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) +{ + const struct can_rx_offload_cb *cb_a, *cb_b; + + cb_a = can_rx_offload_get_cb(a); + cb_b = can_rx_offload_get_cb(b); + + /* Substract two u32 and return result as int, to keep + * difference steady around the u32 overflow. 
+ */ + return cb_b->timestamp - cb_a->timestamp; +} + static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) { struct sk_buff *skb = NULL; + struct can_rx_offload_cb *cb; struct can_frame *cf; int ret; @@ -62,15 +130,18 @@ static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload if (!skb) { struct can_frame cf_overflow; + u32 timestamp; - ret = offload->mailbox_read(offload, &cf_overflow, n); + ret = offload->mailbox_read(offload, &cf_overflow, + ×tamp, n); if (ret) offload->dev->stats.rx_dropped++; return NULL; } - ret = offload->mailbox_read(offload, cf, n); + cb = can_rx_offload_get_cb(skb); + ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); if (!ret) { kfree_skb(skb); return NULL; @@ -79,6 +150,48 @@ static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload return skb; } +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending) +{ + struct sk_buff_head skb_queue; + unsigned int i; + + __skb_queue_head_init(&skb_queue); + + for (i = offload->mb_first; + can_rx_offload_le(offload, i, offload->mb_last); + can_rx_offload_inc(offload, &i)) { + struct sk_buff *skb; + + if (!(pending & BIT_ULL(i))) + continue; + + skb = can_rx_offload_offload_one(offload, i); + if (!skb) + break; + + __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); + } + + if (!skb_queue_empty(&skb_queue)) { + unsigned long flags; + u32 queue_len; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + skb_queue_splice_tail(&skb_queue, &offload->skb_queue); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + if ((queue_len = skb_queue_len(&offload->skb_queue)) > + (offload->skb_queue_len_max / 8)) + netdev_dbg(offload->dev, "%s: queue_len=%d\n", + __func__, queue_len); + + can_rx_offload_schedule(offload); + } + + return skb_queue_len(&skb_queue); +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); + int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) { struct sk_buff *skb; @@ -127,6 +240,26 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo return 0; } +int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload) +{ + unsigned int weight; + + if (offload->mb_first > BITS_PER_LONG_LONG || + offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) + return -EINVAL; + + if (offload->mb_first < offload->mb_last) { + offload->inc = true; + weight = offload->mb_last - offload->mb_first; + } else { + offload->inc = false; + weight = offload->mb_first - offload->mb_last; + } + + return can_rx_offload_init_queue(dev, offload, weight);; +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); + int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) { if (!offload->mailbox_read) diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index cadfe9869600..cb31683bbe15 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -23,15 +23,23 @@ struct can_rx_offload { struct net_device *dev; - unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, unsigned int mb); + unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, + u32 *timestamp, unsigned int mb); struct sk_buff_head skb_queue; u32 skb_queue_len_max; + unsigned int mb_first; + unsigned int mb_last; + struct napi_struct napi; + + bool inc; }; +int can_rx_offload_add_timestamp(struct 
net_device *dev, struct can_rx_offload *offload); int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); void can_rx_offload_reset(struct can_rx_offload *offload); -- cgit v1.2.3 From 62d1086e87223260fcdc49c232d5c2e26ff05721 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Thu, 27 Aug 2015 16:01:27 +0200 Subject: can: flexcan: add missing register definitions This patch adds some missing register definitions, which are needed in an upcoming patch. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 43cfce8b076b..74909e599f31 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -55,7 +55,7 @@ #define FLEXCAN_MCR_WAK_SRC BIT(19) #define FLEXCAN_MCR_DOZE BIT(18) #define FLEXCAN_MCR_SRX_DIS BIT(17) -#define FLEXCAN_MCR_BCC BIT(16) +#define FLEXCAN_MCR_IRMQ BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) @@ -213,7 +213,10 @@ struct flexcan_regs { u32 imask1; /* 0x28 */ u32 iflag2; /* 0x2c */ u32 iflag1; /* 0x30 */ - u32 ctrl2; /* 0x34 */ + union { /* 0x34 */ + u32 gfwr_mx28; /* MX28, MX53 */ + u32 ctrl2; /* MX6, VF610 */ + }; u32 esr2; /* 0x38 */ u32 imeur; /* 0x3c */ u32 lrfr; /* 0x40 */ @@ -232,7 +235,11 @@ struct flexcan_regs { * size conf'ed via ctrl2::RFFN * (mx6, vf610) */ - u32 _reserved4[408]; + u32 _reserved4[256]; /* 0x480 */ + u32 rximr[64]; /* 0x880 */ + u32 _reserved5[24]; /* 0x980 */ + u32 gfwr_mx6; /* 0x9e0 - MX6 */ + u32 _reserved6[63]; /* 0x9e4 */ u32 mecr; /* 0xae0 */ u32 erriar; /* 0xae4 */ u32 erridpr; /* 0xae8 */ -- cgit v1.2.3 From 1c10feee3e033b898a293d813ef4a4ebfb88dba2 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Mon, 4 Jul 2016 14:43:53 +0200 Subject: can: flexcan: remove write-only member pdata of struct flexcan_priv This patch removes the write only member pdata from the struct flexcan_priv. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 74909e599f31..44a5aae3cefe 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -264,7 +264,6 @@ struct flexcan_priv { struct clk *clk_ipg; struct clk *clk_per; - struct flexcan_platform_data *pdata; const struct flexcan_devtype_data *devtype_data; struct regulator *reg_xceiver; }; @@ -1230,7 +1229,6 @@ static int flexcan_probe(struct platform_device *pdev) priv->regs = regs; priv->clk_ipg = clk_ipg; priv->clk_per = clk_per; - priv->pdata = dev_get_platdata(&pdev->dev); priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; -- cgit v1.2.3 From a3c11a7ac6cdd670324df39244ed1d2321bb0c9c Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Mon, 4 Jul 2016 14:45:44 +0200 Subject: can: flexcan: make declaration of devtype_data const This patch changes the declaration of the devtype data to const. 
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 44a5aae3cefe..84e465a94a96 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -268,17 +268,17 @@ struct flexcan_priv { struct regulator *reg_xceiver; }; -static struct flexcan_devtype_data fsl_p1010_devtype_data = { +static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, }; -static struct flexcan_devtype_data fsl_imx28_devtype_data; +static const struct flexcan_devtype_data fsl_imx28_devtype_data; -static struct flexcan_devtype_data fsl_imx6q_devtype_data = { +static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG, }; -static struct flexcan_devtype_data fsl_vf610_devtype_data = { +static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR, }; -- cgit v1.2.3 From d166f56bf58b64027679905a01001df9ac0764bd Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 17 Jan 2017 17:33:46 +0100 Subject: can: flexcan: do_bus_err(): convert rx_,tx_errors into bool This patch converts the rx_errors and tx_errors from int into bool values, to reflect their actual meaning. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 84e465a94a96..84ebb3e5958d 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -524,41 +524,41 @@ static void do_bus_err(struct net_device *dev, struct can_frame *cf, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); - int rx_errors = 0, tx_errors = 0; + bool rx_errors = false, tx_errors = false; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { netdev_dbg(dev, "BIT1_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT1; - tx_errors = 1; + tx_errors = true; } if (reg_esr & FLEXCAN_ESR_BIT0_ERR) { netdev_dbg(dev, "BIT0_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT0; - tx_errors = 1; + tx_errors = true; } if (reg_esr & FLEXCAN_ESR_ACK_ERR) { netdev_dbg(dev, "ACK_ERR irq\n"); cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; - tx_errors = 1; + tx_errors = true; } if (reg_esr & FLEXCAN_ESR_CRC_ERR) { netdev_dbg(dev, "CRC_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; - rx_errors = 1; + rx_errors = true; } if (reg_esr & FLEXCAN_ESR_FRM_ERR) { netdev_dbg(dev, "FRM_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_FORM; - rx_errors = 1; + rx_errors = true; } if (reg_esr & FLEXCAN_ESR_STF_ERR) { netdev_dbg(dev, "STF_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_STUFF; - rx_errors = 1; + rx_errors = true; } priv->can.can_stats.bus_error++; -- cgit v1.2.3 From 238443df8154696018fd01c1a7c8fb352b9ee49e Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 18 Jan 2017 11:25:41 +0100 Subject: can: flexcan: flexcan_poll_state(): no need to initialize new_state, rx_state, tx_state This patch removed the not needed initialisation from the new_state, rx_state, tx_state variabled. 
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 84ebb3e5958d..74715174802f 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -591,7 +591,7 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) struct flexcan_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; - enum can_state new_state = 0, rx_state = 0, tx_state = 0; + enum can_state new_state, rx_state, tx_state; int flt; struct can_berr_counter bec; -- cgit v1.2.3 From a5c02f668c36c05fbbb29d157507be4868ecd176 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 18 Jan 2017 11:38:26 +0100 Subject: can: flexcan: flexcan_poll_bus_err(): fold in do_bus_err() This patch folds in the do_bus_err() function into flexcan_poll_bus_err(). Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 74715174802f..649d636bc41e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -520,12 +520,17 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static void do_bus_err(struct net_device *dev, - struct can_frame *cf, u32 reg_esr) +static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); + struct sk_buff *skb; + struct can_frame *cf; bool rx_errors = false, tx_errors = false; + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { @@ -566,18 +571,6 @@ static void do_bus_err(struct net_device *dev, dev->stats.rx_errors++; if (tx_errors) dev->stats.tx_errors++; -} - -static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) -{ - struct sk_buff *skb; - struct can_frame *cf; - - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; - - do_bus_err(dev, cf, reg_esr); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; -- cgit v1.2.3 From dd2f122a96ea041679f46c063a18145a7cf6ff5f Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 18 Jan 2017 11:45:14 +0100 Subject: can: flexcan: flexcan_irq(): don't unconditionally return IRQ_HANDLED This patch changes the flexcan_irq() function to only return IRQ_HANDLED, if the interrupt really has been handled, otherwise IRQ_NONE is returned. 
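The motivation is that both shared interrupt lines and the kernel's spurious-interrupt detection depend on a handler claiming only the interrupts it actually serviced. The resulting pattern, as a generic sketch (hypothetical foo_* names, not flexcan code):

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	irqreturn_t handled = IRQ_NONE;
	u32 status = foo_read_irq_status(priv);	/* assumed register helper */

	if (status & FOO_IRQ_RX) {		/* hypothetical status bit */
		handled = IRQ_HANDLED;
		/* ... handle reception ... */
	}

	if (status & FOO_IRQ_TX_DONE) {		/* hypothetical status bit */
		handled = IRQ_HANDLED;
		/* ... handle TX completion ... */
	}

	/* IRQ_NONE tells the IRQ core the event was not caused by this device */
	return handled;
}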
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 649d636bc41e..725baba95952 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -717,15 +717,12 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) struct net_device_stats *stats = &dev->stats; struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; + irqreturn_t handled = IRQ_NONE; u32 reg_iflag1, reg_esr; reg_iflag1 = flexcan_read(®s->iflag1); reg_esr = flexcan_read(®s->esr); - /* ACK all bus error and state change IRQ sources */ - if (reg_esr & FLEXCAN_ESR_ALL_INT) - flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); - /* schedule NAPI in case of: * - rx IRQ * - state change IRQ @@ -734,6 +731,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) || (reg_esr & FLEXCAN_ESR_ERR_STATE) || flexcan_has_and_handle_berr(priv, reg_esr)) { + handled = IRQ_HANDLED; /* The error bits are cleared on read, * save them for later use. */ @@ -747,6 +745,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* FIFO overflow */ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { + handled = IRQ_HANDLED; flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1); dev->stats.rx_over_errors++; dev->stats.rx_errors++; @@ -754,6 +753,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* transmission complete interrupt */ if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) { + handled = IRQ_HANDLED; stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); @@ -765,7 +765,13 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) netif_wake_queue(dev); } - return IRQ_HANDLED; + /* ACK all bus error and state change IRQ sources */ + if (reg_esr & FLEXCAN_ESR_ALL_INT) { + handled = IRQ_HANDLED; + flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); + } + + return handled; } static void flexcan_set_bittiming(struct net_device *dev) -- cgit v1.2.3 From 28ac7dcd5b1354e46f5f28a53b5a6b443fcc88ed Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 4 Aug 2015 13:46:10 +0200 Subject: can: flexcan: calculate default value for imask1 during runtime This patch converts the define FLEXCAN_IFLAG_DEFAULT into the runtime calculated value priv->reg_imask1_default. This is a preparation patch to make the TX mailbox selectable during runtime, too. 
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 725baba95952..0a00a6dd7994 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -149,9 +149,6 @@ #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) -#define FLEXCAN_IFLAG_DEFAULT \ - (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \ - FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID)) /* FLEXCAN message buffers */ #define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) @@ -261,6 +258,7 @@ struct flexcan_priv { struct flexcan_regs __iomem *regs; u32 reg_esr; u32 reg_ctrl_default; + u32 reg_imask1_default; struct clk *clk_ipg; struct clk *clk_per; @@ -704,7 +702,7 @@ static int flexcan_poll(struct napi_struct *napi, int quota) if (work_done < quota) { napi_complete_done(napi, work_done); /* enable IRQs */ - flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); + flexcan_write(priv->reg_imask1_default, ®s->imask1); flexcan_write(priv->reg_ctrl_default, ®s->ctrl); } @@ -736,7 +734,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) * save them for later use. */ priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS; - flexcan_write(FLEXCAN_IFLAG_DEFAULT & + flexcan_write(priv->reg_imask1_default & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->imask1); flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, ®s->ctrl); @@ -947,7 +945,7 @@ static int flexcan_chip_start(struct net_device *dev) /* enable interrupts atomically */ disable_irq(dev->irq); flexcan_write(priv->reg_ctrl_default, ®s->ctrl); - flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); + flexcan_write(priv->reg_imask1_default, ®s->imask1); enable_irq(dev->irq); /* print chip status */ @@ -1231,6 +1229,10 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; + priv->reg_imask1_default = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | + FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | + FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID); + netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); platform_set_drvdata(pdev, dev); -- cgit v1.2.3 From b93917c3700636dadc6688144e7d55e07699367b Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Sun, 12 Jul 2015 00:47:47 +0200 Subject: can: flexcan: make TX mailbox selectable during runtime This patch makes the TX mailbox selectable duing runtime. This is a preparation patch to use of the hardware FIFO selectable via runtime. As the TX mailbox number is different in HW FIFO and normal mode. 
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 0a00a6dd7994..1a7df29a0f32 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -143,9 +143,9 @@ /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ -#define FLEXCAN_TX_BUF_RESERVED 8 -#define FLEXCAN_TX_BUF_ID 9 -#define FLEXCAN_IFLAG_BUF(x) BIT(x) +#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 +#define FLEXCAN_TX_MB_OFF_FIFO 9 +#define FLEXCAN_IFLAG_MB(x) BIT(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) @@ -256,6 +256,9 @@ struct flexcan_priv { struct napi_struct napi; struct flexcan_regs __iomem *regs; + struct flexcan_mb __iomem *tx_mb; + struct flexcan_mb __iomem *tx_mb_reserved; + u8 tx_mb_idx; u32 reg_esr; u32 reg_ctrl_default; u32 reg_imask1_default; @@ -472,7 +475,6 @@ static int flexcan_get_berr_counter(const struct net_device *dev, static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); - struct flexcan_regs __iomem *regs = priv->regs; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id; u32 data; @@ -495,25 +497,25 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) if (cf->can_dlc > 0) { data = be32_to_cpup((__be32 *)&cf->data[0]); - flexcan_write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[0]); + flexcan_write(data, &priv->tx_mb->data[0]); } if (cf->can_dlc > 3) { data = be32_to_cpup((__be32 *)&cf->data[4]); - flexcan_write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[1]); + flexcan_write(data, &priv->tx_mb->data[1]); } can_put_echo_skb(skb, dev, 0); - flexcan_write(can_id, ®s->mb[FLEXCAN_TX_BUF_ID].can_id); - flexcan_write(ctrl, ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl); + flexcan_write(can_id, &priv->tx_mb->can_id); + flexcan_write(ctrl, &priv->tx_mb->can_ctrl); /* Errata ERR005829 step8: * Write twice INACTIVE(0x8) code to first MB. 
*/ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + &priv->tx_mb_reserved->can_ctrl); flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + &priv->tx_mb_reserved->can_ctrl); return NETDEV_TX_OK; } @@ -750,7 +752,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) } /* transmission complete interrupt */ - if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) { + if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { handled = IRQ_HANDLED; stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; @@ -758,8 +760,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* after sending a RTR frame MB is in RX mode */ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl); - flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); + &priv->tx_mb->can_ctrl); + flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag1); netif_wake_queue(dev); } @@ -849,7 +851,7 @@ static int flexcan_chip_start(struct net_device *dev) reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | - FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID); + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); flexcan_write(reg_mcr, ®s->mcr); @@ -887,18 +889,18 @@ static int flexcan_chip_start(struct net_device *dev) flexcan_write(reg_ctrl, ®s->ctrl); /* clear and invalidate all mailboxes first */ - for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->mb); i++) { + for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, ®s->mb[i].can_ctrl); } /* Errata ERR005829: mark first TX mailbox as INACTIVE */ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + &priv->tx_mb_reserved->can_ctrl); /* mark TX mailbox as INACTIVE */ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl); + &priv->tx_mb->can_ctrl); /* acceptance mask/acceptance code (accept everything) */ flexcan_write(0x0, ®s->rxgmask); @@ -1229,9 +1231,13 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; + priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; + priv->tx_mb = ®s->mb[priv->tx_mb_idx]; + priv->reg_imask1_default = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | - FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID); + FLEXCAN_IFLAG_MB(priv->tx_mb_idx); netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); -- cgit v1.2.3 From 30164759db1bb1062bdf0f92451f57ed295cfa48 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Sun, 10 May 2015 15:26:58 +0200 Subject: can: flexcan: make use of rx-offload's irq_offload_fifo This patch converts the flexcan driver to make use of the rx-offload can_rx_offload_irq_offload_fifo() helper function. The idea is to read the CAN frames already in the interrupt context, as the depth of the flexcan HW FIFO is too shallow, resulting in too many missed frames. During a normal NAPI poll the frames are the pushed into the upper layers. 
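As a reading aid for the large diff that follows, the receive path changes roughly like this (summary only, not new code):

/*
 * before: flexcan_irq() masks the RX/error interrupts and schedules NAPI;
 *         flexcan_poll() then reads frames out of the 6-entry HW FIFO,
 *         calls netif_receive_skb() and re-enables the interrupts.
 *
 * after:  flexcan_irq() calls can_rx_offload_irq_offload_fifo(), which
 *         empties the HW FIFO into the offload skb queue while still in
 *         IRQ context; the rx-offload NAPI poll only dequeues those skbs
 *         and hands them to the stack. State changes and bus errors are
 *         queued from IRQ context via can_rx_offload_irq_queue_err_skb().
 */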
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 164 +++++++++++++++------------------------------- 1 file changed, 52 insertions(+), 112 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 1a7df29a0f32..57ad79cadd60 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -253,13 +254,12 @@ struct flexcan_devtype_data { struct flexcan_priv { struct can_priv can; - struct napi_struct napi; + struct can_rx_offload offload; struct flexcan_regs __iomem *regs; struct flexcan_mb __iomem *tx_mb; struct flexcan_mb __iomem *tx_mb_reserved; u8 tx_mb_idx; - u32 reg_esr; u32 reg_ctrl_default; u32 reg_imask1_default; @@ -338,13 +338,6 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) return regulator_disable(priv->reg_xceiver); } -static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, - u32 reg_esr) -{ - return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && - (reg_esr & FLEXCAN_ESR_ERR_BUS); -} - static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; @@ -520,7 +513,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) +static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct sk_buff *skb; @@ -529,7 +522,7 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) - return 0; + return; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; @@ -572,14 +565,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; - netif_receive_skb(skb); - - return 1; + can_rx_offload_irq_queue_err_skb(&priv->offload, skb); } -static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) +static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct sk_buff *skb; @@ -605,33 +594,42 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) /* state hasn't changed */ if (likely(new_state == priv->can.state)) - return 0; + return; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) - return 0; + return; can_change_state(dev, cf, tx_state, rx_state); if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; - netif_receive_skb(skb); + can_rx_offload_irq_queue_err_skb(&priv->offload, skb); +} - return 1; +static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) +{ + return container_of(offload, struct flexcan_priv, offload); } -static void flexcan_read_fifo(const struct net_device *dev, - struct can_frame *cf) +static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, + struct can_frame *cf, + u32 *timestamp, unsigned int n) { - const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_priv *priv = rx_offload_to_priv(offload); struct flexcan_regs __iomem *regs = priv->regs; - struct flexcan_mb __iomem *mb = ®s->mb[0]; - u32 reg_ctrl, reg_id; + struct flexcan_mb __iomem *mb = ®s->mb[n]; + u32 reg_ctrl, reg_id, reg_iflag1; + + reg_iflag1 = flexcan_read(®s->iflag1); + if (!(reg_iflag1 & 
FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) + return 0; reg_ctrl = flexcan_read(&mb->can_ctrl); + /* increase timstamp to full 32 bit */ + *timestamp = reg_ctrl << 16; + reg_id = flexcan_read(&mb->can_id); if (reg_ctrl & FLEXCAN_MB_CNT_IDE) cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; @@ -648,69 +646,10 @@ static void flexcan_read_fifo(const struct net_device *dev, /* mark as read */ flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); flexcan_read(®s->timer); -} - -static int flexcan_read_frame(struct net_device *dev) -{ - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf; - struct sk_buff *skb; - - skb = alloc_can_skb(dev, &cf); - if (unlikely(!skb)) { - stats->rx_dropped++; - return 0; - } - - flexcan_read_fifo(dev, cf); - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); - - can_led_event(dev, CAN_LED_EVENT_RX); return 1; } -static int flexcan_poll(struct napi_struct *napi, int quota) -{ - struct net_device *dev = napi->dev; - const struct flexcan_priv *priv = netdev_priv(dev); - struct flexcan_regs __iomem *regs = priv->regs; - u32 reg_iflag1, reg_esr; - int work_done = 0; - - /* The error bits are cleared on read, - * use saved value from irq handler. - */ - reg_esr = flexcan_read(®s->esr) | priv->reg_esr; - - /* handle state changes */ - work_done += flexcan_poll_state(dev, reg_esr); - - /* handle RX-FIFO */ - reg_iflag1 = flexcan_read(®s->iflag1); - while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE && - work_done < quota) { - work_done += flexcan_read_frame(dev); - reg_iflag1 = flexcan_read(®s->iflag1); - } - - /* report bus errors */ - if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota) - work_done += flexcan_poll_bus_err(dev, reg_esr); - - if (work_done < quota) { - napi_complete_done(napi, work_done); - /* enable IRQs */ - flexcan_write(priv->reg_imask1_default, ®s->imask1); - flexcan_write(priv->reg_ctrl_default, ®s->ctrl); - } - - return work_done; -} - static irqreturn_t flexcan_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; @@ -721,29 +660,14 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) u32 reg_iflag1, reg_esr; reg_iflag1 = flexcan_read(®s->iflag1); - reg_esr = flexcan_read(®s->esr); - /* schedule NAPI in case of: - * - rx IRQ - * - state change IRQ - * - bus error IRQ and bus error reporting is activated - */ - if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) || - (reg_esr & FLEXCAN_ESR_ERR_STATE) || - flexcan_has_and_handle_berr(priv, reg_esr)) { + /* reception interrupt */ + if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { handled = IRQ_HANDLED; - /* The error bits are cleared on read, - * save them for later use. 
- */ - priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS; - flexcan_write(priv->reg_imask1_default & - ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1); - flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, - &regs->ctrl); - napi_schedule(&priv->napi); + can_rx_offload_irq_offload_fifo(&priv->offload); } - /* FIFO overflow */ + /* FIFO overflow interrupt */ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { handled = IRQ_HANDLED; flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1); @@ -765,12 +689,23 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) netif_wake_queue(dev); } + reg_esr = flexcan_read(&regs->esr); + /* ACK all bus error and state change IRQ sources */ if (reg_esr & FLEXCAN_ESR_ALL_INT) { handled = IRQ_HANDLED; flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr); } + /* state change interrupt */ + if (reg_esr & FLEXCAN_ESR_ERR_STATE) + flexcan_irq_state(dev, reg_esr); + + /* bus error IRQ - handle if bus error reporting is activated */ + if ((reg_esr & FLEXCAN_ESR_ERR_BUS) && + (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + flexcan_irq_bus_err(dev, reg_esr); + return handled; } @@ -1013,7 +948,7 @@ static int flexcan_open(struct net_device *dev) can_led_event(dev, CAN_LED_EVENT_OPEN); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(dev); return 0; @@ -1035,7 +970,7 @@ static int flexcan_close(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); flexcan_chip_stop(dev); free_irq(dev->irq, dev); @@ -1213,6 +1148,9 @@ static int flexcan_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &flexcan_netdev_ops; dev->irq = irq; dev->flags |= IFF_ECHO; @@ -1239,10 +1177,11 @@ static int flexcan_probe(struct platform_device *pdev) FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | FLEXCAN_IFLAG_MB(priv->tx_mb_idx); - netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); + priv->offload.mailbox_read = flexcan_mailbox_read; - platform_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); + err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT); + if (err) + goto failed_offload; err = register_flexcandev(dev); if (err) { @@ -1257,6 +1196,7 @@ static int flexcan_probe(struct platform_device *pdev) return 0; + failed_offload: failed_register: free_candev(dev); return err; @@ -1268,7 +1208,7 @@ static int flexcan_remove(struct platform_device *pdev) struct flexcan_priv *priv = netdev_priv(dev); unregister_flexcandev(dev); - netif_napi_del(&priv->napi); + can_rx_offload_del(&priv->offload); free_candev(dev); return 0; -- cgit v1.2.3 From 4bd888a80b1d48dbd83f1cbf806e923a30051958 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Mon, 31 Aug 2015 21:03:29 +0200 Subject: can: flexcan: activate individual RX masking and initialize reg_rximr Modern flexcan IP cores support two RX modes. One is using the 6 frames deep hardware FIFO, the other is using up to 64 mailboxes (in non FIFO mode). For now only the HW FIFO mode is activated. In order to make use of the RX mailboxes the individual RX masking feature has to be activated, otherwise matching mailboxes are overwritten during the reception process. Activating this feature, however, also brings the reg_rximr mask registers into play, so they need to be initialized.
This patch activates the individual RX masking feature unconditionally and initializes the mask registers (reg_rximr) with 0x0 == "don't care", which switches off any filtering. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 57ad79cadd60..27121f9cd59b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -779,6 +779,7 @@ static int flexcan_chip_start(struct net_device *dev) * only supervisor access * enable warning int * disable local echo + * enable individual RX masking * choose format C * set max mailbox number */ @@ -786,7 +787,8 @@ static int flexcan_chip_start(struct net_device *dev) reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | - FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C | + FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); flexcan_write(reg_mcr, &regs->mcr); @@ -845,6 +847,10 @@ static int flexcan_chip_start(struct net_device *dev) if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG) flexcan_write(0x0, &regs->rxfgmask); + /* clear acceptance filters */ + for (i = 0; i < ARRAY_SIZE(regs->mb); i++) + flexcan_write(0, &regs->rximr[i]); + /* On Vybrid, disable memory error detection interrupts * and freeze mode. * This also works around errata e5295 which generates -- cgit v1.2.3 From 9eb7aa891101a4a09114ff3191f9877ea35eae06 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 1 Sep 2015 08:57:55 +0200 Subject: can: flexcan: add quirk FLEXCAN_QUIRK_ENABLE_EACEN_RRS In order to receive RTR frames in the non HW FIFO mode, the RRS and EACEN bits of the reg_ctrl2 have to be activated. As this has no side effect in the FIFO mode, we do this unconditionally on cores with the reg_ctrl2.
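In essence the change is a single quirk-gated read-modify-write of reg_ctrl2 during chip start. The following is a condensed sketch rather than a literal excerpt of the hunk below; all identifiers are taken from the diff, only the comment is added here:

        if (priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS) {
                /* needed so RTR frames are matched and stored by RX mailboxes */
                reg_ctrl2 = flexcan_read(&regs->ctrl2);
                reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
                flexcan_write(reg_ctrl2, &regs->ctrl2);
        }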
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 27121f9cd59b..a0fdced0e849 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -187,7 +187,8 @@ */ #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ -#define FLEXCAN_QUIRK_DISABLE_MECR BIT(3) /* Disble Memory error detection */ +#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ +#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */ /* Structure of the message buffer */ struct flexcan_mb { @@ -276,11 +277,12 @@ static const struct flexcan_devtype_data fsl_p1010_devtype_data = { static const struct flexcan_devtype_data fsl_imx28_devtype_data; static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { - .quirks = FLEXCAN_QUIRK_DISABLE_RXFG, + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS, }; static const struct flexcan_devtype_data fsl_vf610_devtype_data = { - .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR, + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -825,6 +827,12 @@ static int flexcan_chip_start(struct net_device *dev) netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); flexcan_write(reg_ctrl, &regs->ctrl); + if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { + reg_ctrl2 = flexcan_read(&regs->ctrl2); + reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS; + flexcan_write(reg_ctrl2, &regs->ctrl2); + } + /* clear and invalidate all mailboxes first */ for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, -- cgit v1.2.3 From b3cf53e988cedb11a407d65abafca836da78b7c9 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 1 Sep 2015 09:00:13 +0200 Subject: can: flexcan: add support for timestamp based rx-offload The flexcan IP core has 64 mailboxes. For now they are configured for RX as a hardware FIFO. This FIFO has a fixed depth of 6 CAN frames. In some high load scenarios it turns out that this buffer is too small. In order to have a buffer larger than the 6 frames FIFO, this patch adds support for timestamp based offloading via the generic rx-offload infrastructure.
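At its core, the new mode swaps the FIFO registration for a timestamp based one at probe time. A condensed sketch of the registration path, pieced together from the hunks below (all identifiers come from the diff; the quirk check and error handling are omitted):

        /* hand RX mailboxes 2..63 to the generic rx-offload layer, which
         * delivers them to the stack in timestamp order instead of draining
         * the 6-frame hardware FIFO
         */
        priv->offload.mailbox_read = flexcan_mailbox_read;
        priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
        priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST;

        /* unmask the RX interrupt of every mailbox in that range */
        imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first);
        priv->reg_imask1_default |= imask;
        priv->reg_imask2_default |= imask >> 32;

        err = can_rx_offload_add_timestamp(dev, &priv->offload);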
Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 158 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 129 insertions(+), 29 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index a0fdced0e849..6c84afed5e6c 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -3,7 +3,8 @@ * * Copyright (c) 2005-2006 Varma Electronics Oy * Copyright (c) 2009 Sascha Hauer, Pengutronix - * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix + * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde + * Copyright (c) 2014 David Jander, Protonic Holland * * Based on code originally by Andrey Volkov * @@ -59,6 +60,7 @@ #define FLEXCAN_MCR_IRMQ BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) +/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */ #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) #define FLEXCAN_MCR_IDAM_A (0x0 << 8) #define FLEXCAN_MCR_IDAM_B (0x1 << 8) @@ -146,12 +148,18 @@ /* Errata ERR005829 step7: Reserve first valid MB */ #define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 #define FLEXCAN_TX_MB_OFF_FIFO 9 +#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 +#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 #define FLEXCAN_IFLAG_MB(x) BIT(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) /* FLEXCAN message buffers */ +#define FLEXCAN_MB_CODE_MASK (0xf << 24) +#define FLEXCAN_MB_CODE_RX_BUSY_BIT (0x1 << 24) #define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) #define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) #define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) @@ -189,6 +197,7 @@ #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */ +#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ /* Structure of the message buffer */ struct flexcan_mb { @@ -263,6 +272,7 @@ struct flexcan_priv { u8 tx_mb_idx; u32 reg_ctrl_default; u32 reg_imask1_default; + u32 reg_imask2_default; struct clk *clk_ipg; struct clk *clk_per; @@ -624,11 +634,32 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, struct flexcan_mb __iomem *mb = ®s->mb[n]; u32 reg_ctrl, reg_id, reg_iflag1; - reg_iflag1 = flexcan_read(®s->iflag1); - if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) - return 0; + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u32 code; + + do { + reg_ctrl = flexcan_read(&mb->can_ctrl); + } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT); + + /* is this MB empty? 
*/ + code = reg_ctrl & FLEXCAN_MB_CODE_MASK; + if ((code != FLEXCAN_MB_CODE_RX_FULL) && + (code != FLEXCAN_MB_CODE_RX_OVERRUN)) + return 0; + + if (code == FLEXCAN_MB_CODE_RX_OVERRUN) { + /* This MB was overrun, we lost data */ + offload->dev->stats.rx_over_errors++; + offload->dev->stats.rx_errors++; + } + } else { + reg_iflag1 = flexcan_read(®s->iflag1); + if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) + return 0; + + reg_ctrl = flexcan_read(&mb->can_ctrl); + } - reg_ctrl = flexcan_read(&mb->can_ctrl); /* increase timstamp to full 32 bit */ *timestamp = reg_ctrl << 16; @@ -646,12 +677,33 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1])); /* mark as read */ - flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); - flexcan_read(®s->timer); + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + /* Clear IRQ */ + if (n < 32) + flexcan_write(BIT(n), ®s->iflag1); + else + flexcan_write(BIT(n - 32), ®s->iflag2); + } else { + flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); + flexcan_read(®s->timer); + } return 1; } + +static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + u32 iflag1, iflag2; + + iflag2 = flexcan_read(®s->iflag2) & priv->reg_imask2_default; + iflag1 = flexcan_read(®s->iflag1) & priv->reg_imask1_default & + ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + + return (u64)iflag2 << 32 | iflag1; +} + static irqreturn_t flexcan_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; @@ -664,17 +716,30 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) reg_iflag1 = flexcan_read(®s->iflag1); /* reception interrupt */ - if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { - handled = IRQ_HANDLED; - can_rx_offload_irq_offload_fifo(&priv->offload); - } + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u64 reg_iflag; + int ret; + + while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) { + handled = IRQ_HANDLED; + ret = can_rx_offload_irq_offload_timestamp(&priv->offload, + reg_iflag); + if (!ret) + break; + } + } else { + if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { + handled = IRQ_HANDLED; + can_rx_offload_irq_offload_fifo(&priv->offload); + } - /* FIFO overflow interrupt */ - if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { - handled = IRQ_HANDLED; - flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1); - dev->stats.rx_over_errors++; - dev->stats.rx_errors++; + /* FIFO overflow interrupt */ + if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { + handled = IRQ_HANDLED; + flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1); + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } } /* transmission complete interrupt */ @@ -787,10 +852,17 @@ static int flexcan_chip_start(struct net_device *dev) */ reg_mcr = flexcan_read(®s->mcr); reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); - reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | - FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | - FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C | - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | + FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | + FLEXCAN_MCR_IDAM_C; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + reg_mcr &= ~FLEXCAN_MCR_FEN; + reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); + } else { + reg_mcr |= FLEXCAN_MCR_FEN | + 
FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + } netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); flexcan_write(reg_mcr, ®s->mcr); @@ -839,6 +911,12 @@ static int flexcan_chip_start(struct net_device *dev) ®s->mb[i].can_ctrl); } + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) + flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY, + ®s->mb[i].can_ctrl); + } + /* Errata ERR005829: mark first TX mailbox as INACTIVE */ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb_reserved->can_ctrl); @@ -897,6 +975,7 @@ static int flexcan_chip_start(struct net_device *dev) disable_irq(dev->irq); flexcan_write(priv->reg_ctrl_default, ®s->ctrl); flexcan_write(priv->reg_imask1_default, ®s->imask1); + flexcan_write(priv->reg_imask2_default, ®s->imask2); enable_irq(dev->irq); /* print chip status */ @@ -926,6 +1005,7 @@ static void flexcan_chip_stop(struct net_device *dev) flexcan_chip_disable(priv); /* Disable all interrupts */ + flexcan_write(0, ®s->imask2); flexcan_write(0, ®s->imask1); flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, ®s->ctrl); @@ -1058,8 +1138,9 @@ static int register_flexcandev(struct net_device *dev) flexcan_write(reg, ®s->mcr); /* Currently we only support newer versions of this core - * featuring a RX FIFO. Older cores found on some Coldfire - * derivates are not yet supported. + * featuring a RX hardware FIFO (although this driver doesn't + * make use of it on some cores). Older cores, found on some + * Coldfire derivates are not tested. */ reg = flexcan_read(®s->mcr); if (!(reg & FLEXCAN_MCR_FEN)) { @@ -1183,17 +1264,36 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; - priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; - priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP; + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; + } else { + priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; + } priv->tx_mb = ®s->mb[priv->tx_mb_idx]; - priv->reg_imask1_default = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | - FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | - FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + priv->reg_imask2_default = 0; priv->offload.mailbox_read = flexcan_mailbox_read; - err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT); + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u64 imask; + + priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; + priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST; + + imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first); + priv->reg_imask1_default |= imask; + priv->reg_imask2_default |= imask >> 32; + + err = can_rx_offload_add_timestamp(dev, &priv->offload); + } else { + priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | + FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; + err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT); + } if (err) goto failed_offload; -- cgit v1.2.3 From 096de07f1d7156e500dec5abe89e9b44314e38e5 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 1 Sep 2015 10:28:46 +0200 Subject: can: flexcan: switch imx6 and vf610 to timestamp based offloading This patch switches the imx6 and vf610 based SoCs from the hardware FIFO to the timestamp 
based rx offloading. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 6c84afed5e6c..ea57fed375c6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -287,12 +287,13 @@ static const struct flexcan_devtype_data fsl_p1010_devtype_data = { static const struct flexcan_devtype_data fsl_imx28_devtype_data; static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { - .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS, + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, }; static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR, + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, }; static const struct can_bittiming_const flexcan_bittiming_const = { -- cgit v1.2.3 From b60a00f9c5f14695991cb77dce7e926623269d88 Mon Sep 17 00:00:00 2001 From: Jingju Hou Date: Mon, 6 Feb 2017 14:58:13 +0800 Subject: net: mvneta: implement .set_wol and .get_wol The mvneta itself does not support WOL, but the PHY might. So pass the calls to the PHY Signed-off-by: Jingju Hou Signed-off-by: Jisheng Zhang Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index de6c47744b8e..0f4d1697be46 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3927,6 +3927,25 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, return 0; } +static void mvneta_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); +} + +static int mvneta_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + if (!dev->phydev) + return -EOPNOTSUPP; + + return phy_ethtool_set_wol(dev->phydev, wol); +} + static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, @@ -3956,6 +3975,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = { .set_rxfh = mvneta_ethtool_set_rxfh, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = mvneta_ethtool_set_link_ksettings, + .get_wol = mvneta_ethtool_get_wol, + .set_wol = mvneta_ethtool_set_wol, }; /* Initialize hw */ -- cgit v1.2.3 From e158e5ef24d191462428ff1be6b5048c0168fd14 Mon Sep 17 00:00:00 2001 From: Elad Raz Date: Mon, 6 Feb 2017 13:56:27 +0100 Subject: mlxsw: reg: Fix HTGT register length HTGT register length is limited to 32 bytes and not 256 bytes. Signed-off-by: Elad Raz Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index b50a312d89c1..0899e2d310e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3653,7 +3653,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) * Configures the properties for forwarding to CPU. 
*/ #define MLXSW_REG_HTGT_ID 0x7002 -#define MLXSW_REG_HTGT_LEN 0x100 +#define MLXSW_REG_HTGT_LEN 0x20 MLXSW_REG_DEFINE(htgt, MLXSW_REG_HTGT_ID, MLXSW_REG_HTGT_LEN); -- cgit v1.2.3 From f32f5bd2eb7e91a5090c06bbe1ed14bffbbda5d3 Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Thu, 19 Nov 2015 17:12:26 +0200 Subject: net/mlx5: Configure cache line size for start and end padding There is a hardware feature that will pad the start or end of a DMA to be cache line aligned to avoid RMWs on the last cache line. The default cache line size setting for this feature is 64B. This change configures the hardware to use 128B alignment on systems with 128B cache lines. In addition we lower bound MPWRQ stride by HCA cacheline in mlx5e, MPWRQ stride should be at least the HCA cacheline, the current default is 64B and in case HCA_CAP.cach_line_128byte capability is set, MPWRQ RX stride will automatically be aligned to 128B. Signed-off-by: Daniel Jurgens Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 9 +++++++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 ++++++ include/linux/mlx5/mlx5_ifc.h | 6 ++++-- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9b52c58cd528..9b23d3329847 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -70,8 +70,13 @@ #define MLX5_RX_HEADROOM NET_SKB_PAD -#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */ -#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */ +#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ + (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ +#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \ + max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) +#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6) +#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8) + #define MLX5_MPWRQ_LOG_WQE_SZ 18 #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ab6f4d3b8063..1b7fe43ab22b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -89,8 +89,8 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; priv->params.mpwqe_log_stride_sz = MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? - MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : - MLX5_MPWRQ_LOG_STRIDE_SIZE; + MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) : + MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev); priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - priv->params.mpwqe_log_stride_sz; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f7e50ba67f94..c4242a4e8130 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -543,6 +543,12 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); + if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte)) + MLX5_SET(cmd_hca_cap, + set_hca_cap, + cache_line_128byte, + cache_line_size() == 128 ? 
1 : 0); + err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index a919dfb920ae..cc8ae860cd45 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -804,10 +804,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_150[0xa]; u8 log_max_ra_res_qp[0x6]; - u8 pad_cap[0x1]; + u8 end_pad[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; - u8 reserved_at_163[0xd]; + u8 start_pad[0x1]; + u8 cache_line_128byte[0x1]; + u8 reserved_at_163[0xb]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; -- cgit v1.2.3 From 2b31f7ae5f645edd852addfca445895b5806f3f9 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 28 Nov 2016 18:04:50 +0200 Subject: net/mlx5: TX WQE update Add new TX WQE fields for Connect-X5 vlan insertion support, type and vlan_tci, when type = MLX5_ETH_WQE_INSERT_VLAN the HW will insert the vlan and prio fields (vlan_tci) to the packet. Those bits and the inline header fields are mutually exclusive, and valid only when: MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_NOT_REQUIRED and MLX5_CAP_ETH(mdev, wqe_vlan_insert), who will be set in ConnectX-5 and later HW generations. Signed-off-by: Saeed Mahameed Reviewed-by: Tariq Toukan --- drivers/infiniband/hw/mlx5/qp.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 8 ++++---- include/linux/mlx5/mlx5_ifc.h | 3 ++- include/linux/mlx5/qp.h | 16 ++++++++++++++-- 5 files changed, 25 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 6a83fb32599d..e31bf11ae64f 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -2984,20 +2984,20 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg, if (wr->opcode == IB_WR_LSO) { struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); - int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start); + int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start); u64 left, leftlen, copysz; void *pdata = ud_wr->header; left = ud_wr->hlen; eseg->mss = cpu_to_be16(ud_wr->mss); - eseg->inline_hdr_sz = cpu_to_be16(left); + eseg->inline_hdr.sz = cpu_to_be16(left); /* * check if there is space till the end of queue, if yes, * copy all in one shot, otherwise copy till the end of queue, * rollback and than the copy the left */ - leftlen = qend - (void *)eseg->inline_hdr_start; + leftlen = qend - (void *)eseg->inline_hdr.start; copysz = min_t(u64, leftlen, left); memcpy(seg - size_of_inl_hdr_start, pdata, copysz); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index fd8dff6acc12..965e69e9ff1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -687,8 +687,8 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, memset(wqe, 0, sizeof(*wqe)); /* copy the inline part */ - memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE); - eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); + eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index cfb68371c397..678c07c8fbb0 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -284,18 +284,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) wi->num_bytes = num_bytes; if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); ihs += VLAN_HLEN; } else { - memcpy(eseg->inline_hdr_start, skb_data, ihs); + memcpy(eseg->inline_hdr.start, skb_data, ihs); mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); } - eseg->inline_hdr_sz = cpu_to_be16(ihs); + eseg->inline_hdr.sz = cpu_to_be16(ihs); ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start), + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cc8ae860cd45..afcd4736d8df 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -577,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 lro_cap[0x1]; u8 lro_psh_flag[0x1]; u8 lro_time_stamp[0x1]; - u8 reserved_at_5[0x3]; + u8 reserved_at_5[0x2]; + u8 wqe_vlan_insert[0x1]; u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 219c699c17b7..3096370fe831 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -221,14 +221,26 @@ enum { MLX5_ETH_WQE_L4_CSUM = 1 << 7, }; +enum { + MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, +}; + struct mlx5_wqe_eth_seg { u8 rsvd0[4]; u8 cs_flags; u8 rsvd1; __be16 mss; __be32 rsvd2; - __be16 inline_hdr_sz; - u8 inline_hdr_start[2]; + union { + struct { + __be16 sz; + u8 start[2]; + } inline_hdr; + struct { + __be16 type; + __be16 vlan_tci; + } insert; + }; }; struct mlx5_wqe_xrc_seg { -- cgit v1.2.3 From a6f402e4990145252ce4fde59b273fa7e4f91e1b Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 6 Dec 2016 13:53:49 +0200 Subject: net/mlx5e: Tx, no inline copy on ConnectX-5 ConnectX-5 and later HW generations will report min inline mode == MLX5_INLINE_MODE_NONE, which means driver is not required to copy packet headers to inline fields of TX WQE. When inline is not required, vlan insertion will be handled in the TX descriptor rather than copy to inline. For LSO case driver is still required to copy headers, for the HW to duplicate on wire. This will improve CPU utilization and boost TX performance. 
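Condensed, the xmit decision added by this patch looks roughly like the sketch below (simplified from the mlx5e_sq_xmit() hunk further down; the VLAN-into-inline sub-case and the DS count bookkeeping are left out, and the identifiers are those used in the diff):

        if (ihs) {
                /* inline copy still required (e.g. LSO or a stricter inline mode) */
                memcpy(eseg->inline_hdr.start, skb_data, ihs);
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
        } else if (skb_vlan_tag_present(skb)) {
                /* MLX5_INLINE_MODE_NONE: let the HW insert the VLAN tag */
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
        }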
Tested with pktgen burst single flow: CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz HCA: Mellanox Technologies MT28800 Family [ConnectX-5 Ex] Before: 15.1Mpps After: 17.2Mpps Improvement: 14% Signed-off-by: Saeed Mahameed Reviewed-by: Tariq Toukan --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 13 +++++++--- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 30 +++++++++++++---------- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1b7fe43ab22b..9cd38401fdc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1029,9 +1029,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; - sq->min_inline_mode = - MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ? - param->min_inline_mode : 0; + sq->min_inline_mode = param->min_inline_mode; err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -1095,7 +1093,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ? 0 : priv->tisn[sq->tc]); MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); - MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); + + if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) + MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1); @@ -3533,6 +3534,10 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); + if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE && + !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) + priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2; + priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 678c07c8fbb0..f193128bac4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -154,6 +154,8 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, int hlen; switch (mode) { + case MLX5_INLINE_MODE_NONE: + return 0; case MLX5_INLINE_MODE_TCP_UDP: hlen = eth_get_headlen(skb->data, skb_headlen(skb)); if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) @@ -283,21 +285,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) wi->num_bytes = num_bytes; - if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, - &skb_len); - ihs += VLAN_HLEN; - } else { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + if (ihs) { + if (skb_vlan_tag_present(skb)) { + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); + ihs += VLAN_HLEN; + } else { + memcpy(eseg->inline_hdr.start, skb_data, ihs); + mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + } + eseg->inline_hdr.sz = cpu_to_be16(ihs); + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + } else if 
(skb_vlan_tag_present(skb)) { + eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); + eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); } - eseg->inline_hdr.sz = cpu_to_be16(ihs); - - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), - MLX5_SEND_WQE_DS); - dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; + dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; wi->num_dma = 0; -- cgit v1.2.3 From b70149dd7dc98661fbbfe7281f127caa408028fe Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 6 Dec 2016 14:04:05 +0200 Subject: net/mlx5e: XDP Tx, no inline copy on ConnectX-5 ConnectX-5 and later HW generations will report min inline mode == MLX5_INLINE_MODE_NONE, which means driver is not required to copy packet headers to inline fields of TX WQE. Avoid copy to inline segment in XDP TX routine when HW inline mode doesn't require it. This will improve CPU utilization and boost XDP TX performance. Tested with xdp2 single flow: CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz HCA: Mellanox Technologies MT28800 Family [ConnectX-5 Ex] Before: 7.4Mpps After: 7.8Mpps Improvement: 5% Signed-off-by: Saeed Mahameed Reviewed-by: Tariq Toukan --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 20 +++++++++++++------- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9b23d3329847..8be4b12b5545 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -120,8 +120,7 @@ #define MLX5E_XDP_IHS_DS_COUNT \ DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT \ - (MLX5E_XDP_IHS_DS_COUNT + \ - (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) + ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) #define MLX5E_XDP_TX_WQEBBS \ DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9cd38401fdc9..ed230757d9c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1806,8 +1806,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); param->max_inline = priv->params.tx_max_inline; - /* FOR XDP SQs will support only L2 inline mode */ - param->min_inline_mode = MLX5_INLINE_MODE_NONE; + param->min_inline_mode = priv->params.tx_min_inline_mode; param->type = MLX5E_SQ_XDP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 965e69e9ff1e..b039b87742a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -657,9 +657,10 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; + u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT; ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; - dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE; + dma_addr_t dma_addr = di->addr + data_offset; unsigned int dma_len = xdp->data_end - xdp->data; if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || @@ -680,17 +681,22 @@ static inline 
bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, return false; } - dma_len -= MLX5E_XDP_MIN_INLINE; dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); memset(wqe, 0, sizeof(*wqe)); - /* copy the inline part */ - memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); - eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + dseg = (struct mlx5_wqe_data_seg *)eseg + 1; + /* copy the inline part if required */ + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { + memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); + eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + dma_len -= MLX5E_XDP_MIN_INLINE; + dma_addr += MLX5E_XDP_MIN_INLINE; - dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1); + ds_cnt += MLX5E_XDP_IHS_DS_COUNT; + dseg++; + } /* write the dma part */ dseg->addr = cpu_to_be64(dma_addr); @@ -698,7 +704,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, dseg->lkey = sq->mkey_be; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); sq->db.xdp.di[pi] = *di; wi->opcode = MLX5_OPCODE_SEND; -- cgit v1.2.3 From 8ca967ab67671f07ac7daef4f854559bc66799a3 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 9 Jan 2017 21:57:49 +0200 Subject: net/mlx5e: Bring back bfreg uar map dedicated pointer 4K Uar series modified the mlx5e driver to use the new bfreg API, and mistakenly removed the sq->uar_map iomem data path dedicated pointer, which was meant to be read from xmit path for cache locality utilization. Fix that by returning that pointer to the SQ struct. Fixes: 7309cb4ad71e ("IB/mlx5: Support 4k UAR for libmlx5") Signed-off-by: Saeed Mahameed Reviewed-by: Tariq Toukan --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 +++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8be4b12b5545..95ca03c0d9f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -475,6 +475,7 @@ struct mlx5e_sq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; + void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; u16 bf_buf_size; @@ -831,9 +832,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, */ wmb(); if (bf_sz) - __iowrite64_copy(sq->bfreg.map + ofst, ctrl, bf_sz); + __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz); else - mlx5_write64((__be32 *)ctrl, sq->bfreg.map + ofst, NULL); + mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL); /* flush the write-combining mapped buffer */ wmb(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ed230757d9c5..3cce6281e075 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1016,6 +1016,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, if (err) return err; + sq->uar_map = sq->bfreg.map; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, -- cgit v1.2.3 From a0e4761d9bd546eba2e9d3a675863a843624e613 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:10 +0100 Subject: mlxsw: core: Queue work immediately instead of delaying it We always use zero delay before queueing a work on 
the ordered workqueue ('mlxsw_owq'), so use work_struct directly instead of delayable work. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 6 +++--- drivers/net/ethernet/mellanox/mlxsw/core.h | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 57a98849551b..a4c07841aaf6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1901,11 +1901,11 @@ int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) } EXPORT_SYMBOL(mlxsw_core_schedule_dw); -int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay) +bool mlxsw_core_schedule_work(struct work_struct *work) { - return queue_delayed_work(mlxsw_owq, dwork, delay); + return queue_work(mlxsw_owq, work); } -EXPORT_SYMBOL(mlxsw_core_schedule_odw); +EXPORT_SYMBOL(mlxsw_core_schedule_work); void mlxsw_core_flush_owq(void) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index a7f94fbc898b..cf38cf9027f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -207,7 +207,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, u8 local_port); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); -int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay); +bool mlxsw_core_schedule_work(struct work_struct *work); void mlxsw_core_flush_owq(void); #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9e494a446b7e..01e86e7b33c9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1964,7 +1964,7 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) } struct mlxsw_sp_fib_event_work { - struct delayed_work dw; + struct work_struct work; struct fib_entry_notifier_info fen_info; struct mlxsw_sp *mlxsw_sp; unsigned long event; @@ -1973,7 +1973,7 @@ struct mlxsw_sp_fib_event_work { static void mlxsw_sp_router_fib_event_work(struct work_struct *work) { struct mlxsw_sp_fib_event_work *fib_work = - container_of(work, struct mlxsw_sp_fib_event_work, dw.work); + container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; int err; @@ -2014,7 +2014,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, if (WARN_ON(!fib_work)) return NOTIFY_BAD; - INIT_DELAYED_WORK(&fib_work->dw, mlxsw_sp_router_fib_event_work); + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work); fib_work->mlxsw_sp = mlxsw_sp; fib_work->event = event; @@ -2029,7 +2029,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, break; } - mlxsw_core_schedule_odw(&fib_work->dw, 0); + mlxsw_core_schedule_work(&fib_work->work); return NOTIFY_DONE; } -- cgit v1.2.3 From e60234ddb541b556b57353dfddca73155977d02f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:11 +0100 Subject: mlxsw: spectrum_router: Use ordered workqueue for neigh updates We currently associate each neighbour entry with a work item, so it's not possible to have multiple events queued for the same neighbour entry. 
However, this is about to be changed so that the neighbour entry is only resolved when the work item is scheduled. The above can result in a mismatch between the kernel's and the device's neighbour table, unless the associated work items are processed in the order in which they were submitted. Do that by migrating the NEIGH_UPDATE work items to be processed in the ordered workqueue which was recently introduced in mlxsw in commit a3832b31898f ("mlxsw: core: Create an ordered workqueue for FIB offload"). Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 01e86e7b33c9..0d8201e67985 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -614,7 +614,7 @@ struct mlxsw_sp_neigh_entry { struct mlxsw_sp_neigh_key key; u16 rif; bool offloaded; - struct delayed_work dw; + struct work_struct work; struct mlxsw_sp_port *mlxsw_sp_port; unsigned char ha[ETH_ALEN]; struct list_head nexthop_list; /* list of nexthops using @@ -659,7 +659,7 @@ mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) return NULL; neigh_entry->key.n = n; neigh_entry->rif = rif; - INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); + INIT_WORK(&neigh_entry->work, mlxsw_sp_router_neigh_update_hw); INIT_LIST_HEAD(&neigh_entry->nexthop_list); return neigh_entry; } @@ -935,7 +935,7 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) { struct mlxsw_sp_neigh_entry *neigh_entry = - container_of(work, struct mlxsw_sp_neigh_entry, dw.work); + container_of(work, struct mlxsw_sp_neigh_entry, work); struct neighbour *n = neigh_entry->key.n; struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -1052,7 +1052,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, * work. */ neigh_clone(n); - if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) { + if (!mlxsw_core_schedule_work(&neigh_entry->work)) { neigh_release(n); mlxsw_sp_port_dev_put(mlxsw_sp_port); } -- cgit v1.2.3 From de04b6a35887b742dccbc17e6663e4749aaa7542 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:12 +0100 Subject: mlxsw: spectrum_router: Remove unused variable Since commit 33b1341cd1bf ("mlxsw: spectrum_router: Fix handling of neighbour structure") we no longer use destination IP for neighbour lookup, so remove it. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 0d8201e67985..851799d60a20 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1004,7 +1004,6 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, struct net_device *dev; struct neigh_parms *p; struct neighbour *n; - u32 dip; switch (event) { case NETEVENT_DELAY_PROBE_TIME_UPDATE: @@ -1039,7 +1038,6 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, return NOTIFY_DONE; mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - dip = ntohl(*((__be32 *) n->primary_key)); neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (WARN_ON(!neigh_entry)) { mlxsw_sp_port_dev_put(mlxsw_sp_port); -- cgit v1.2.3 From 5c8802f14a0679e970e7b25f809a12c3ae1a873d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:13 +0100 Subject: mlxsw: spectrum_router: Simplify neighbour reflection Up until now we had two interfaces for neighbour related configuration: ndo_neigh_{construct,destroy} and NEIGH_UPDATE netevents. The ndos were used to add and remove neighbours from the driver's cache, whereas the netevent was used to reflect the neighbours into the device's tables. However, if the NUD state of a neighbour isn't NUD_VALID or if the neighbour is dead, then there's really no reason for us to keep it inside our cache. The only exception to this rule are neighbours that are also used for nexthops, which we periodically refresh to get them resolved. We can therefore eliminate the ndo entry point into the driver and simplify the code, making it similar to the FIB reflection, which is based solely on events. This also helps us avoid a locking issue, in which the RIF cache was traversed without proper locking during insertion into the neigh entry cache. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 - drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 - .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 263 +++++++++++---------- 3 files changed, 135 insertions(+), 134 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 8a52c860794b..c2a675125801 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1400,8 +1400,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, - .ndo_neigh_construct = mlxsw_sp_router_neigh_construct, - .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy, .ndo_fdb_add = switchdev_port_fdb_add, .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4d251e06bf31..dcd641aff785 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -599,10 +599,6 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_router_neigh_construct(struct net_device *dev, - struct neighbour *n); -void mlxsw_sp_router_neigh_destroy(struct net_device *dev, - struct neighbour *n); int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 851799d60a20..c338b08889a2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -613,9 +613,7 @@ struct mlxsw_sp_neigh_entry { struct rhash_head ht_node; struct mlxsw_sp_neigh_key key; u16 rif; - bool offloaded; - struct work_struct work; - struct mlxsw_sp_port *mlxsw_sp_port; + bool connected; unsigned char ha[ETH_ALEN]; struct list_head nexthop_list; /* list of nexthops using * this neigh entry @@ -629,105 +627,88 @@ static const struct rhashtable_params mlxsw_sp_neigh_ht_params = { .key_len = sizeof(struct mlxsw_sp_neigh_key), }; -static int -mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_neigh_entry *neigh_entry) -{ - return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht, - &neigh_entry->ht_node, - mlxsw_sp_neigh_ht_params); -} - -static void -mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_neigh_entry *neigh_entry) -{ - rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht, - &neigh_entry->ht_node, - mlxsw_sp_neigh_ht_params); -} - -static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); - static struct mlxsw_sp_neigh_entry * -mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) +mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n, + u16 rif) { struct mlxsw_sp_neigh_entry *neigh_entry; - neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); + neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL); if (!neigh_entry) return NULL; + neigh_entry->key.n = n; neigh_entry->rif = rif; - INIT_WORK(&neigh_entry->work, mlxsw_sp_router_neigh_update_hw); INIT_LIST_HEAD(&neigh_entry->nexthop_list); + return neigh_entry; } -static 
void -mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) +static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry) { kfree(neigh_entry); } -static struct mlxsw_sp_neigh_entry * -mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) +static int +mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) { - struct mlxsw_sp_neigh_key key; + return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); +} - key.n = n; - return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, - &key, mlxsw_sp_neigh_ht_params); +static void +mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); } -int mlxsw_sp_router_neigh_construct(struct net_device *dev, - struct neighbour *n) +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) { - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_neigh_entry *neigh_entry; struct mlxsw_sp_rif *r; int err; - if (n->tbl != &arp_tbl) - return 0; - - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); - if (neigh_entry) - return 0; - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); - if (WARN_ON(!r)) - return -EINVAL; + if (!r) + return ERR_PTR(-EINVAL); - neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif); + neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif); if (!neigh_entry) - return -ENOMEM; + return ERR_PTR(-ENOMEM); + err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); if (err) goto err_neigh_entry_insert; - return 0; + + return neigh_entry; err_neigh_entry_insert: - mlxsw_sp_neigh_entry_destroy(neigh_entry); - return err; + mlxsw_sp_neigh_entry_free(neigh_entry); + return ERR_PTR(err); } -void mlxsw_sp_router_neigh_destroy(struct net_device *dev, - struct neighbour *n) +static void +mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) { - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_neigh_entry *neigh_entry; + mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); + mlxsw_sp_neigh_entry_free(neigh_entry); +} - if (n->tbl != &arp_tbl) - return; +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) +{ + struct mlxsw_sp_neigh_key key; - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); - if (!neigh_entry) - return; - mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); - mlxsw_sp_neigh_entry_destroy(neigh_entry); + key.n = n; + return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, + &key, mlxsw_sp_neigh_ht_params); } static void @@ -932,76 +913,99 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, bool removing); -static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) +static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding) +{ + return adding ? 
MLXSW_REG_RAUHT_OP_WRITE_ADD : + MLXSW_REG_RAUHT_OP_WRITE_DELETE; +} + +static void +mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + enum mlxsw_reg_rauht_op op) { - struct mlxsw_sp_neigh_entry *neigh_entry = - container_of(work, struct mlxsw_sp_neigh_entry, work); struct neighbour *n = neigh_entry->key.n; - struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u32 dip = ntohl(*((__be32 *) n->primary_key)); char rauht_pl[MLXSW_REG_RAUHT_LEN]; - struct net_device *dev; + + mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, + dip); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + +static void +mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + bool adding) +{ + if (!adding && !neigh_entry->connected) + return; + neigh_entry->connected = adding; + if (neigh_entry->key.n->tbl == &arp_tbl) + mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, + mlxsw_sp_rauht_op(adding)); + else + WARN_ON_ONCE(1); +} + +struct mlxsw_sp_neigh_event_work { + struct work_struct work; + struct mlxsw_sp *mlxsw_sp; + struct neighbour *n; +}; + +static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) +{ + struct mlxsw_sp_neigh_event_work *neigh_work = + container_of(work, struct mlxsw_sp_neigh_event_work, work); + struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp; + struct mlxsw_sp_neigh_entry *neigh_entry; + struct neighbour *n = neigh_work->n; + unsigned char ha[ETH_ALEN]; bool entry_connected; u8 nud_state, dead; - bool updating; - bool removing; - bool adding; - u32 dip; - int err; + /* If these parameters are changed after we release the lock, + * then we are guaranteed to receive another event letting us + * know about it. 
+ */ read_lock_bh(&n->lock); - dip = ntohl(*((__be32 *) n->primary_key)); - memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha)); + memcpy(ha, n->ha, ETH_ALEN); nud_state = n->nud_state; dead = n->dead; - dev = n->dev; read_unlock_bh(&n->lock); + rtnl_lock(); entry_connected = nud_state & NUD_VALID && !dead; - adding = (!neigh_entry->offloaded) && entry_connected; - updating = neigh_entry->offloaded && entry_connected; - removing = neigh_entry->offloaded && !entry_connected; - - if (adding || updating) { - mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD, - neigh_entry->rif, - neigh_entry->ha, dip); - err = mlxsw_reg_write(mlxsw_sp->core, - MLXSW_REG(rauht), rauht_pl); - if (err) { - netdev_err(dev, "Could not add neigh %pI4h\n", &dip); - neigh_entry->offloaded = false; - } else { - neigh_entry->offloaded = true; - } - mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false); - } else if (removing) { - mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE, - neigh_entry->rif, - neigh_entry->ha, dip); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), - rauht_pl); - if (err) { - netdev_err(dev, "Could not delete neigh %pI4h\n", &dip); - neigh_entry->offloaded = true; - } else { - neigh_entry->offloaded = false; - } - mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); + if (!entry_connected && !neigh_entry) + goto out; + if (!neigh_entry) { + neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); + if (IS_ERR(neigh_entry)) + goto out; } + memcpy(neigh_entry->ha, ha, ETH_ALEN); + mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); + mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected); + + if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) + mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); + +out: + rtnl_unlock(); neigh_release(n); - mlxsw_sp_port_dev_put(mlxsw_sp_port); + kfree(neigh_work); } int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct mlxsw_sp_neigh_entry *neigh_entry; + struct mlxsw_sp_neigh_event_work *neigh_work; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp; unsigned long interval; - struct net_device *dev; struct neigh_parms *p; struct neighbour *n; @@ -1028,32 +1032,31 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, break; case NETEVENT_NEIGH_UPDATE: n = ptr; - dev = n->dev; if (n->tbl != &arp_tbl) return NOTIFY_DONE; - mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev); + mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); if (!mlxsw_sp_port) return NOTIFY_DONE; - mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); - if (WARN_ON(!neigh_entry)) { + neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC); + if (!neigh_work) { mlxsw_sp_port_dev_put(mlxsw_sp_port); - return NOTIFY_DONE; + return NOTIFY_BAD; } - neigh_entry->mlxsw_sp_port = mlxsw_sp_port; + + INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work); + neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + neigh_work->n = n; /* Take a reference to ensure the neighbour won't be * destructed until we drop the reference in delayed * work. 
*/ neigh_clone(n); - if (!mlxsw_core_schedule_work(&neigh_entry->work)) { - neigh_release(n); - mlxsw_sp_port_dev_put(mlxsw_sp_port); - } + mlxsw_core_schedule_work(&neigh_work->work); + mlxsw_sp_port_dev_put(mlxsw_sp_port); break; } @@ -1334,14 +1337,11 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop *nh; - /* Take RTNL mutex here to prevent lists from changes */ - rtnl_lock(); list_for_each_entry(nh, &neigh_entry->nexthop_list, neigh_list_node) { __mlxsw_sp_nexthop_neigh_update(nh, removing); mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); } - rtnl_unlock(); } static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, @@ -1368,8 +1368,11 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, } neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (!neigh_entry) { - neigh_release(n); - return -EINVAL; + neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); + if (IS_ERR(neigh_entry)) { + neigh_release(n); + return -EINVAL; + } } /* If that is the first nexthop connected to that neigh, add to @@ -1395,6 +1398,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; + struct neighbour *n = neigh_entry->key.n; __mlxsw_sp_nexthop_neigh_update(nh, true); list_del(&nh->neigh_list_node); @@ -1405,7 +1409,10 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, if (list_empty(&nh->neigh_entry->nexthop_list)) list_del(&nh->neigh_entry->nexthop_neighs_list_node); - neigh_release(neigh_entry->key.n); + if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) + mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); + + neigh_release(n); } static struct mlxsw_sp_nexthop_group * -- cgit v1.2.3 From a8eca326151ee1beac82a4fd86d9edad3a37aaed Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:14 +0100 Subject: net: remove ndo_neigh_{construct, destroy} from stacked devices In commit 18bfb924f000 ("net: introduce default neigh_construct/destroy ndo calls for L2 upper devices") we added these ndos to stacked devices such as team and bond, so that calls will be propagated to mlxsw. However, previous commit removed the reliance on these ndos and no new users of these ndos have appeared since above mentioned commit. We can therefore safely remove this dead code. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 2 -- drivers/net/team/team.c | 2 -- include/linux/netdevice.h | 4 ---- net/8021q/vlan_dev.c | 2 -- net/bridge/br_device.c | 2 -- net/core/dev.c | 44 ----------------------------------------- 6 files changed, 56 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e6af04716cf7..6321f12630c8 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4145,8 +4145,6 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_add_slave = bond_enslave, .ndo_del_slave = bond_release, .ndo_fix_features = bond_fix_features, - .ndo_neigh_construct = netdev_default_l2upper_neigh_construct, - .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a3711769544b..4a24b5d15f5a 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2001,8 +2001,6 @@ static const struct net_device_ops team_netdev_ops = { .ndo_add_slave = team_add_slave, .ndo_del_slave = team_del_slave, .ndo_fix_features = team_fix_features, - .ndo_neigh_construct = netdev_default_l2upper_neigh_construct, - .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy, .ndo_change_carrier = team_change_carrier, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_getlink = switchdev_port_bridge_getlink, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 014fbe256d55..58afbd1cc659 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3888,10 +3888,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info); -int netdev_default_l2upper_neigh_construct(struct net_device *dev, - struct neighbour *n); -void netdev_default_l2upper_neigh_destroy(struct net_device *dev, - struct neighbour *n); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 116455ac3db5..e97ab824e368 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -791,8 +791,6 @@ static const struct net_device_ops vlan_netdev_ops = { .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, - .ndo_neigh_construct = netdev_default_l2upper_neigh_construct, - .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy, .ndo_fdb_add = switchdev_port_fdb_add, .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 6c46d1b4cdbb..5ba0b558f8ae 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -347,8 +347,6 @@ static const struct net_device_ops br_netdev_ops = { .ndo_add_slave = br_add_slave, .ndo_del_slave = br_del_slave, .ndo_fix_features = br_fix_features, - .ndo_neigh_construct = netdev_default_l2upper_neigh_construct, - .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy, .ndo_fdb_add = br_fdb_add, .ndo_fdb_del = br_fdb_delete, .ndo_fdb_dump = br_fdb_dump, diff --git a/net/core/dev.c b/net/core/dev.c index 404d2e6d5d32..3e1a60102e64 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6111,50 +6111,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev, } EXPORT_SYMBOL(netdev_lower_state_changed); -int 
netdev_default_l2upper_neigh_construct(struct net_device *dev, - struct neighbour *n) -{ - struct net_device *lower_dev, *stop_dev; - struct list_head *iter; - int err; - - netdev_for_each_lower_dev(dev, lower_dev, iter) { - if (!lower_dev->netdev_ops->ndo_neigh_construct) - continue; - err = lower_dev->netdev_ops->ndo_neigh_construct(lower_dev, n); - if (err) { - stop_dev = lower_dev; - goto rollback; - } - } - return 0; - -rollback: - netdev_for_each_lower_dev(dev, lower_dev, iter) { - if (lower_dev == stop_dev) - break; - if (!lower_dev->netdev_ops->ndo_neigh_destroy) - continue; - lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n); - } - return err; -} -EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_construct); - -void netdev_default_l2upper_neigh_destroy(struct net_device *dev, - struct neighbour *n) -{ - struct net_device *lower_dev; - struct list_head *iter; - - netdev_for_each_lower_dev(dev, lower_dev, iter) { - if (!lower_dev->netdev_ops->ndo_neigh_destroy) - continue; - lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n); - } -} -EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_destroy); - static void dev_change_rx_flags(struct net_device *dev, int flags) { const struct net_device_ops *ops = dev->netdev_ops; -- cgit v1.2.3 From 8a0b7275268504259cc1a17bae839d23c4f6e2bc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:15 +0100 Subject: mlxsw: spectrum_router: Remove redundant check We only add neighbour entries that are also used for nexthops to 'nexthop_neighs_list', so when iterating over this list there's no need to check that the entry is indeed used for nexthops. Remove the redundant check. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c338b08889a2..98a6d03fc0a6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -847,13 +847,11 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) /* Take RTNL mutex here to prevent lists from changes */ rtnl_lock(); list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, - nexthop_neighs_list_node) { + nexthop_neighs_list_node) /* If this neigh have nexthops, make the kernel think this neigh * is active regardless of the traffic. 
*/ - if (!list_empty(&neigh_entry->nexthop_list)) - neigh_event_send(neigh_entry->key.n, NULL); - } + neigh_event_send(neigh_entry->key.n, NULL); rtnl_unlock(); } @@ -897,11 +895,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) */ rtnl_lock(); list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, - nexthop_neighs_list_node) { - if (!(neigh_entry->key.n->nud_state & NUD_VALID) && - !list_empty(&neigh_entry->nexthop_list)) + nexthop_neighs_list_node) + if (!(neigh_entry->key.n->nud_state & NUD_VALID)) neigh_event_send(neigh_entry->key.n, NULL); - } rtnl_unlock(); mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, -- cgit v1.2.3 From 01b1aa359dd481579de6047af8d7874e6e529585 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:16 +0100 Subject: mlxsw: spectrum_router: Don't read 'nud_state' without lock We periodically ask the neighbouring system to try and resolve neighbours that are used for nexthops, but aren't currently resolved. However, 'nud_state' is protected by the neighbour lock, so we shouldn't access it without taking it. Instead, we can simply check the 'connected' field of the neighbour entry, which we update upon NEIGH_UPDATE events. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 98a6d03fc0a6..ec022f9c302a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -896,7 +896,7 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) rtnl_lock(); list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, nexthop_neighs_list_node) - if (!(neigh_entry->key.n->nud_state & NUD_VALID)) + if (!neigh_entry->connected) neigh_event_send(neigh_entry->key.n, NULL); rtnl_unlock(); -- cgit v1.2.3 From fd76d9105b8e5b80bee58311a5c88d6594622c00 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 6 Feb 2017 16:20:17 +0100 Subject: mlxsw: spectrum_router: Fix typo in comment Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index ec022f9c302a..b19f69f4e28e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1353,7 +1353,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, /* Take a reference of neigh here ensuring that neigh would * not be detructed before the nexthop entry is finished. * The reference is taken either in neigh_lookup() or - * in neith_create() in case n is not found. + * in neigh_create() in case n is not found. */ n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); if (!n) { -- cgit v1.2.3 From d15c9ede6123dbce14c17eb9ced229e488002735 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 3 Feb 2017 17:37:06 +0800 Subject: sctp: process fwd tsn chunk only when prsctp is enabled This patch is to check if asoc->peer.prsctp_capable is set before processing fwd tsn chunk, if not, it will return an ERROR to the peer, just as rfc3758 section 3.3.1 demands. 
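As a rough illustration of the guard this patch adds (plain userspace C with simplified, made-up types rather than the kernel's SCTP state-machine structures), the handler now bails out before any FORWARD-TSN processing when the peer never negotiated PR-SCTP, routing the chunk to the unrecognized-chunk path so an ERROR goes back to the sender:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum disposition { DISP_DISCARD, DISP_UNK_CHUNK, DISP_VIOLATION, DISP_CONSUME };

struct peer_caps { bool prsctp_capable; };

static enum disposition eat_fwd_tsn(const struct peer_caps *peer, size_t chunk_len)
{
        if (!peer->prsctp_capable)      /* peer never advertised PR-SCTP */
                return DISP_UNK_CHUNK;  /* reported back to the peer as an ERROR */
        if (chunk_len < 8)              /* header (4) + new cumulative TSN (4) */
                return DISP_VIOLATION;
        return DISP_CONSUME;            /* normal FORWARD-TSN processing */
}

int main(void)
{
        struct peer_caps plain = { .prsctp_capable = false };
        struct peer_caps pr = { .prsctp_capable = true };

        printf("%d %d\n", eat_fwd_tsn(&plain, 16), eat_fwd_tsn(&pr, 16));
        return 0;
}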
Reported-by: Julian Cordes Signed-off-by: Xin Long Acked-by: Neil Horman Signed-off-by: David S. Miller --- net/sctp/sm_statefuns.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 782e579472c9..d8798ddda726 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3867,6 +3867,9 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } + if (!asoc->peer.prsctp_capable) + return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands); + /* Make sure that the FORWARD_TSN chunk has valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, @@ -3935,6 +3938,9 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } + if (!asoc->peer.prsctp_capable) + return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands); + /* Make sure that the FORWARD_TSN chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, -- cgit v1.2.3 From f820c0ac6cc84d4f9cb22c77d99ea510bd5c382a Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Mon, 6 Feb 2017 16:50:55 +0000 Subject: sfc: don't rearm interrupts if busy polling Since commit 364b6055738b ("net: busy-poll: return busypolling status to drivers"), napi_complete_done() returns a boolean that can be used by drivers to conditionally rearm interrupts. Testing with a 7142 shows a small latency improvement of ~100 ns. Signed-off-by: Bert Kenward Cc: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index fcd4eeecfef4..c28cd9daf949 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -328,8 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget) * since efx_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. */ - napi_complete_done(napi, spent); - efx_nic_eventq_read_ack(channel); + if (napi_complete_done(napi, spent)) + efx_nic_eventq_read_ack(channel); } return spent; -- cgit v1.2.3 From 73cfb2a2e49e166c569360ee60fe023b3efe3cf6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 3 Feb 2017 12:54:59 +0300 Subject: net/mlx4_en: fix a condition There is a "||" vs "|" typo here so we test 0x1 instead of 0x6. Fixes: 1f8176f7352a ("net/mlx4_en: Check the enabling pptx/pprx flags in SET_PORT wrapper flow") Signed-off-by: Dan Carpenter Reviewed-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/port.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 5053c949148f..4e36e287d605 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1395,7 +1395,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, gen_context); if (gen_context->flags & - (MLX4_FLAG_V_PPRX_MASK || MLX4_FLAG_V_PPTX_MASK)) + (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK)) mlx4_en_set_port_global_pause(dev, slave, gen_context); -- cgit v1.2.3 From bd05a5bd6b11d7fd26a668de83c5cb996de05f8f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 2 Dec 2016 09:57:40 +0100 Subject: iwlwifi: mvm: overwrite skb info later We don't really need clear the skb's status area nor store the dev_cmd into it until we really commit to the frame by handing it to the transport - defer those operations until just before we do that. This doesn't entirely fix the bug with frames not getting sent out after having been deferred due to DQA, because it doesn't restore the info->driver_data[0] place that was already set to zero (or another value) by the A-MSDU logic. Fixes: 24afba7690e4 ("iwlwifi: mvm: support bss dynamic alloc/dealloc of queues") Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index e2f82c10b019..0dcf0ebab45d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -466,7 +466,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; @@ -486,12 +485,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); + return dev_cmd; +} + +static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, + struct iwl_device_cmd *cmd) +{ + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); + memset(&skb_info->status, 0, sizeof(skb_info->status)); memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); - skb_info->driver_data[1] = dev_cmd; - - return dev_cmd; + skb_info->driver_data[1] = cmd; } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, @@ -607,6 +612,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (!dev_cmd) return -1; + /* From now on, we cannot access info->control */ + iwl_mvm_skb_prepare_status(skb, dev_cmd); + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; /* Copy MAC header from skb into command buffer */ @@ -917,7 +925,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, goto drop; tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - /* From now on, we cannot access info->control */ /* * we handle that entirely ourselves -- for uAPSD the firmware @@ -1024,6 +1031,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); + /* From now on, we cannot access info->control */ + iwl_mvm_skb_prepare_status(skb, 
dev_cmd); + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; -- cgit v1.2.3 From 05e5a7e58d3f8f597ebe6f78aaa13a2656b78239 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 2 Dec 2016 10:04:49 +0100 Subject: iwlwifi: mvm/pcie: adjust A-MSDU tx_cmd length in PCIe Instead of setting the tx_cmd length in the mvm code, which is complicated by the fact that DQA may want to temporarily store the SKB on the side, adjust the length in the PCIe code which also knows about this since it's responsible for duplicating all those headers that are account for in this code. As the PCIe code already relies on the tx_cmd->len field, this doesn't really introduce any new dependencies. To make this possible we need to move the memcpy() of the TX command until after it was updated. This does even simplify the code though, since the PCIe code already does a lot of manipulations to build A-MSDUs correctly and changing the length becomes a simple operation to see how much was added/removed, rather than predicting it. Fixes: 24afba7690e4 ("iwlwifi: mvm: support bss dynamic alloc/dealloc of queues") Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 38 +++------------------------- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 23 ++++++++++++++--- 2 files changed, 22 insertions(+), 39 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 0dcf0ebab45d..8daf4fbe4534 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -202,7 +202,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); @@ -284,9 +283,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; tx_cmd->tx_flags = cpu_to_le32(tx_flags); - /* Total # bytes to be transmitted */ - tx_cmd->len = cpu_to_le16((u16)skb->len + - (uintptr_t)skb_info->driver_data[0]); + /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ + tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; @@ -562,9 +560,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) info.hw_queue != info.control.vif->cab_queue))) return -1; - /* This holds the amsdu headers length */ - skb_info->driver_data[0] = (void *)(uintptr_t)0; - queue = info.hw_queue; /* @@ -651,7 +646,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; bool ipv4 = (skb->protocol == htons(ETH_P_IP)); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; - u16 amsdu_add, snap_ip_tcp, pad, i = 0; + u16 snap_ip_tcp, pad, i = 0; unsigned int dbg_max_amsdu_len; netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 *qc, tid, txf; @@ -753,21 +748,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); - - /* - * Compute the length of all the data added for the A-MSDU. 
- * This will be used to compute the length to write in the TX - * command. We have: SNAP + IP + TCP for n -1 subframes and - * ETH header for n subframes. Note that the original skb - * already had one set of SNAP / IP / TCP headers. - */ - num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); - amsdu_add = num_subframes * sizeof(struct ethhdr) + - (num_subframes - 1) * (snap_ip_tcp + pad); - /* This holds the amsdu headers length */ - skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add; - __skb_queue_tail(mpdus_skb, skb); return 0; } @@ -806,14 +786,6 @@ segment: ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); if (tcp_payload_len > mss) { - struct ieee80211_tx_info *skb_info = - IEEE80211_SKB_CB(tmp); - - num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); - amsdu_add = num_subframes * sizeof(struct ethhdr) + - (num_subframes - 1) * (snap_ip_tcp + pad); - skb_info->driver_data[0] = - (void *)(uintptr_t)amsdu_add; skb_shinfo(tmp)->gso_size = mss; } else { qc = ieee80211_get_qos_ctl((void *)tmp->data); @@ -1059,7 +1031,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; @@ -1073,9 +1044,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, memcpy(&info, skb->cb, sizeof(info)); - /* This holds the amsdu headers length */ - skb_info->driver_data[0] = (void *)(uintptr_t)0; - if (!skb_is_gso(skb)) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index e44e5adc2b95..911cf9868107 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -2096,6 +2096,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, struct iwl_device_cmd *dev_cmd, u16 tb1_len) { + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; @@ -2145,6 +2146,13 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, */ skb_pull(skb, hdr_len + iv_len); + /* + * Remove the length of all the headers that we don't actually + * have in the MPDU by themselves, but that we duplicate into + * all the different MSDUs inside the A-MSDU. 
+ */ + le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); + tso_start(skb, &tso); while (total_len) { @@ -2155,7 +2163,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; struct tcphdr *tcph; - u8 *iph; + u8 *iph, *subf_hdrs_start = hdr_page->pos; total_len -= data_left; @@ -2216,6 +2224,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, hdr_tb_len, false); trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, hdr_tb_len); + /* add this subframe's headers' length to the tx_cmd */ + le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ start_hdr = hdr_page->pos; @@ -2408,9 +2418,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tb1_len = len; } - /* The first TB points to bi-directional DMA data */ - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); + /* + * The first TB points to bi-directional DMA data, we'll + * memcpy the data into it later. + */ iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, IWL_FIRST_TB_SIZE, true); @@ -2434,6 +2445,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, goto out_err; } + /* building the A-MSDU might have changed this data, so memcpy it now */ + memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, + IWL_FIRST_TB_SIZE); + tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), -- cgit v1.2.3 From df88c08d5c7e47c301339a966b5b287453edf86d Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Thu, 24 Nov 2016 15:31:00 +0200 Subject: iwlwifi: mvm: release static queues on bcast release A few of the static queues are enabled along with the bcast STA. Make sure they are removed along with it, rather than waiting for the mac ctxt release. This is needed because we sometimes have a STA being removed and then added again (either with the same sta_id or a different one). If we wait for the mac ctxt release we will try to allocate the queues again (as this is currently done in the STA allocation and not in the MAC init) although they weren't freed, and even if the sta_id of the STA has changed. 
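The core of the change is symmetry between setup and teardown. A minimal, self-contained sketch of that idea (userspace C with hypothetical names, not the mvm API): record which queues were enabled together with the broadcast station in a mask, and tear down exactly that mask when the station is removed, so a later add starts from a clean state even if the station id differs:

#include <stdint.h>
#include <stdio.h>

struct bcast_sta {
        uint32_t tfd_queue_msk;         /* queues this station currently owns */
};

static void enable_queue(struct bcast_sta *sta, int queue)
{
        /* hardware queue enable would happen here */
        sta->tfd_queue_msk |= 1u << queue;
}

static void free_bcast_sta_queues(struct bcast_sta *sta)
{
        int q;

        for (q = 0; q < 32; q++) {
                if (!(sta->tfd_queue_msk & (1u << q)))
                        continue;
                /* hardware queue disable would happen here */
                sta->tfd_queue_msk &= ~(1u << q);
        }
}

int main(void)
{
        struct bcast_sta sta = { 0 };

        enable_queue(&sta, 9);          /* some fixed queue, e.g. for probe responses */
        free_bcast_sta_queues(&sta);    /* done on station removal, not on MAC release */
        printf("remaining mask: 0x%x\n", (unsigned)sta.tfd_queue_msk);
        return 0;
}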
Signed-off-by: Liad Kaufman Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 34 +++------ drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 16 ---- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 90 ++++++++++++++++++----- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 4 +- 4 files changed, 85 insertions(+), 59 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index ebf6c071eb36..bf0555fcf695 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -531,38 +531,26 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + /* + * If DQA is supported - queues were already disabled, since in + * DQA-mode the queues are a property of the STA and not of the + * vif, and at this point the STA was already deleted + */ + if (iwl_mvm_is_dqa_supported(mvm)) + return; + switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - if (!iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MAX_TID_COUNT, 0); - else - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_P2P_DEVICE_QUEUE, - vif->hw_queue[0], IWL_MAX_TID_COUNT, - 0); + iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MAX_TID_COUNT, 0); break; case NL80211_IFTYPE_AP: iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); - - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, - vif->hw_queue[0], IWL_MAX_TID_COUNT, - 0); /* fall through */ default: - /* - * If DQA is supported - queues were already disabled, since in - * DQA-mode the queues are a property of the STA and not of the - * vif, and at this point the STA was already deleted - */ - if (iwl_mvm_is_dqa_supported(mvm)) - break; - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], vif->hw_queue[ac], diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 72102566a02b..15ecd3aba71b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2104,22 +2104,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_unbind; - /* enable the multicast queue, now that we have a station for it */ - if (iwl_mvm_is_dqa_supported(mvm)) { - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, - .sta_id = mvmvif->bcast_sta.sta_id, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, - &cfg, wdg_timeout); - } - /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4afd52f8d9c3..40fb64ead469 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1780,6 +1780,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; + int ret; lockdep_assert_held(&mvm->mutex); @@ -1795,19 +1796,16 @@ int 
iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_get_wd_timeout(mvm, vif, false, false); int queue; - if ((vif->type == NL80211_IFTYPE_AP) && - (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE))) + if (vif->type == NL80211_IFTYPE_AP) queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) && - (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE))) + else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; - else if (WARN(1, "Missed required TXQ for adding bcast STA\n")) + else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) return -EINVAL; iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg, wdg_timeout); + bsta->tfd_queue_msk |= BIT(queue); } if (vif->type == NL80211_IFTYPE_ADHOC) @@ -1816,8 +1814,67 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) return -ENOSPC; - return iwl_mvm_add_int_sta_common(mvm, bsta, baddr, - mvmvif->id, mvmvif->color); + ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, + mvmvif->id, mvmvif->color); + if (ret) + return ret; + + /* + * In AP vif type, we also need to enable the cab_queue. However, we + * have to enable it after the ADD_STA command is sent, otherwise the + * FW will throw an assert once we send the ADD_STA command (it'll + * detect a mismatch in the tfd_queue_msk, as we can't add the + * enabled-cab_queue to the mask) + */ + if (iwl_mvm_is_dqa_supported(mvm) && + vif->type == NL80211_IFTYPE_AP) { + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_MCAST, + .sta_id = mvmvif->bcast_sta.sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, + 0, &cfg, wdg_timeout); + } + + return 0; +} + +static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, + IWL_MAX_TID_COUNT, 0); + + if (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) { + iwl_mvm_disable_txq(mvm, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, + vif->hw_queue[0], IWL_MAX_TID_COUNT, + 0); + mvmvif->bcast_sta.tfd_queue_msk &= + ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); + } + + if (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) { + iwl_mvm_disable_txq(mvm, + IWL_MVM_DQA_P2P_DEVICE_QUEUE, + vif->hw_queue[0], IWL_MAX_TID_COUNT, + 0); + mvmvif->bcast_sta.tfd_queue_msk &= + ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); + } } /* Send the FW a request to remove the station from it's internal data @@ -1829,6 +1886,9 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_free_bcast_sta_queues(mvm, vif); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -1842,22 +1902,16 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) + if (!iwl_mvm_is_dqa_supported(mvm)) { qmask = iwl_mvm_mac_get_queues_mask(vif); - if (vif->type == 
NL80211_IFTYPE_AP) { /* * The firmware defines the TFD queue mask to only be relevant * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. + * be included. This only happens in NL80211_IFTYPE_AP vif type, + * so the next line will only have an effect there. */ qmask &= ~BIT(vif->cab_queue); - - if (iwl_mvm_is_dqa_supported(mvm)) - qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); - } else if (iwl_mvm_is_dqa_supported(mvm) && - vif->type == NL80211_IFTYPE_P2P_DEVICE) { - qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); } return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index eb3434c0d148..dedea96a8e0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -655,8 +655,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /* Make sure this TID isn't already enabled */ if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n", - cfg->tid); + IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", + queue, cfg->tid); return; } -- cgit v1.2.3 From 2c6262b754f3c3338cb40b23880a3ac1f4693b25 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 7 Dec 2016 12:22:11 +0200 Subject: iwlwifi: pcie: fix the set of DMA memory mask Our 9000 device supports 64 bit DMA address for RX only, and not for TX. Setting DMA mask to 64 for the whole device is erroneous - we can do it only for a000 devices where device is capable of both RX & TX DMA with 64 bit address space. Fixes: 96a6497bc3ed ("iwlwifi: pcie: add 9000 series multi queue rx DMA support") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index c1d99d15796d..b28d99f61a35 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2953,16 +2953,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } - if (cfg->mq_rx_supported) - addr_size = 64; - else - addr_size = 36; - if (cfg->use_tfh) { + addr_size = 64; trans_pcie->max_tbs = IWL_TFH_NUM_TBS; trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd); - } else { + addr_size = 36; trans_pcie->max_tbs = IWL_NUM_OF_TBS; trans_pcie->tfd_size = sizeof(struct iwl_tfd); } -- cgit v1.2.3 From 94c3e614df2117626fccfac8f821c66e30556384 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 7 Dec 2016 15:04:37 +0200 Subject: iwlwifi: mvm: fix pending frame counter calculation In DQA mode the check whether to decrement the pending frames counter relies on the tid status and not on the txq id. This may result in an inconsistent state of the pending frames counter in case frame is queued on a non aggregation queue but with this TID, and will be followed by a failure to remove the station and later on SYSASSERT 0x3421 when trying to remove the MAC. Such frames are for example bar and qos NDPs. Fix it by aligning the condition of incrementing the counter with the condition of decrementing it - rely on TID state for DQA mode. Also, avoid internal error like this affecting station removal for DQA mode - since we can know for sure it is an internal error. 
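The failure mode is easiest to see as a counter whose increment and decrement tests disagree. A small standalone model (made-up names, not the mvm code) of what the commit message describes: the old transmit path counted on "!is_ampdu" while the DQA completion path keys off the TID's aggregation state, so a BAR or QoS null frame is counted on the way in but never counted back out; aligning both sides on the TID state balances the counter:

#include <stdbool.h>
#include <stdio.h>

enum agg_state { AGG_OFF, AGG_STARTING, AGG_ON };

struct frame {
        bool is_ampdu;                  /* sent with IEEE80211_TX_CTL_AMPDU */
        enum agg_state tid_state;       /* aggregation state of the frame's TID */
};

static int pending;

/* old transmit path: count everything that is not an A-MPDU frame */
static void tx_old(const struct frame *f)
{
        if (!f->is_ampdu)
                pending++;
}

/* fixed transmit path: use the same TID-state test as the completion path */
static void tx_new(const struct frame *f)
{
        if (f->tid_state != AGG_ON && f->tid_state != AGG_STARTING)
                pending++;
}

/* completion path as described above: in DQA mode it keys off the TID state */
static void tx_done(const struct frame *f)
{
        if (f->tid_state != AGG_ON && f->tid_state != AGG_STARTING)
                pending--;
}

int main(void)
{
        /* e.g. a BAR or QoS null frame: not an A-MPDU, but its TID is in AGG_ON */
        struct frame bar = { .is_ampdu = false, .tid_state = AGG_ON };

        pending = 0; tx_old(&bar); tx_done(&bar);
        printf("old: pending=%d (never drains)\n", pending);

        pending = 0; tx_new(&bar); tx_done(&bar);
        printf("new: pending=%d\n", pending);
        return 0;
}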
Fixes: cf961e16620f ("iwlwifi: mvm: support dqa-mode agg on non-shared queue") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 33 ++++++++++++++++++---------- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 5 ++++- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 40fb64ead469..1bad933b3ad4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1501,6 +1501,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + u8 sta_id = mvm_sta->sta_id; int ret; lockdep_assert_held(&mvm->mutex); @@ -1509,7 +1510,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, kfree(mvm_sta->dup_data); if ((vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == mvm_sta->sta_id) || + mvmvif->ap_sta_id == sta_id) || iwl_mvm_is_dqa_supported(mvm)){ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) @@ -1525,8 +1526,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); /* If DQA is supported - the queues can be disabled now */ - if (iwl_mvm_is_dqa_supported(mvm)) + if (iwl_mvm_is_dqa_supported(mvm)) { iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + /* + * If pending_frames is set at this point - it must be + * driver internal logic error, since queues are empty + * and removed successuly. + * warn on it but set it to 0 anyway to avoid station + * not being removed later in the function + */ + WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0)); + } /* If there is a TXQ still marked as reserved - free it */ if (iwl_mvm_is_dqa_supported(mvm) && @@ -1544,7 +1554,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", - mvm_sta->sta_id, reserved_txq, *status)) { + sta_id, reserved_txq, *status)) { spin_unlock_bh(&mvm->queue_info_lock); return -EINVAL; } @@ -1554,7 +1564,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, } if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == mvm_sta->sta_id) { + mvmvif->ap_sta_id == sta_id) { /* if associated - we can't remove the AP STA now */ if (vif->bss_conf.assoc) return ret; @@ -1563,7 +1573,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id) + if (mvm->d0i3_ap_sta_id == sta_id) mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; } } @@ -1572,7 +1582,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * This shouldn't happen - the TDLS channel switch should be canceled * before the STA is removed. */ - if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) { + if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; cancel_delayed_work(&mvm->tdls_cs.dwork); } @@ -1582,21 +1592,20 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * calls the drain worker. */ spin_lock_bh(&mvm_sta->lock); + /* * There are frames pending on the AC queues for this station. * We need to wait until all the frames are drained... 
*/ - if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], + if (atomic_read(&mvm->pending_frames[sta_id])) { + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], ERR_PTR(-EBUSY)); spin_unlock_bh(&mvm_sta->lock); /* disable TDLS sta queues on drain complete */ if (sta->tdls) { - mvm->tfd_drained[mvm_sta->sta_id] = - mvm_sta->tfd_queue_msk; - IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", - mvm_sta->sta_id); + mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk; + IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id); } ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 8daf4fbe4534..f82b765650d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1015,7 +1015,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); /* Increase pending frames count if this isn't AMPDU */ - if (!is_ampdu) + if ((iwl_mvm_is_dqa_supported(mvm) && + mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON && + mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) || + (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)) atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; -- cgit v1.2.3 From 4b40571eaf423b96e3809006bd192b4efd52a64b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 8 Dec 2016 10:38:08 +0100 Subject: iwlwifi: mvm: align copy-break SKB payload for MQ RX When a small frame is copied completely into the skb->head, the code doesn't take alignment into account, making mac80211 copy it again later on architectures that need the alignment. Avoid this by taking the PAD flag from the device into account when copying. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 6c802cee900c..c154ab42c80d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -149,8 +149,17 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); - if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) + if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { pad_len = 2; + + /* + * If the device inserted padding it means that (it thought) + * the 802.11 header wasn't a multiple of 4 bytes long. In + * this case, reserve two bytes at the start of the SKB to + * align the payload properly in case we end up copying it. + */ + skb_reserve(skb, pad_len); + } len -= pad_len; /* If frame is small enough to fit in skb->head, pull it completely. -- cgit v1.2.3 From 0d7f1b993b2e8babf424f009f5984fa06a713919 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 8 Dec 2016 10:43:40 +0200 Subject: iwlwifi: mvm: cleanup iwl_mvm_tx_mpdu a bit Unify code, remove redundant assignments. 
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 35 ++++++++++------------------- 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index f82b765650d6..4ba639eda7a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -907,6 +907,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_lock(&mvmsta->lock); + /* nullfunc frames should go to the MGMT queue regardless of QOS, + * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default + * assignment of MGMT TID + */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { u8 *qc = NULL; qc = ieee80211_get_qos_ctl(hdr); @@ -919,27 +923,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; - } else if (iwl_mvm_is_dqa_supported(mvm) && - (ieee80211_is_qos_nullfunc(fc) || - ieee80211_is_nullfunc(fc))) { - /* - * nullfunc frames should go to the MGMT queue regardless of QOS - */ - tid = IWL_MAX_TID_COUNT; + if (WARN_ON_ONCE(is_ampdu && + mvmsta->tid_data[tid].state != IWL_AGG_ON)) + goto drop_unlock_sta; } - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) txq_id = mvmsta->tid_data[tid].txq_id; - - if (ieee80211_is_mgmt(fc)) - tx_cmd->tid_tspec = IWL_TID_NON_QOS; - } - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - - WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); - if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { /* default to TID 0 for non-QoS packets */ u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; @@ -947,11 +937,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; } - if (is_ampdu) { - if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) - goto drop_unlock_sta; - txq_id = mvmsta->tid_data[tid].txq_id; - } + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdrlen); + + WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE || -- cgit v1.2.3 From 49060383a7585c61ab01c0043fd6202878ee9796 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 13 Dec 2016 09:48:57 +0200 Subject: iwlwifi: remove unnecessary argument to iwl_drv_start() When iwl_drv_start() is called, trans->cfg must already be set, so there's no need to pass cfg separately, since it can be accessed directly from trans->cfg. 
Signed-off-by: Luca Coelho Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 5 ++--- drivers/net/wireless/intel/iwlwifi/iwl-drv.h | 4 +--- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index d22821501676..899223e8414c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1504,8 +1504,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) kfree(pieces); } -struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, - const struct iwl_cfg *cfg) +struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) { struct iwl_drv *drv; int ret; @@ -1518,7 +1517,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, drv->trans = trans; drv->dev = trans->dev; - drv->cfg = cfg; + drv->cfg = trans->cfg; init_completion(&drv->request_firmware_complete); INIT_LIST_HEAD(&drv->list); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index f6eacfdbc265..6c537e04864e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -118,15 +118,13 @@ struct iwl_cfg; * iwl_drv_start - start the drv * * @trans_ops: the ops of the transport - * @cfg: device specific constants / virtual functions * * starts the driver: fetches the firmware. This should be called by bus * specific system flows implementations. For example, the bus specific probe * function should do bus related operations only, and then call to this * function. It returns the driver object or %NULL if an error occurred. */ -struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, - const struct iwl_cfg *cfg); +struct iwl_drv *iwl_drv_start(struct iwl_trans *trans); /** * iwl_drv_stop - stop the drv diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 2f8134b2a504..43c2475346ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -677,7 +677,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif pci_set_drvdata(pdev, iwl_trans); - iwl_trans->drv = iwl_drv_start(iwl_trans, cfg); + iwl_trans->drv = iwl_drv_start(iwl_trans); if (IS_ERR(iwl_trans->drv)) { ret = PTR_ERR(iwl_trans->drv); -- cgit v1.2.3 From 0c4881ced8d2e380d0544f1e7e3249d7bbc45979 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 13 Dec 2016 09:56:07 +0200 Subject: iwlwifi: remove unnecessary cfg element in iwl_drv The iwl_drv structure contains trans which already contains cfg, so storing cfg separately in iwl_drv is redundant. Remove it and access trans->cfg instead. 
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 899223e8414c..0e0293d42b5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -102,7 +102,6 @@ static struct dentry *iwl_dbgfs_root; * @op_mode: the running op_mode * @trans: transport layer * @dev: for debug prints only - * @cfg: configuration struct * @fw_index: firmware revision to try loading * @firmware_name: composite filename of ucode file to load * @request_firmware_complete: the firmware has been obtained from user space @@ -114,7 +113,6 @@ struct iwl_drv { struct iwl_op_mode *op_mode; struct iwl_trans *trans; struct device *dev; - const struct iwl_cfg *cfg; int fw_index; /* firmware we're trying to load */ char firmware_name[64]; /* name of firmware file to load */ @@ -213,18 +211,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, static int iwl_request_firmware(struct iwl_drv *drv, bool first) { - const char *name_pre = drv->cfg->fw_name_pre; + const char *name_pre = drv->trans->cfg->fw_name_pre; char tag[8]; if (first) { - drv->fw_index = drv->cfg->ucode_api_max; + drv->fw_index = drv->trans->cfg->ucode_api_max; sprintf(tag, "%d", drv->fw_index); } else { drv->fw_index--; sprintf(tag, "%d", drv->fw_index); } - if (drv->fw_index < drv->cfg->ucode_api_min) { + if (drv->fw_index < drv->trans->cfg->ucode_api_min) { IWL_ERR(drv, "no suitable firmware found!\n"); return -ENOENT; } @@ -1207,7 +1205,7 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) dbgfs_dir = drv->dbgfs_op_mode; #endif - op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir); + op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); #ifdef CONFIG_IWLWIFI_DEBUGFS if (!op_mode) { @@ -1247,8 +1245,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) struct iwlwifi_opmode_table *op; int err; struct iwl_firmware_pieces *pieces; - const unsigned int api_max = drv->cfg->ucode_api_max; - const unsigned int api_min = drv->cfg->ucode_api_min; + const unsigned int api_max = drv->trans->cfg->ucode_api_max; + const unsigned int api_min = drv->trans->cfg->ucode_api_min; size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; int i; @@ -1310,7 +1308,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * In mvm uCode there is no difference between data and instructions * sections. */ - if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg)) + if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, + drv->trans->cfg)) goto try_again; /* Allocate ucode buffers for card's bus-master loading ... 
*/ @@ -1408,14 +1407,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; else fw->init_evtlog_size = - drv->cfg->base_params->max_event_log_size; + drv->trans->cfg->base_params->max_event_log_size; fw->init_errlog_ptr = pieces->init_errlog_ptr; fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; if (pieces->inst_evtlog_size) fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; else fw->inst_evtlog_size = - drv->cfg->base_params->max_event_log_size; + drv->trans->cfg->base_params->max_event_log_size; fw->inst_errlog_ptr = pieces->inst_errlog_ptr; /* @@ -1517,7 +1516,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) drv->trans = trans; drv->dev = trans->dev; - drv->cfg = trans->cfg; init_completion(&drv->request_firmware_complete); INIT_LIST_HEAD(&drv->list); -- cgit v1.2.3 From 5594d80e9bf46d607bfcb08df0353b009dd6e2e8 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 23 Nov 2016 17:08:20 +0200 Subject: iwlwifi: support two phys for a000 devices Support differentiating between two phys for a000 devices in order to load the correct firmware. Eventually when moving completely to the new phy we will be able to remove this. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-a000.c | 30 +++++++++++++++++++------ drivers/net/wireless/intel/iwlwifi/iwl-config.h | 3 ++- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1 + drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 8 ++++++- 4 files changed, 33 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c index 82f18d967c40..15dd7f6137c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c @@ -72,9 +72,13 @@ #define IWL_A000_SMEM_OFFSET 0x400000 #define IWL_A000_SMEM_LEN 0x68000 -#define IWL_A000_FW_PRE "iwlwifi-Qu-a0-jf-b0-" -#define IWL_A000_MODULE_FIRMWARE(api) \ - IWL_A000_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" +#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" + +#define IWL_A000_HR_MODULE_FIRMWARE(api) \ + IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_JF_MODULE_FIRMWARE(api) \ + IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 @@ -116,11 +120,22 @@ static const struct iwl_ht_params iwl_a000_ht_params = { .mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ - .use_tfh = true + .use_tfh = true, \ + .rf_id = true + +const struct iwl_cfg iwla000_2ac_cfg_hr = { + .name = "Intel(R) Dual Band Wireless AC a000", + .fw_name_pre = IWL_A000_HR_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; -const struct iwl_cfg iwla000_2ac_cfg = { +const struct iwl_cfg iwla000_2ac_cfg_jf = { .name = "Intel(R) Dual Band Wireless AC a000", - .fw_name_pre = IWL_A000_FW_PRE, + .fw_name_pre = IWL_A000_JF_FW_PRE, IWL_DEVICE_A000, .ht_params = &iwl_a000_ht_params, .nvm_ver = IWL_A000_NVM_VERSION, @@ -128,4 +143,5 @@ const struct iwl_cfg iwla000_2ac_cfg = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; -MODULE_FIRMWARE(IWL_A000_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); 
+MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 2660cc4b9f8c..94f8a51b633e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -455,7 +455,8 @@ extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; -extern const struct iwl_cfg iwla000_2ac_cfg; +extern const struct iwl_cfg iwla000_2ac_cfg_hr; +extern const struct iwl_cfg iwla000_2ac_cfg_jf; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index d73e9d436027..4ee3b621ec27 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -349,6 +349,7 @@ enum { /* RF_ID value */ #define CSR_HW_RF_ID_TYPE_JF (0x00105000) #define CSR_HW_RF_ID_TYPE_LC (0x00101000) +#define CSR_HW_RF_ID_TYPE_HR (0x00109000) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 43c2475346ac..618894ecfd22 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -533,7 +533,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, /* a000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)}, #endif /* CONFIG_IWLMVM */ {0} @@ -673,6 +673,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) cfg = &iwl9000lc_2ac_cfg; iwl_trans->cfg = cfg; } + + if (cfg == &iwla000_2ac_cfg_hr && + iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) { + cfg = &iwla000_2ac_cfg_jf; + iwl_trans->cfg = cfg; + } } #endif -- cgit v1.2.3 From 23aeea943b466083637dfd878b298e76416fcc29 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 13 Dec 2016 10:29:07 +0100 Subject: iwlwifi: pcie: fix another RF-kill race When resuming, it's possible for the following scenario to occur: * iwl_pci_resume() enables the RF-kill interrupt * iwl_pci_resume() reads the RF-kill state (e.g. to 'radio enabled') * RF_KILL interrupt triggers, and iwl_pcie_irq_handler() reads the state, now 'radio disabled', and acquires the &trans_pcie->mutex. * iwl_pcie_irq_handler() further calls iwl_trans_pcie_rf_kill() to indicate to the higher layers that the radio is now disabled (and stops the device while at it) * iwl_pcie_irq_handler() drops the mutex * iwl_pci_resume() continues, acquires the mutex and calls the higher layers to indicate that the radio is enabled. At this point, the device is stopped but the higher layers think it's available, and can call deeply into the driver to try to enable it. However, this will fail since the device is actually disabled. 
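Roughly, the fix below widens the existing mutex so that enabling the interrupt, reading the RF-kill state and reporting it form one critical section with respect to the interrupt handler. Simplified to the resume path, with the surrounding iwl_pci_resume() plumbing and error handling omitted:

	/* Resume path after the fix (simplified fragment).  The mutex now
	 * covers both the state read and the report, so the RF_KILL
	 * interrupt handler cannot slip in between them with a
	 * conflicting update.
	 */
	mutex_lock(&trans_pcie->mutex);
	iwl_enable_rfkill_int(trans);
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	mutex_unlock(&trans_pcie->mutex);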
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 7 ++++--- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 ++ drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 4 ++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 618894ecfd22..ba8a81cb0e2b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -784,13 +784,14 @@ static int iwl_pci_resume(struct device *device) /* * Enable rfkill interrupt (in order to keep track of - * the rfkill status) + * the rfkill status). Must be locked to avoid processing + * a possible rfkill interrupt between reading the state + * and calling iwl_trans_pcie_rf_kill() with it. */ + mutex_lock(&trans_pcie->mutex); iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); - - mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); mutex_unlock(&trans_pcie->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index cac6d99012b3..cf5bda06042c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -670,6 +670,8 @@ static inline u8 get_cmd_index(struct iwl_txq *q, u32 index) static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { + lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex); + return !(iwl_read32(trans, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 6fe5546dc773..e1bf6da20909 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1607,13 +1607,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) if (inta & CSR_INT_BIT_RF_KILL) { bool hw_rfkill; + mutex_lock(&trans_pcie->mutex); hw_rfkill = iwl_is_rfkill_set(trans); IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio"); isr_stats->rfkill++; - mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { @@ -1952,13 +1952,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) { bool hw_rfkill; + mutex_lock(&trans_pcie->mutex); hw_rfkill = iwl_is_rfkill_set(trans); IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio"); isr_stats->rfkill++; - mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { -- cgit v1.2.3 From 8364fbb497f00de21d6d347194fa8b6bbae1d6f5 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 8 Dec 2016 11:44:20 +0200 Subject: iwlwifi: mvm: support new beacon template command Support new version of beacon template command which deprecates the use of the tx command inside. 
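In outline, the send path added below picks the template layout from the firmware capabilities. The send_v*_template() helpers in this sketch are placeholders for the command setup that the real iwl_mvm_mac_ctxt_send_beacon() does inline; the capability and API checks are the ones used in the patch.

	/* Version selection sketch (placeholders, not driver functions). */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
		if (iwl_mvm_has_new_tx_api(mvm))
			send_v8_template();	/* iwl_mac_beacon_cmd: byte_cnt + data */
		else
			send_v7_template();	/* iwl_mac_beacon_cmd_v7: tx cmd + data */
	} else {
		send_v6_template();		/* iwl_mac_beacon_cmd_v6 */
	}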
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 29 +++++++-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 71 +++++++++++++++------- 2 files changed, 73 insertions(+), 27 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index 59ca97a11b2b..b38cc073adcc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -672,8 +672,7 @@ struct iwl_mac_beacon_cmd_v6 { } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ /** - * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA - * @tx: the tx commands associated with the beacon frame + * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA * @template_id: currently equal to the mac context id of the coresponding * mac. * @tim_idx: the offset of the tim IE in the beacon @@ -682,16 +681,38 @@ struct iwl_mac_beacon_cmd_v6 { * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ -struct iwl_mac_beacon_cmd { - struct iwl_tx_cmd tx; +struct iwl_mac_beacon_cmd_data { __le32 template_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[0]; +}; + +/** + * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA + * @tx: the tx commands associated with the beacon frame + * @data: see &iwl_mac_beacon_cmd_data + */ +struct iwl_mac_beacon_cmd_v7 { + struct iwl_tx_cmd tx; + struct iwl_mac_beacon_cmd_data data; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ +/** + * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA + * @byte_cnt: byte count of the beacon frame + * @flags: for future use + * @data: see &iwl_mac_beacon_cmd_data + */ +struct iwl_mac_beacon_cmd { + __le16 byte_cnt; + __le16 flags; + __le64 reserved; + struct iwl_mac_beacon_cmd_data data; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */ + struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; __le64 tsf; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index bf0555fcf695..99132ea16ede 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -979,7 +979,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, } static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, - struct iwl_mac_beacon_cmd_v6 *beacon_cmd, + __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size) { u32 tim_idx; @@ -996,8 +996,8 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, /* If TIM field was found, set variables */ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { - beacon_cmd->tim_idx = cpu_to_le32(tim_idx); - beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]); + *tim_index = cpu_to_le32(tim_idx); + *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]); } else { IWL_WARN(mvm, "Unable to find TIM Element in beacon\n"); } @@ -1031,8 +1031,9 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, }; union { struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; - struct iwl_mac_beacon_cmd beacon_cmd; + struct iwl_mac_beacon_cmd_v7 beacon_cmd; } u = {}; + struct iwl_mac_beacon_cmd beacon_cmd; struct ieee80211_tx_info *info; u32 beacon_skb_len; u32 rate, tx_flags; @@ -1042,6 +1043,46 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, 
beacon_skb_len = beacon->len; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { + u32 csa_offset, ecsa_offset; + + csa_offset = iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon_skb_len); + ecsa_offset = + iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon_skb_len); + + if (iwl_mvm_has_new_tx_api(mvm)) { + beacon_cmd.data.template_id = + cpu_to_le32((u32)mvmvif->id); + beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset); + beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len); + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, + &beacon_cmd.data.tim_idx, + &beacon_cmd.data.tim_size, + beacon->data, + beacon_skb_len); + cmd.len[0] = sizeof(beacon_cmd); + cmd.data[0] = &beacon_cmd; + goto send; + + } else { + u.beacon_cmd.data.ecsa_offset = + cpu_to_le32(ecsa_offset); + u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); + cmd.len[0] = sizeof(u.beacon_cmd); + cmd.data[0] = &u; + } + } else { + cmd.len[0] = sizeof(u.beacon_cmd_v6); + cmd.data[0] = &u; + } + /* TODO: for now the beacon template id is set to be the mac context id. * Might be better to handle it as another resource ... */ u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id); @@ -1080,29 +1121,13 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, /* Set up TX beacon command fields */ if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6, + iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx, + &u.beacon_cmd_v6.tim_size, beacon->data, beacon_skb_len); +send: /* Submit command */ - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { - u.beacon_cmd.csa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_CHANNEL_SWITCH, - beacon_skb_len)); - u.beacon_cmd.ecsa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_EXT_CHANSWITCH_ANN, - beacon_skb_len)); - - cmd.len[0] = sizeof(u.beacon_cmd); - } else { - cmd.len[0] = sizeof(u.beacon_cmd_v6); - } - - cmd.data[0] = &u; cmd.dataflags[0] = 0; cmd.len[1] = beacon_skb_len; cmd.data[1] = beacon->data; -- cgit v1.2.3 From d0d7b10b05945f40fefd4e60f457c61aefa3e9a9 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sat, 4 Feb 2017 11:00:49 -0600 Subject: net-next: treewide use is_vlan_dev() helper function. This patch makes use of is_vlan_dev() function instead of flag comparison which is exactly done by is_vlan_dev() helper function. Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Acked-by: Stephen Hemminger Acked-by: Jon Maxwell Acked-by: Johannes Thumshirn Acked-by: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/infiniband/core/cma.c | 6 ++---- drivers/infiniband/sw/rxe/rxe_net.c | 2 +- drivers/net/ethernet/broadcom/cnic.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2 +- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 8 ++++---- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4 ++-- drivers/net/hyperv/netvsc_drv.c | 2 +- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 6 +++--- drivers/scsi/cxgbi/libcxgbi.c | 6 +++--- drivers/scsi/fcoe/fcoe.c | 13 ++++++------- include/rdma/ib_addr.h | 6 ++---- net/hsr/hsr_slave.c | 3 ++- 14 files changed, 31 insertions(+), 35 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 3e70a9c5d79d..4eb5a80e5d81 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2467,14 +2467,12 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos) struct net_device *dev; prio = rt_tos2priority(tos); - dev = ndev->priv_flags & IFF_802_1Q_VLAN ? - vlan_dev_real_dev(ndev) : ndev; - + dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev; if (dev->num_tc) return netdev_get_prio_tc_map(dev, prio); #if IS_ENABLED(CONFIG_VLAN_8021Q) - if (ndev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(ndev)) return (vlan_dev_get_egress_qos_mask(ndev, prio) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; #endif diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 4abdeb359fb4..d9d15561eb5d 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -118,7 +118,7 @@ static struct device *dma_device(struct rxe_dev *rxe) ndev = rxe->ndev; - if (ndev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(ndev)) ndev = vlan_dev_real_dev(ndev); return ndev->dev.parent; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index b1d2ac818710..cec94bbb2ea5 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -3665,7 +3665,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk) static inline u16 cnic_get_vlan(struct net_device *dev, struct net_device **vlan_dev) { - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { *vlan_dev = vlan_dev_real_dev(dev); return vlan_dev_vlan_id(dev); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 5f226eda8cd6..52063587e1e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -351,7 +351,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, e->smt_idx = smt_idx; atomic_set(&e->refcnt, 1); neigh_replace(e, neigh); - if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(neigh->dev)) e->vlan = vlan_dev_vlan_id(neigh->dev); else e->vlan = VLAN_NONE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index f4f569060689..70590144be03 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1805,7 +1805,7 @@ static void check_neigh_update(struct neighbour *neigh) const struct device *parent; const struct net_device *netdev = neigh->dev; - if (netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(netdev)) netdev = vlan_dev_real_dev(netdev); parent = netdev->dev.parent; if (parent && parent->driver == &cxgb4_driver.driver) @@ -2111,7 +2111,7 @@ static int 
cxgb4_inet6addr_handler(struct notifier_block *this, #if IS_ENABLED(CONFIG_BONDING) struct adapter *adap; #endif - if (event_dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(event_dev)) event_dev = vlan_dev_real_dev(event_dev); #if IS_ENABLED(CONFIG_BONDING) if (event_dev->flags & IFF_MASTER) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 60a26037a1c6..7c8c5b9a3c22 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -432,7 +432,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, else lport = netdev2pinfo(physdev)->lport; - if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(neigh->dev)) vlan = vlan_dev_vlan_id(neigh->dev); else vlan = VLAN_NONE; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 0cf8a3703275..3b5d7cfa2321 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -3264,7 +3264,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter, cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); if (cur == NULL) goto out; - if (dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); cur->master = !!netif_is_bond_master(dev); cur->ip_addr = ifa->ifa_address; @@ -3374,7 +3374,7 @@ static void netxen_config_master(struct net_device *dev, unsigned long event) !netif_is_bond_slave(dev)) { netxen_config_indev_addr(adapter, master, event); for_each_netdev_rcu(&init_net, slave) - if (slave->priv_flags & IFF_802_1Q_VLAN && + if (is_vlan_dev(slave) && vlan_dev_real_dev(slave) == master) netxen_config_indev_addr(adapter, slave, event); } @@ -3400,7 +3400,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } @@ -3445,7 +3445,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 4c0cce962585..b6628aaa6e4a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -4220,7 +4220,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } @@ -4256,7 +4256,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 72b0c1f7496e..425285f36595 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1605,7 +1605,7 @@ static int netvsc_netdev_event(struct notifier_block *this, return NOTIFY_DONE; /* Avoid Vlan dev with same MAC registering as VF */ - if (event_dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(event_dev)) return NOTIFY_DONE; /* Avoid Bonding master dev with same MAC registering as VF */ diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index c639d5a02656..4b9e7761a97b 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -2282,7 +2282,7 @@ static int _bnx2fc_create(struct net_device *netdev, } /* obtain 
physical netdev */ - if (netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(netdev)) phys_dev = vlan_dev_real_dev(netdev); /* verify if the physical device is a netxtreme2 device */ @@ -2320,7 +2320,7 @@ static int _bnx2fc_create(struct net_device *netdev, goto ifput_err; } - if (netdev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(netdev)) { vlan_id = vlan_dev_vlan_id(netdev); interface->vlan_enabled = 1; } @@ -2538,7 +2538,7 @@ static bool bnx2fc_match(struct net_device *netdev) struct net_device *phys_dev = netdev; mutex_lock(&bnx2fc_dev_lock); - if (netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(netdev)) phys_dev = vlan_dev_real_dev(netdev); if (bnx2fc_hba_lookup(phys_dev)) { diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 9167bcd9fffe..bd7d39ecbd24 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -223,7 +223,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, struct cxgbi_device *cdev, *tmp; int i; - if (ndev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(ndev)) { vdev = ndev; ndev = vlan_dev_real_dev(ndev); log_debug(1 << CXGBI_DBG_DEV, @@ -256,7 +256,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, struct cxgbi_device *cdev; int i; - if (ndev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(ndev)) { vdev = ndev; ndev = vlan_dev_real_dev(ndev); pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); @@ -290,7 +290,7 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, struct cxgbi_device *cdev, *tmp; int i; - if (ndev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(ndev)) { vdev = ndev; ndev = vlan_dev_real_dev(ndev); pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 59150cad0353..79160ffae483 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -326,8 +326,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, /* look for SAN MAC address, if multiple SAN MACs exist, only * use the first one for SPMA */ - real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? - vlan_dev_real_dev(netdev) : netdev; + real_dev = is_vlan_dev(netdev) ? 
vlan_dev_real_dev(netdev) : netdev; fcoe->realdev = real_dev; rcu_read_lock(); for_each_dev_addr(real_dev, ha) { @@ -730,7 +729,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) ctlr = fcoe_to_ctlr(fcoe); /* Figure out the VLAN ID, if any */ - if (netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(netdev)) lport->vlan = vlan_dev_vlan_id(netdev); else lport->vlan = 0; @@ -959,13 +958,13 @@ static inline int fcoe_em_config(struct fc_lport *lport) * Reuse existing offload em instance in case * it is already allocated on real eth device */ - if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(fcoe->netdev)) cur_real_dev = vlan_dev_real_dev(fcoe->netdev); else cur_real_dev = fcoe->netdev; list_for_each_entry(oldfcoe, &fcoe_hostlist, list) { - if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(oldfcoe->netdev)) old_real_dev = vlan_dev_real_dev(oldfcoe->netdev); else old_real_dev = oldfcoe->netdev; @@ -1563,7 +1562,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) skb->protocol = htons(ETH_P_FCOE); skb->priority = fcoe->priority; - if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && + if (is_vlan_dev(fcoe->netdev) && fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { /* must set skb->dev before calling vlan_put_tag */ skb->dev = fcoe->realdev; @@ -1794,7 +1793,7 @@ fcoe_hostlist_lookup_realdev_port(struct net_device *netdev) struct net_device *real_dev; list_for_each_entry(fcoe, &fcoe_hostlist, list) { - if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(fcoe->netdev)) real_dev = vlan_dev_real_dev(fcoe->netdev); else real_dev = fcoe->netdev; diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 1beab5532035..4b34c51f859e 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -160,8 +160,7 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr) static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev) { - return dev->priv_flags & IFF_802_1Q_VLAN ? - vlan_dev_vlan_id(dev) : 0xffff; + return is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0xffff; } static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid) @@ -326,8 +325,7 @@ static inline u16 rdma_get_vlan_id(union ib_gid *dgid) static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev) { - return dev->priv_flags & IFF_802_1Q_VLAN ? - vlan_dev_real_dev(dev) : NULL; + return is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : NULL; } #endif /* IB_ADDR_H */ diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index f5b60388d02f..56080da4aa77 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -12,6 +12,7 @@ #include "hsr_slave.h" #include #include +#include #include "hsr_main.h" #include "hsr_device.h" #include "hsr_forward.h" @@ -81,7 +82,7 @@ static int hsr_check_dev_ok(struct net_device *dev) return -EINVAL; } - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n"); return -EINVAL; } -- cgit v1.2.3 From 321fa4ffd94e333657e54037d2511c862ec92f6f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 3 Feb 2017 17:37:03 +0100 Subject: net/mlx5e: fix another maybe-uninitialized false-positive In commit abeffce ("net/mlx5e: Fix a -Wmaybe-uninitialized warning"), I fixed a gcc warning for the ipv4 offload handling. 
Now we get the same warning for the added ipv6 support: drivers/net/ethernet/mellanox/mlx5/core/en_tc.c:815:40: warning: 'out_dev' may be used uninitialized in this function [-Wmaybe-uninitialized] We can apply the same workaround here as well. Fixes: ce99f6b97fcd ("net/mlx5e: Support SRIOV TC encapsulation offloads for IPv6 tunnels") Signed-off-by: Arnd Bergmann Acked-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index e3cf5f484153..d87a82682cb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -731,8 +731,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, int ret; dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); - if (dst->error) { - ret = dst->error; + ret = dst->error; + if (ret) { dst_release(dst); return ret; } -- cgit v1.2.3 From 88e4f0ca4e4e7760e4aad544789c5408219886d5 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 3 Feb 2017 13:20:16 -0500 Subject: net: dsa: move netdevice notifier registration Move the netdevice notifier block register code in slave.c and provide helpers for dsa.c to register and unregister it. At the same time, check for errors since (un)register_netdevice_notifier may fail. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/dsa.c | 10 ++++------ net/dsa/dsa_priv.h | 4 ++-- net/dsa/slave.c | 22 ++++++++++++++++++++-- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 619e57a44d1d..beb79ccf0f59 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -903,10 +903,6 @@ static struct packet_type dsa_pack_type __read_mostly = { .func = dsa_switch_rcv, }; -static struct notifier_block dsa_netdevice_nb __read_mostly = { - .notifier_call = dsa_slave_netdevice_event, -}; - #ifdef CONFIG_PM_SLEEP static int dsa_suspend(struct device *d) { @@ -964,7 +960,9 @@ static int __init dsa_init_module(void) { int rc; - register_netdevice_notifier(&dsa_netdevice_nb); + rc = dsa_slave_register_notifier(); + if (rc) + return rc; rc = platform_driver_register(&dsa_driver); if (rc) @@ -978,7 +976,7 @@ module_init(dsa_init_module); static void __exit dsa_cleanup_module(void) { - unregister_netdevice_notifier(&dsa_netdevice_nb); + dsa_slave_unregister_notifier(); dev_remove_pack(&dsa_pack_type); platform_driver_unregister(&dsa_driver); } diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index a5509b765fc0..591a40aea9ca 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -63,8 +63,8 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, void dsa_slave_destroy(struct net_device *slave_dev); int dsa_slave_suspend(struct net_device *slave_dev); int dsa_slave_resume(struct net_device *slave_dev); -int dsa_slave_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr); +int dsa_slave_register_notifier(void); +void dsa_slave_unregister_notifier(void); /* tag_dsa.c */ extern const struct dsa_device_ops dsa_netdev_ops; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 09fc3e9462c1..949644c1dac2 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1524,8 +1524,8 @@ static int dsa_slave_port_event(struct net_device *dev, unsigned long event, return NOTIFY_DONE; } -int dsa_slave_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int 
dsa_slave_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); @@ -1534,3 +1534,21 @@ int dsa_slave_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; } + +static struct notifier_block dsa_slave_nb __read_mostly = { + .notifier_call = dsa_slave_netdevice_event, +}; + +int dsa_slave_register_notifier(void) +{ + return register_netdevice_notifier(&dsa_slave_nb); +} + +void dsa_slave_unregister_notifier(void) +{ + int err; + + err = unregister_netdevice_notifier(&dsa_slave_nb); + if (err) + pr_err("DSA: failed to unregister slave notifier (%d)\n", err); +} -- cgit v1.2.3 From 8e92ab3a426e04dc355b196e3b4474f633025a3b Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 3 Feb 2017 13:20:17 -0500 Subject: net: dsa: simplify netdevice events handling Simplify the code handling the slave netdevice notifier call by providing a dsa_slave_changeupper helper for NETDEV_CHANGEUPPER, and so on (only this event is supported at the moment.) Return NOTIFY_DONE when we did not care about an event, and NOTIFY_OK when we were concerned but no error occurred, as the API suggests. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/slave.c | 44 ++++++++++++++++---------------------------- 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 949644c1dac2..332eb234dc21 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1491,37 +1491,22 @@ static bool dsa_slave_dev_check(struct net_device *dev) return dev->netdev_ops == &dsa_slave_netdev_ops; } -static int dsa_slave_port_upper_event(struct net_device *dev, - unsigned long event, void *ptr) +static int dsa_slave_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) { - struct netdev_notifier_changeupper_info *info = ptr; - struct net_device *upper = info->upper_dev; - int err = 0; + int err = NOTIFY_DONE; - switch (event) { - case NETDEV_CHANGEUPPER: - if (netif_is_bridge_master(upper)) { - if (info->linking) - err = dsa_slave_bridge_port_join(dev, upper); - else - dsa_slave_bridge_port_leave(dev, upper); + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) { + err = dsa_slave_bridge_port_join(dev, info->upper_dev); + err = notifier_from_errno(err); + } else { + dsa_slave_bridge_port_leave(dev, info->upper_dev); + err = NOTIFY_OK; } - - break; - } - - return notifier_from_errno(err); -} - -static int dsa_slave_port_event(struct net_device *dev, unsigned long event, - void *ptr) -{ - switch (event) { - case NETDEV_CHANGEUPPER: - return dsa_slave_port_upper_event(dev, event, ptr); } - return NOTIFY_DONE; + return err; } static int dsa_slave_netdevice_event(struct notifier_block *nb, @@ -1529,8 +1514,11 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (dsa_slave_dev_check(dev)) - return dsa_slave_port_event(dev, event, ptr); + if (dev->netdev_ops != &dsa_slave_netdev_ops) + return NOTIFY_DONE; + + if (event == NETDEV_CHANGEUPPER) + return dsa_slave_changeupper(dev, ptr); return NOTIFY_DONE; } -- cgit v1.2.3 From 9c26542685130ef3b55cdb4e04eec0ac33376b41 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 3 Feb 2017 13:20:18 -0500 Subject: net: dsa: rollback bridging on error When an error is returned during the bridging of a port in a NETDEV_CHANGEUPPER event, net/core/dev.c rolls back the operation. 
Be consistent and unassign dp->bridge_dev when this happens. In the meantime, add comments to document this behavior. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/slave.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 332eb234dc21..d726307c7795 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -562,12 +562,21 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, struct dsa_switch *ds = p->dp->ds; int ret = -EOPNOTSUPP; + /* Here the port is already bridged. Reflect the current configuration + * so that drivers can program their chips accordingly. + */ p->dp->bridge_dev = br; if (ds->ops->port_bridge_join) ret = ds->ops->port_bridge_join(ds, p->dp->index, br); - return ret == -EOPNOTSUPP ? 0 : ret; + /* The bridging is rolled back on error */ + if (ret && ret != -EOPNOTSUPP) { + p->dp->bridge_dev = NULL; + return ret; + } + + return 0; } static void dsa_slave_bridge_port_leave(struct net_device *dev, @@ -576,6 +585,9 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev, struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->dp->ds; + /* Here the port is already unbridged. Reflect the current configuration + * so that drivers can program their chips accordingly. + */ p->dp->bridge_dev = NULL; if (ds->ops->port_bridge_leave) -- cgit v1.2.3 From c5d35cb32cffa6e4c2db1cbd9a544e10a8d6fda9 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 3 Feb 2017 13:20:19 -0500 Subject: net: dsa: change state setter scope The scope of the functions inside net/dsa/slave.c must be the slave net_device pointer. Change to state setter helper accordingly to simplify callers. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/slave.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index d726307c7795..d8c3c0f00cf3 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -74,9 +74,12 @@ static inline bool dsa_port_is_bridged(struct dsa_port *dp) return !!dp->bridge_dev; } -static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state) +static void dsa_slave_set_state(struct net_device *dev, u8 state) { - struct dsa_port *dp = &ds->ports[port]; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_port *dp = p->dp; + struct dsa_switch *ds = dp->ds; + int port = dp->index; if (ds->ops->port_stp_state_set) ds->ops->port_stp_state_set(ds, port, state); @@ -133,7 +136,7 @@ static int dsa_slave_open(struct net_device *dev) goto clear_promisc; } - dsa_port_set_stp_state(ds, p->dp->index, stp_state); + dsa_slave_set_state(dev, stp_state); if (p->phy) phy_start(p->phy); @@ -175,7 +178,7 @@ static int dsa_slave_close(struct net_device *dev) if (ds->ops->port_disable) ds->ops->port_disable(ds, p->dp->index, p->phy); - dsa_port_set_stp_state(ds, p->dp->index, BR_STATE_DISABLED); + dsa_slave_set_state(dev, BR_STATE_DISABLED); return 0; } @@ -382,7 +385,7 @@ static int dsa_slave_stp_state_set(struct net_device *dev, if (switchdev_trans_ph_prepare(trans)) return ds->ops->port_stp_state_set ? 
0 : -EOPNOTSUPP; - dsa_port_set_stp_state(ds, p->dp->index, attr->u.stp_state); + dsa_slave_set_state(dev, attr->u.stp_state); return 0; } @@ -596,7 +599,7 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev, /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, * so allow it to be in BR_STATE_FORWARDING to be kept functional */ - dsa_port_set_stp_state(ds, p->dp->index, BR_STATE_FORWARDING); + dsa_slave_set_state(dev, BR_STATE_FORWARDING); } static int dsa_slave_port_attr_get(struct net_device *dev, -- cgit v1.2.3 From f515f192ab4f45bb695146b82432d63d98775787 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 3 Feb 2017 13:20:20 -0500 Subject: net: dsa: add switch notifier Add a notifier block per DSA switch, registered against a notifier head in the switch fabric they belong to. This infrastructure will allow to propagate fabric-wide events such as port bridging, VLAN configuration, etc. If a DSA switch driver cares about cross-chip configuration, such events can be caught. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 7 +++++++ net/dsa/Makefile | 1 + net/dsa/dsa.c | 6 ++++++ net/dsa/dsa2.c | 6 ++++++ net/dsa/dsa_priv.h | 4 ++++ net/dsa/switch.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 77 insertions(+) create mode 100644 net/dsa/switch.c diff --git a/include/net/dsa.h b/include/net/dsa.h index 2cb77e64d648..ac4ea7c3a102 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -92,6 +93,9 @@ struct packet_type; struct dsa_switch_tree { struct list_head list; + /* Notifier chain for switch-wide events */ + struct raw_notifier_head nh; + /* Tree identifier */ u32 tree; @@ -182,6 +186,9 @@ struct dsa_switch { struct dsa_switch_tree *dst; int index; + /* Listener for switch fabric events */ + struct notifier_block nb; + /* * Give the switch driver somewhere to hang its private data * structure. 
diff --git a/net/dsa/Makefile b/net/dsa/Makefile index a3380ed0e0be..72912982de3d 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -1,6 +1,7 @@ # the core obj-$(CONFIG_NET_DSA) += dsa_core.o dsa_core-y += dsa.o slave.o dsa2.o +dsa_core-y += dsa.o slave.o dsa2.o switch.o # tagging formats dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index beb79ccf0f59..22e44f691ab9 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -275,6 +275,10 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) if (ret < 0) return ret; + ret = dsa_switch_register_notifier(ds); + if (ret) + return ret; + if (ops->set_addr) { ret = ops->set_addr(ds, dst->master_netdev->dev_addr); if (ret < 0) @@ -400,6 +404,8 @@ static void dsa_switch_destroy(struct dsa_switch *ds) if (ds->slave_mii_bus && ds->ops->phy_read) mdiobus_unregister(ds->slave_mii_bus); + + dsa_switch_unregister_notifier(ds); } #ifdef CONFIG_PM_SLEEP diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 9f8cc26be9ea..1c546b6621ee 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -294,6 +294,10 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds) if (err < 0) return err; + err = dsa_switch_register_notifier(ds); + if (err) + return err; + if (ds->ops->set_addr) { err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr); if (err < 0) @@ -364,6 +368,8 @@ static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds) if (ds->slave_mii_bus && ds->ops->phy_read) mdiobus_unregister(ds->slave_mii_bus); + + dsa_switch_unregister_notifier(ds); } static int dsa_dst_apply(struct dsa_switch_tree *dst) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 591a40aea9ca..0706a511244e 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -66,6 +66,10 @@ int dsa_slave_resume(struct net_device *slave_dev); int dsa_slave_register_notifier(void); void dsa_slave_unregister_notifier(void); +/* switch.c */ +int dsa_switch_register_notifier(struct dsa_switch *ds); +void dsa_switch_unregister_notifier(struct dsa_switch *ds); + /* tag_dsa.c */ extern const struct dsa_device_ops dsa_netdev_ops; diff --git a/net/dsa/switch.c b/net/dsa/switch.c new file mode 100644 index 000000000000..e22fa7633d03 --- /dev/null +++ b/net/dsa/switch.c @@ -0,0 +1,53 @@ +/* + * Handling of a single switch chip, part of a switch fabric + * + * Copyright (c) 2017 Vivien Didelot + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include + +static int dsa_switch_event(struct notifier_block *nb, + unsigned long event, void *info) +{ + struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb); + int err; + + switch (event) { + default: + err = -EOPNOTSUPP; + break; + } + + /* Non-switchdev operations cannot be rolled back. If a DSA driver + * returns an error during the chained call, switch chips may be in an + * inconsistent state. 
+ */ + if (err) + dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n", + event, err); + + return notifier_from_errno(err); +} + +int dsa_switch_register_notifier(struct dsa_switch *ds) +{ + ds->nb.notifier_call = dsa_switch_event; + + return raw_notifier_chain_register(&ds->dst->nh, &ds->nb); +} + +void dsa_switch_unregister_notifier(struct dsa_switch *ds) +{ + int err; + + err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb); + if (err) + dev_err(ds->dev, "failed to unregister notifier (%d)\n", err); +} -- cgit v1.2.3 From 04d3a4c6af52a58370795bc9f70dc15f51f8bb84 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 3 Feb 2017 13:20:21 -0500 Subject: net: dsa: introduce bridge notifier A slave device will now notify the switch fabric once its port is bridged or unbridged, instead of calling directly its switch operations. This code allows propagating cross-chip bridging events in the fabric. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 10 ++++++++++ net/dsa/slave.c | 40 +++++++++++++++++++++++++++++----------- net/dsa/switch.c | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 11 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index ac4ea7c3a102..e9c940c8936f 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -268,6 +268,16 @@ struct switchdev_obj_port_fdb; struct switchdev_obj_port_mdb; struct switchdev_obj_port_vlan; +#define DSA_NOTIFIER_BRIDGE_JOIN 1 +#define DSA_NOTIFIER_BRIDGE_LEAVE 2 + +/* DSA_NOTIFIER_BRIDGE_* */ +struct dsa_notifier_bridge_info { + struct net_device *br; + int sw_index; + int port; +}; + struct dsa_switch_ops { /* * Probing and setup. diff --git a/net/dsa/slave.c b/net/dsa/slave.c index d8c3c0f00cf3..061a49c29cef 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -27,6 +27,17 @@ static bool dsa_slave_dev_check(struct net_device *dev); +static int dsa_slave_notify(struct net_device *dev, unsigned long e, void *v) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct raw_notifier_head *nh = &p->dp->ds->dst->nh; + int err; + + err = raw_notifier_call_chain(nh, e, v); + + return notifier_to_errno(err); +} + /* slave mii_bus handling ***************************************************/ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg) { @@ -562,39 +573,46 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, struct net_device *br) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->dp->ds; - int ret = -EOPNOTSUPP; + struct dsa_notifier_bridge_info info = { + .sw_index = p->dp->ds->index, + .port = p->dp->index, + .br = br, + }; + int err; /* Here the port is already bridged. Reflect the current configuration * so that drivers can program their chips accordingly. */ p->dp->bridge_dev = br; - if (ds->ops->port_bridge_join) - ret = ds->ops->port_bridge_join(ds, p->dp->index, br); + err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_JOIN, &info); /* The bridging is rolled back on error */ - if (ret && ret != -EOPNOTSUPP) { + if (err) p->dp->bridge_dev = NULL; - return ret; - } - return 0; + return err; } static void dsa_slave_bridge_port_leave(struct net_device *dev, struct net_device *br) { struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->dp->ds; + struct dsa_notifier_bridge_info info = { + .sw_index = p->dp->ds->index, + .port = p->dp->index, + .br = br, + }; + int err; /* Here the port is already unbridged. 
Reflect the current configuration * so that drivers can program their chips accordingly. */ p->dp->bridge_dev = NULL; - if (ds->ops->port_bridge_leave) - ds->ops->port_bridge_leave(ds, p->dp->index, br); + err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_LEAVE, &info); + if (err) + netdev_err(dev, "failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n"); /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, * so allow it to be in BR_STATE_FORWARDING to be kept functional diff --git a/net/dsa/switch.c b/net/dsa/switch.c index e22fa7633d03..6456dacf9ae9 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -13,6 +13,32 @@ #include #include +static int dsa_switch_bridge_join(struct dsa_switch *ds, + struct dsa_notifier_bridge_info *info) +{ + if (ds->index == info->sw_index && ds->ops->port_bridge_join) + return ds->ops->port_bridge_join(ds, info->port, info->br); + + if (ds->index != info->sw_index) + dev_dbg(ds->dev, "crosschip DSA port %d.%d bridged to %s\n", + info->sw_index, info->port, netdev_name(info->br)); + + return 0; +} + +static int dsa_switch_bridge_leave(struct dsa_switch *ds, + struct dsa_notifier_bridge_info *info) +{ + if (ds->index == info->sw_index && ds->ops->port_bridge_leave) + ds->ops->port_bridge_leave(ds, info->port, info->br); + + if (ds->index != info->sw_index) + dev_dbg(ds->dev, "crosschip DSA port %d.%d unbridged from %s\n", + info->sw_index, info->port, netdev_name(info->br)); + + return 0; +} + static int dsa_switch_event(struct notifier_block *nb, unsigned long event, void *info) { @@ -20,6 +46,12 @@ static int dsa_switch_event(struct notifier_block *nb, int err; switch (event) { + case DSA_NOTIFIER_BRIDGE_JOIN: + err = dsa_switch_bridge_join(ds, info); + break; + case DSA_NOTIFIER_BRIDGE_LEAVE: + err = dsa_switch_bridge_leave(ds, info); + break; default: err = -EOPNOTSUPP; break; -- cgit v1.2.3 From 29200c199cc9bde59033ab30fcc40b6c8ae630b0 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 3 Feb 2017 16:25:23 -0500 Subject: bpf: test for AND edge cases These two tests are based on the work done for f23cc643f9ba. The first test is just a basic one to make sure we don't allow AND'ing negative values, even if it would result in a valid index for the array. The second is a cleaned up version of the original testcase provided by Jann Horn that resulted in the commit. Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: Josef Bacik Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/test_verifier.c | 55 +++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 0d0912c7f03c..df194e1d56c2 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -4370,6 +4370,61 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, + { + "invalid and of negative number", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, + offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "invalid range check", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_9, 1), + BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1), + BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1), + BPF_MOV32_IMM(BPF_REG_3, 1), + BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9), + BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", + .result = REJECT, + .result_unpriv = REJECT, + } }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3 From 63dfef75ed75364901d7caa52c6420cec3e73519 Mon Sep 17 00:00:00 2001 From: William Tu Date: Sat, 4 Feb 2017 08:37:29 -0800 Subject: bpf: enable verifier to add 0 to packet ptr The patch fixes the case when adding a zero value to the packet pointer. The zero value could come from src_reg equals type BPF_K or CONST_IMM. The patch fixes both, otherwise the verifer reports the following error: [...] R0=imm0,min_value=0,max_value=0 R1=pkt(id=0,off=0,r=4) R2=pkt_end R3=fp-12 R4=imm4,min_value=4,max_value=4 R5=pkt(id=0,off=4,r=4) 269: (bf) r2 = r0 // r2 becomes imm0 270: (77) r2 >>= 3 271: (bf) r4 = r1 // r4 becomes pkt ptr 272: (0f) r4 += r2 // r4 += 0 addition of negative constant to packet pointer is not allowed Signed-off-by: William Tu Signed-off-by: Mihai Budiu Cc: Daniel Borkmann Cc: Alexei Starovoitov Acked-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 2 +- tools/testing/selftests/bpf/test_verifier.c | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fb3513b35c0b..1a754e5d2695 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1397,7 +1397,7 @@ static int check_packet_ptr_add(struct bpf_verifier_env *env, imm = insn->imm; add_imm: - if (imm <= 0) { + if (imm < 0) { verbose("addition of negative constant to packet pointer is not allowed\n"); return -EACCES; } diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index df194e1d56c2..71f6407cde60 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -2403,6 +2403,29 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), + BPF_MOV64_IMM(BPF_REG_5, 12), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "helper access to packet: test1, valid packet_ptr range", .insns = { -- cgit v1.2.3 From 1f90c7f3470580e24da25f6a6c1fb480ed9371ac Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 4 Feb 2017 18:05:06 +0100 Subject: bridge: modify bridge and port to have often accessed fields in one cache line Move around net_bridge so the vlan fields are in the beginning since they're checked on every packet even if vlan filtering is disabled. For the port move flags & vlan group to the beginning, so they're in the same cache line with the port's state (both flags and state are checked on each packet). Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_private.h | 43 ++++++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 40177df45ba6..ec8560349b6f 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -212,12 +212,16 @@ struct net_bridge_mdb_htable u32 ver; }; -struct net_bridge_port -{ +struct net_bridge_port { struct net_bridge *br; struct net_device *dev; struct list_head list; + unsigned long flags; +#ifdef CONFIG_BRIDGE_VLAN_FILTERING + struct net_bridge_vlan_group __rcu *vlgrp; +#endif + /* STP */ u8 priority; u8 state; @@ -238,8 +242,6 @@ struct net_bridge_port struct kobject kobj; struct rcu_head rcu; - unsigned long flags; - #ifdef CONFIG_BRIDGE_IGMP_SNOOPING struct bridge_mcast_own_query ip4_own_query; #if IS_ENABLED(CONFIG_IPV6) @@ -259,9 +261,6 @@ struct net_bridge_port #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *np; #endif -#ifdef CONFIG_BRIDGE_VLAN_FILTERING - struct net_bridge_vlan_group __rcu *vlgrp; -#endif #ifdef CONFIG_NET_SWITCHDEV int offload_fwd_mark; #endif @@ -283,14 +282,21 @@ static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device * rtnl_dereference(dev->rx_handler_data) : NULL; } -struct net_bridge -{ +struct net_bridge { spinlock_t lock; + spinlock_t hash_lock; struct list_head port_list; struct net_device *dev; - struct pcpu_sw_netstats __percpu *stats; - spinlock_t hash_lock; + /* These fields are accessed on each packet */ +#ifdef CONFIG_BRIDGE_VLAN_FILTERING + u8 vlan_enabled; + u8 vlan_stats_enabled; + __be16 vlan_proto; + u16 default_pvid; + struct net_bridge_vlan_group __rcu *vlgrp; +#endif + struct hlist_head hash[BR_HASH_SIZE]; #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) union { @@ -308,6 +314,9 @@ struct net_bridge bridge_id designated_root; bridge_id bridge_id; u32 root_path_cost; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; unsigned long max_age; unsigned long hello_time; unsigned long forward_delay; @@ -319,7 +328,6 @@ struct net_bridge u8 group_addr[ETH_ALEN]; bool group_addr_set; - u16 root_port; enum { BR_NO_STP, /* no spanning tree */ @@ -327,9 +335,6 @@ struct net_bridge BR_USER_STP, /* new RSTP in userspace */ } stp_enabled; - unsigned char topology_change; - unsigned char topology_change_detected; - #ifdef CONFIG_BRIDGE_IGMP_SNOOPING unsigned char multicast_router; @@ -381,14 +386,6 @@ struct net_bridge #ifdef CONFIG_NET_SWITCHDEV int offload_fwd_mark; #endif - -#ifdef CONFIG_BRIDGE_VLAN_FILTERING - struct net_bridge_vlan_group __rcu *vlgrp; - u8 vlan_enabled; - u8 vlan_stats_enabled; - __be16 vlan_proto; - u16 default_pvid; -#endif }; struct br_input_skb_cb { -- cgit v1.2.3 From f7cdee8a79a1cb03fa9ca71b825e72f880b344e1 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 4 Feb 2017 18:05:07 +0100 Subject: bridge: move to workqueue gc Move the fdb garbage collector to a workqueue which fires at least 10 milliseconds apart and cleans chain by chain allowing for other tasks to run in the meantime. When having thousands of fdbs the system is much more responsive. Most importantly remove the need to check if the matched entry has expired in __br_fdb_get that causes false-sharing and is completely unnecessary if we cleanup entries, at worst we'll get 10ms of traffic for that entry before it gets deleted. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_device.c | 1 + net/bridge/br_fdb.c | 31 +++++++++++++++++++------------ net/bridge/br_if.c | 2 +- net/bridge/br_ioctl.c | 2 +- net/bridge/br_netlink.c | 2 +- net/bridge/br_private.h | 4 ++-- net/bridge/br_stp.c | 2 +- net/bridge/br_stp_if.c | 4 ++-- net/bridge/br_stp_timer.c | 2 -- net/bridge/br_sysfs_br.c | 2 +- 10 files changed, 29 insertions(+), 23 deletions(-) diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 5ba0b558f8ae..d208ee9ab60a 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -411,4 +411,5 @@ void br_dev_setup(struct net_device *dev) br_netfilter_rtable_init(br); br_stp_timer_init(br); br_multicast_init(br); + INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup); } diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index e4a4176171c9..5cbed5c0db88 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -154,7 +154,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) if (f->added_by_external_learn) fdb_del_external_learn(f); - hlist_del_rcu(&f->hlist); + hlist_del_init_rcu(&f->hlist); fdb_notify(br, f, RTM_DELNEIGH); call_rcu(&f->rcu, fdb_rcu_free); } @@ -290,34 +290,43 @@ out: spin_unlock_bh(&br->hash_lock); } -void br_fdb_cleanup(unsigned long _data) +void br_fdb_cleanup(struct work_struct *work) { - struct net_bridge *br = (struct net_bridge *)_data; + struct net_bridge *br = container_of(work, struct net_bridge, + gc_work.work); unsigned long delay = hold_time(br); - unsigned long next_timer = jiffies + br->ageing_time; + unsigned long work_delay = delay; + unsigned long now = jiffies; int i; - spin_lock(&br->hash_lock); for (i = 0; i < BR_HASH_SIZE; i++) { struct net_bridge_fdb_entry *f; struct hlist_node *n; + if (!br->hash[i].first) + continue; + + spin_lock_bh(&br->hash_lock); hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) { unsigned long this_timer; + if (f->is_static) continue; if (f->added_by_external_learn) continue; this_timer = f->updated + delay; - if (time_before_eq(this_timer, jiffies)) + if (time_after(this_timer, now)) + work_delay = min(work_delay, this_timer - now); + else fdb_delete(br, f); - else if (time_before(this_timer, next_timer)) - next_timer = this_timer; } + spin_unlock_bh(&br->hash_lock); + cond_resched(); } - spin_unlock(&br->hash_lock); - mod_timer(&br->gc_timer, round_jiffies_up(next_timer)); + /* Cleanup minimum 10 milliseconds apart */ + work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10)); + mod_delayed_work(system_long_wq, &br->gc_work, work_delay); } /* Completely flush all dynamic entries in forwarding database.*/ @@ -382,8 +391,6 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, &br->hash[br_mac_hash(addr, vid)], hlist) { if (ether_addr_equal(fdb->addr.addr, addr) && fdb->vlan_id == vid) { - if (unlikely(has_expired(br, fdb))) - break; return fdb; } } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index ed0dd3340084..8ac1770aa222 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -313,7 +313,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head) br_vlan_flush(br); br_multicast_dev_del(br); - del_timer_sync(&br->gc_timer); + cancel_delayed_work_sync(&br->gc_work); br_sysfs_delbr(br->dev); unregister_netdevice_queue(br->dev, head); diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index da8157c57eb1..7970f8540cbb 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -149,7 +149,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int 
cmd) b.hello_timer_value = br_timer_value(&br->hello_timer); b.tcn_timer_value = br_timer_value(&br->tcn_timer); b.topology_change_timer_value = br_timer_value(&br->topology_change_timer); - b.gc_timer_value = br_timer_value(&br->gc_timer); + b.gc_timer_value = br_timer_value(&br->gc_work.timer); rcu_read_unlock(); if (copy_to_user((void __user *)args[1], &b, sizeof(b))) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index fc5d885dbb22..1cbdc5b96aa7 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1250,7 +1250,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval, IFLA_BR_PAD)) return -EMSGSIZE; - clockval = br_timer_value(&br->gc_timer); + clockval = br_timer_value(&br->gc_work.timer); if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD)) return -EMSGSIZE; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index ec8560349b6f..47fd64bf5022 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -379,7 +379,7 @@ struct net_bridge { struct timer_list hello_timer; struct timer_list tcn_timer; struct timer_list topology_change_timer; - struct timer_list gc_timer; + struct delayed_work gc_work; struct kobject *ifobj; u32 auto_cnt; @@ -502,7 +502,7 @@ void br_fdb_find_delete_local(struct net_bridge *br, const unsigned char *addr, u16 vid); void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr); void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr); -void br_fdb_cleanup(unsigned long arg); +void br_fdb_cleanup(struct work_struct *work); void br_fdb_delete_by_port(struct net_bridge *br, const struct net_bridge_port *p, u16 vid, int do_all); struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 71fd1a4e63cc..8f56c2d1f1a7 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -602,7 +602,7 @@ int br_set_ageing_time(struct net_bridge *br, clock_t ageing_time) br->ageing_time = t; spin_unlock_bh(&br->lock); - mod_timer(&br->gc_timer, jiffies); + mod_delayed_work(system_long_wq, &br->gc_work, 0); return 0; } diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 6c1e21411125..08341d2aa9c9 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -57,7 +57,7 @@ void br_stp_enable_bridge(struct net_bridge *br) spin_lock_bh(&br->lock); if (br->stp_enabled == BR_KERNEL_STP) mod_timer(&br->hello_timer, jiffies + br->hello_time); - mod_timer(&br->gc_timer, jiffies + HZ/10); + mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10); br_config_bpdu_generation(br); @@ -88,7 +88,7 @@ void br_stp_disable_bridge(struct net_bridge *br) del_timer_sync(&br->hello_timer); del_timer_sync(&br->topology_change_timer); del_timer_sync(&br->tcn_timer); - del_timer_sync(&br->gc_timer); + cancel_delayed_work_sync(&br->gc_work); } /* called under bridge lock */ diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 7ddb38e0a06e..c98b3e5c140a 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -153,8 +153,6 @@ void br_stp_timer_init(struct net_bridge *br) setup_timer(&br->topology_change_timer, br_topology_change_timer_expired, (unsigned long) br); - - setup_timer(&br->gc_timer, br_fdb_cleanup, (unsigned long) br); } void br_stp_port_timer_init(struct net_bridge_port *p) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index a18148213b08..0f4034934d56 100644 
--- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -263,7 +263,7 @@ static ssize_t gc_timer_show(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer)); + return sprintf(buf, "%ld\n", br_timer_value(&br->gc_work.timer)); } static DEVICE_ATTR_RO(gc_timer); -- cgit v1.2.3 From 1214628cb1868254e107230c9052f28ff9899b6a Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 4 Feb 2017 18:05:08 +0100 Subject: bridge: move write-heavy fdb members in their own cache line Fdb's used and updated fields are written to on every packet forward and packet receive respectively. Thus if we are receiving packets from a particular fdb, they'll cause false-sharing with everyone who has looked it up (even if it didn't match, since mac/vid share cache line!). The "used" field is even worse since it is updated on every packet forward to that fdb, thus the standard config where X ports use a single gateway results in 100% fdb false-sharing. Note that this patch does not prevent the last scenario, but it makes it better for other bridge participants which are not using that fdb (and are only doing lookups over it). The point is with this move we make sure that only communicating parties get the false-sharing, in a later patch we'll show how to avoid that too. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_private.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 47fd64bf5022..1cbbf63a5ef7 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -160,19 +160,21 @@ struct net_bridge_vlan_group { u16 pvid; }; -struct net_bridge_fdb_entry -{ +struct net_bridge_fdb_entry { struct hlist_node hlist; struct net_bridge_port *dst; - unsigned long updated; - unsigned long used; mac_addr addr; __u16 vlan_id; unsigned char is_local:1, is_static:1, added_by_user:1, added_by_external_learn:1; + + /* write-heavy members should not affect lookups */ + unsigned long updated ____cacheline_aligned_in_smp; + unsigned long used; + struct rcu_head rcu; }; -- cgit v1.2.3 From 83a718d6294964fd1b227fa5f1ad001bc1fe7656 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 4 Feb 2017 18:05:09 +0100 Subject: bridge: fdb: write to used and updated at most once per jiffy Writing once per jiffy is enough to limit the bridge's false sharing. After this change the bridge doesn't show up in the local load HitM stats. Suggested-by: David S. Miller Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 3 ++- net/bridge/br_input.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 5cbed5c0db88..5028691fa68a 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -597,7 +597,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, fdb->dst = source; fdb_modified = true; } - fdb->updated = jiffies; + if (jiffies != fdb->updated) + fdb->updated = jiffies; if (unlikely(added_by_user)) fdb->added_by_user = 1; if (unlikely(fdb_modified)) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index fba38d8a1a08..220943f920d2 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -198,7 +198,8 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if (dst->is_local) return br_pass_frame_up(skb); - dst->used = jiffies; + if (jiffies != dst->used) + dst->used = jiffies; br_forward(dst->dst, skb, local_rcv, false); } else { if (!mcast_hit) -- cgit v1.2.3 From 1e75622c630b8701f11ea6af568e0006885c9b46 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:29:58 -0600 Subject: rtlwifi: Fix programing CAM content sequence. There is a potential race condition when the control byte of a CAM entry is written first. Write in reverse order to correct the condition. Signed-off-by: Ping-Ke Shih Signed-off-by: shaofu Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/cam.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c index a0605d8e9970..f7a7dcbf945e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/cam.c +++ b/drivers/net/wireless/realtek/rtlwifi/cam.c @@ -45,12 +45,13 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, u32 target_command; u32 target_content = 0; - u8 entry_i; + int entry_i; RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :", key_cont_128, 16); - for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { + /* 0-1 config + mac, 2-5 fill 128key,6-7 are reserved */ + for (entry_i = CAM_CONTENT_COUNT - 1; entry_i >= 0; entry_i--) { target_command = entry_i + CAM_CONTENT_COUNT * entry_no; target_command = target_command | BIT(31) | BIT(16); @@ -102,7 +103,6 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, target_content); rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], target_command); - udelay(100); RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A4: %x\n", target_content); -- cgit v1.2.3 From 8d0d43e342083c9d2b15542e9a99727c0ef43a7f Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:29:59 -0600 Subject: rtlwifi: Set retry limit depends on vif type. We assign different retry limit according to vif type, because it can boost performance in field. 
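The per-interface-type retry policy is easiest to see in isolation. The sketch below is a minimal, self-contained illustration of what the rtl_op_add_interface() hunks further down appear to implement: AP-like interfaces (AP, ad-hoc, P2P GO, mesh point) get a short limit of 0x07, everything else keeps the 0x30 default. The enum and helper names here are stand-ins invented for the example, not the driver's real symbols.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for nl80211's interface types; only the cases
 * the patch distinguishes are listed here.
 */
enum vif_type {
	VIF_STATION,
	VIF_ADHOC,
	VIF_AP,
	VIF_P2P_GO,
	VIF_MESH_POINT,
};

/* Pick a hardware retry limit per interface type: AP-like interfaces
 * retransmit at most 7 times, everything else keeps the 0x30 default.
 */
static uint8_t pick_retry_limit(enum vif_type type)
{
	switch (type) {
	case VIF_ADHOC:
	case VIF_AP:
	case VIF_P2P_GO:
	case VIF_MESH_POINT:
		return 0x07;
	default:
		return 0x30;
	}
}

int main(void)
{
	printf("station: %#x\n", pick_retry_limit(VIF_STATION));
	printf("ap:      %#x\n", pick_retry_limit(VIF_AP));
	return 0;
}

In the real patch the chosen value is then mirrored into mac->retry_long/retry_short and pushed to the hardware via the HW_VAR_RETRY_LIMIT register hook, as the diff below shows.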
Signed-off-by: Ping-Ke Shih Signed-off-by: shaofu Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/core.c | 21 ++++++++++++++++++--- drivers/net/wireless/realtek/rtlwifi/pci.c | 7 +++++++ drivers/net/wireless/realtek/rtlwifi/ps.c | 3 +++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 179a699cc6ac..a4f8e326a2bc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -233,6 +233,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); int err = 0; + u8 retry_limit = 0x30; if (mac->vif) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, @@ -271,6 +272,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, (u8 *)(&mac->basic_rates)); + retry_limit = 0x07; break; case NL80211_IFTYPE_P2P_GO: mac->p2p = P2P_ROLE_GO; @@ -287,6 +289,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, mac->basic_rates = 0xff0; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, (u8 *)(&mac->basic_rates)); + + retry_limit = 0x07; break; case NL80211_IFTYPE_MESH_POINT: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, @@ -300,6 +304,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, mac->basic_rates = 0xff0; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, (u8 *)(&mac->basic_rates)); + + retry_limit = 0x07; break; default: pr_err("operation mode %d is not supported!\n", @@ -321,6 +327,10 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, memcpy(mac->mac_addr, vif->addr, ETH_ALEN); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); + mac->retry_long = retry_limit; + mac->retry_short = retry_limit; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, + (u8 *)(&retry_limit)); out: mutex_unlock(&rtlpriv->locks.conf_mutex); return err; @@ -645,10 +655,15 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n", hw->conf.long_frame_max_tx_count); - mac->retry_long = hw->conf.long_frame_max_tx_count; - mac->retry_short = hw->conf.long_frame_max_tx_count; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, + /* brought up everything changes (changed == ~0) indicates first + * open, so use our default value instead of that of wiphy. 
+ */ + if (changed != ~0) { + mac->retry_long = hw->conf.long_frame_max_tx_count; + mac->retry_short = hw->conf.long_frame_max_tx_count; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, (u8 *)(&hw->conf.long_frame_max_tx_count)); + } } if (changed & IEEE80211_CONF_CHANGE_CHANNEL && diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index b402f438b1af..2e6b888bd417 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1213,6 +1213,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw, mac->current_ampdu_density = 7; mac->current_ampdu_factor = 3; + /*Retry Limit*/ + mac->retry_short = 7; + mac->retry_long = 7; + /*QOS*/ rtlpci->acm_method = EACMWAY2_SW; @@ -1813,6 +1817,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); int err; @@ -1830,6 +1835,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw) "Failed to config hardware!\n"); return err; } + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, + &rtlmac->retry_long); rtlpriv->cfg->ops->enable_interrupt(hw); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 4f8327097086..0d152877d969 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -34,6 +34,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); /*<1> reset trx ring */ if (rtlhal->interface == INTF_PCI) @@ -46,6 +47,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw) /*<2> Enable Adapter */ if (rtlpriv->cfg->ops->hw_init(hw)) return false; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, + &rtlmac->retry_long); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); /*<3> Enable Interrupt */ -- cgit v1.2.3 From 6f85c03bc34ce7040f0b56eca963be4969b719de Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:00 -0600 Subject: rtlwifi: Add a new enumeration value to btc_set_type The new value is needed for future capability. Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index 3d308ebbe048..eb890110bd39 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -263,6 +263,7 @@ enum btc_set_type { /* type trigger some action */ BTC_SET_ACT_GET_BT_RSSI, BTC_SET_ACT_AGGREGATE_CTRL, + BTC_SET_ACT_ANTPOSREGRISTRY_CTRL, /********* for 1Ant **********/ /* type bool */ -- cgit v1.2.3 From 1a2814739fe5876592048550cce15d5fceeae355 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:01 -0600 Subject: rtlwifi: btcoexist: Add vendor definition for new btcoexist Routine halbtc_get() will need to be able to get the vendor ID. 
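For context, the out-parameter convention used by halbtc_get() is sketched below: the caller passes a BTC_GET_* selector plus a buffer, and the new BTC_GET_U4_VENDOR case simply reports BTC_VENDOR_OTHER, matching the initial implementation in the diff that follows. This is a compilable illustration with simplified stand-in types, not the driver's actual structures.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the btcoexist selector and vendor enums;
 * only the values relevant to this patch are reproduced.
 */
enum btc_get_type {
	BTC_GET_U4_BT_PATCH_VER,
	BTC_GET_U4_VENDOR,
};

enum btc_vendor {
	BTC_VENDOR_LENOVO,
	BTC_VENDOR_ASUS,
	BTC_VENDOR_OTHER,
};

/* halbtc_get()-style convention: the selector picks what to report and
 * the result is written into the caller-supplied buffer. The vendor
 * query always answers BTC_VENDOR_OTHER here, as in the patch.
 */
static int demo_get(enum btc_get_type what, void *out_buf)
{
	uint32_t *u32_tmp = out_buf;

	switch (what) {
	case BTC_GET_U4_VENDOR:
		*u32_tmp = BTC_VENDOR_OTHER;
		return 0;
	default:
		return -1; /* not handled in this sketch */
	}
}

int main(void)
{
	uint32_t vendor = 0;

	if (demo_get(BTC_GET_U4_VENDOR, &vendor) == 0)
		printf("vendor id: %u\n", (unsigned)vendor);
	return 0;
}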
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 3 +++ drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 91cc1397b150..8a0e773e076e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -335,6 +335,9 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) case BTC_GET_U4_BT_PATCH_VER: *u32_tmp = halbtc_get_bt_patch_version(btcoexist); break; + case BTC_GET_U4_VENDOR: + *u32_tmp = BTC_VENDOR_OTHER; + break; case BTC_GET_U1_WIFI_DOT11_CHNL: *u8_tmp = rtlphy->current_channel; break; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index eb890110bd39..f9e7c5e77f51 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -228,6 +228,7 @@ enum btc_get_type { BTC_GET_U4_WIFI_FW_VER, BTC_GET_U4_WIFI_LINK_STATUS, BTC_GET_U4_BT_PATCH_VER, + BTC_GET_U4_VENDOR, /* type u1Byte */ BTC_GET_U1_WIFI_DOT11_CHNL, @@ -245,6 +246,12 @@ enum btc_get_type { BTC_GET_MAX }; +enum btc_vendor { + BTC_VENDOR_LENOVO, + BTC_VENDOR_ASUS, + BTC_VENDOR_OTHER +}; + enum btc_set_type { /* type bool */ BTC_SET_BL_BT_DISABLE, -- cgit v1.2.3 From d46fa3e47aebc10ec800dce1c55a7e149a57b93b Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Mon, 6 Feb 2017 21:30:02 -0600 Subject: rtlwifi: btcoexist: Change logging in halbtc8192e2ant.c This routine uses its own debugging macros These are changed to use the the recently rewritten RT_TRACE macro. There are also some renamed variables that were missed in the previous step. The only functional change is that some debugging statements have been dropped based on the final code supplied by Realtek. 
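The conversion itself is mechanical: each function gains a local rtlpriv pointer taken from btcoexist->adapter, and the file-private btc_alg_dbg() calls become RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, ...). The fragment below is only a standalone mock of that shape; RT_TRACE is emulated with a plain variadic macro, the structures are placeholders, and demo_query_bt_info() is a hypothetical name, so this illustrates the pattern rather than the real rtlwifi API.

#include <stdio.h>

/* Placeholder "component" and "level" markers standing in for the
 * driver's COMP_BT_COEXIST / DBG_LOUD constants.
 */
#define COMP_BT_COEXIST "BT_COEX"
#define DBG_LOUD        "LOUD"

/* Mock of the RT_TRACE() macro: the real one filters on the per-device
 * debug mask held in rtlpriv; this stand-in just prints.
 */
#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
	printf("[%s/%s] " fmt, comp, level, ##__VA_ARGS__)

struct rtl_priv { int dummy; };          /* placeholder private data */
struct btc_coexist { void *adapter; };   /* placeholder coex context */

/* After the conversion, every helper first recovers the per-device
 * private data from the coexist context, then traces through it.
 */
static void demo_query_bt_info(struct btc_coexist *btcoexist)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	(void)rtlpriv; /* the mock macro ignores it; the real one does not */
	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
		 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", 0x1);
}

int main(void)
{
	struct rtl_priv priv = { 0 };
	struct btc_coexist coex = { .adapter = &priv };

	demo_query_bt_info(&coex);
	return 0;
}

The payoff of routing everything through one macro is visible in the diff below: per-category ALGO_* switches disappear, and the trace level is controlled by the same per-device mask as the rest of the driver.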
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../wireless/realtek/rtlwifi/btcoexist/Makefile | 1 + .../realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 845 ++++++++++----------- 2 files changed, 417 insertions(+), 429 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile index d1454d4f08a5..0ef0d310fc24 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile @@ -1,4 +1,5 @@ btcoexist-objs := halbtc8723b2ant.o \ + halbtc8192e2ant.o \ halbtcoutsrc.o \ rtl_btc.o diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index a30af6cc21f3..6a42159e5909 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -59,9 +59,11 @@ static u32 glcoex_ver_8192e_2ant = 0x34; /************************************************************** * local function start with halbtc8192e2ant_ **************************************************************/ -static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, +static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; int btrssi = 0; u8 btrssi_state = coex_sta->pre_bt_rssi_state; @@ -70,84 +72,46 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, if (level_num == 2) { if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi pre state = LOW\n"); - if (btrssi >= (rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + if (btrssi >= + (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) btrssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state switch to High\n"); - } else { + else btrssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state stay at Low\n"); - } } else { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi pre state = HIGH\n"); - if (btrssi < rssi_thresh) { + if (btrssi < rssi_thresh) btrssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Low\n"); - } else { + else btrssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state stay at High\n"); - } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } - if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi pre state = LOW\n"); - if (btrssi >= (rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + if (btrssi >= + (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) btrssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Medium\n"); - } else { + else btrssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state stay at Low\n"); - } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - 
btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi pre state = MEDIUM\n"); if (btrssi >= (rssi_thresh1 + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) btrssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state switch to High\n"); - } else if (btrssi < rssi_thresh) { + else if (btrssi < rssi_thresh) btrssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Low\n"); - } else { + else btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state stay at Medium\n"); - } } else { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi pre state = HIGH\n"); - if (btrssi < rssi_thresh1) { + if (btrssi < rssi_thresh1) btrssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Medium\n"); - } else { + else btrssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "BT Rssi state stay at High\n"); - } } } @@ -160,6 +124,7 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, u8 index, u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; int wifirssi = 0; u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index]; @@ -171,30 +136,20 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) { if (wifirssi >= (rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) wifirssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to High\n"); - } else { + else wifirssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at Low\n"); - } } else { - if (wifirssi < rssi_thresh) { + if (wifirssi < rssi_thresh) wifirssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Low\n"); - } else { + else wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at High\n"); - } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -203,43 +158,26 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) { if (wifirssi >= (rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) wifirssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Medium\n"); - } else { + else wifirssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at Low\n"); - } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_MEDIUM)) { if (wifirssi >= (rssi_thresh1 + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) wifirssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to High\n"); - } else if (wifirssi < rssi_thresh) { + else if (wifirssi < rssi_thresh) wifirssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Low\n"); - } else { + else wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - 
btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at Medium\n"); - } } else { - if (wifirssi < rssi_thresh1) { + if (wifirssi < rssi_thresh1) wifirssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Medium\n"); - } else { + else wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at High\n"); - } } } @@ -250,6 +188,7 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static bool pre_bt_disabled; static u32 bt_disable_cnt; bool bt_active = true, bt_disabled = false; @@ -273,26 +212,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist) bt_disabled = false; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is enabled !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is enabled !!\n"); } else { bt_disable_cnt++; - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); if (bt_disable_cnt >= 2) { bt_disabled = true; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is disabled !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is disabled !!\n"); } } if (pre_bt_disabled != bt_disabled) { - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is from %s to %s!!\n", - (pre_bt_disabled ? "disabled" : "enabled"), - (bt_disabled ? "disabled" : "enabled")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? 
"disabled" : "enabled")); pre_bt_disabled = bt_disabled; } } @@ -469,6 +408,7 @@ static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist, static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u32 reg_hp_txrx, reg_lp_txrx, u32tmp; u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; @@ -488,12 +428,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", - reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", - reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); /* reset counter */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); @@ -501,15 +441,16 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; h2c_parameter[0] |= BIT0; /* trigger */ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -572,6 +513,7 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist) static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; bool bt_hson = false; @@ -581,8 +523,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); if (!bt_link_info->bt_link_exist) { - btc_alg_dbg(ALGO_TRACE, - "No BT link exists!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "No BT link exists!!!\n"); return algorithm; } @@ -597,27 +539,29 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) if (numdiffprofile == 1) { if (bt_link_info->sco_exist) { - btc_alg_dbg(ALGO_TRACE, - "SCO only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "SCO only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "HID only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "HID only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "A2DP only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "A2DP only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "PAN(HS) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "PAN(HS) only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - 
"PAN(EDR) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "PAN(EDR) only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR; } @@ -626,21 +570,23 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } else if (numdiffprofile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "SCO + HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "SCO + HID\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "SCO + A2DP ==> SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "SCO + A2DP ==> SCO\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->pan_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "SCO + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "SCO + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "SCO + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "SCO + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO_PAN; } @@ -649,38 +595,44 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { if (stack_info->num_of_hid >= 2) { - btc_alg_dbg(ALGO_TRACE, - "HID*2 + A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "HID*2 + A2DP\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } else { - btc_alg_dbg(ALGO_TRACE, - "HID + A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "HID + A2DP\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP; } } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "HID + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID; } else { - btc_alg_dbg(ALGO_TRACE, - "HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "HID + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "A2DP + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "A2DP + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP; } @@ -690,30 +642,34 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "SCO + HID + A2DP ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "SCO + HID + A2DP ==> HID\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "SCO + HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "SCO + HID + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "SCO + HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "SCO + HID + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO_PAN; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "SCO + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "SCO + A2DP + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { - 
btc_alg_dbg(ALGO_TRACE, - "SCO + A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "SCO + A2DP + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } @@ -723,13 +679,15 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "HID + A2DP + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "HID + A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "HID + A2DP + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } @@ -741,12 +699,14 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - btc_alg_dbg(ALGO_TRACE, - "ErrorSCO+HID+A2DP+PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "ErrorSCO+HID+A2DP+PAN(HS)\n"); } else { - btc_alg_dbg(ALGO_TRACE, - "SCO+HID+A2DP+PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "SCO+HID+A2DP+PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } @@ -760,6 +720,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, u8 dac_swinglvl) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; /* There are several type of dacswing @@ -767,10 +728,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, */ h2c_parameter[0] = dac_swinglvl; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl); - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } @@ -778,13 +739,14 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist, u8 dec_btpwr_lvl) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; h2c_parameter[0] = dec_btpwr_lvl; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n", - dec_btpwr_lvl, h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n", + dec_btpwr_lvl, h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } @@ -792,14 +754,16 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist, static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, bool force_exec, u8 dec_btpwr_lvl) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s Dec BT power level = %d\n", + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s Dec BT power level = %d\n", (force_exec ? 
"force to" : ""), dec_btpwr_lvl); coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); } halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr); @@ -810,6 +774,7 @@ static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist, bool enable_autoreport) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; h2c_parameter[0] = 0; @@ -817,10 +782,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist, if (enable_autoreport) h2c_parameter[0] |= BIT0; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_autoreport ? "Enabled!!" : "Disabled!!"), - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_autoreport ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); } @@ -829,17 +794,19 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, bool force_exec, bool enable_autoreport) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? "force to" : ""), - ((enable_autoreport) ? "Enabled" : "Disabled")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), + ((enable_autoreport) ? "Enabled" : "Disabled")); coex_dm->cur_bt_auto_report = enable_autoreport; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", - coex_dm->pre_bt_auto_report, - coex_dm->cur_bt_auto_report); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) return; @@ -853,16 +820,18 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, bool force_exec, u8 fw_dac_swinglvl) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s set FW Dac Swing level = %d\n", - (force_exec ? "force to" : ""), fw_dac_swinglvl); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? 
"force to" : ""), fw_dac_swinglvl); coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", - coex_dm->pre_fw_dac_swing_lvl, - coex_dm->cur_fw_dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); if (coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl) @@ -878,10 +847,12 @@ static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, bool rx_rf_shrink_on) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (rx_rf_shrink_on) { /* Shrink RF Rx LPF corner */ - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc); } else { @@ -889,8 +860,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, * After initialized, we can use coex_dm->btRf0x1eBackup */ if (btcoexist->initilized) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Resume RF Rx LPF corner!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Resume RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup); @@ -901,17 +872,19 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, bool force_exec, bool rx_rf_shrink_on) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn Rx RF Shrink = %s\n", - (force_exec ? "force to" : ""), - ((rx_rf_shrink_on) ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec ? "force to" : ""), + ((rx_rf_shrink_on) ? "ON" : "OFF")); coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n", - coex_dm->pre_rf_rx_lpf_shrink, - coex_dm->cur_rf_rx_lpf_shrink); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); if (coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink) @@ -926,10 +899,11 @@ static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist, u32 level) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 val = (u8)level; - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Write SwDacSwing = 0x%x\n", level); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); } @@ -947,22 +921,24 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist, bool force_exec, bool dac_swingon, u32 dac_swinglvl) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n", - (force_exec ? "force to" : ""), - ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n", + (force_exec ? 
"force to" : ""), + ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl); coex_dm->cur_dac_swing_on = dac_swingon; coex_dm->cur_dac_swing_lvl = dac_swinglvl; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ", - coex_dm->pre_dac_swing_on, - coex_dm->pre_dac_swing_lvl); - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n", - coex_dm->cur_dac_swing_on, - coex_dm->cur_dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n", + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) @@ -978,10 +954,12 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist, static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, bool agc_table_en) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + /* BB AGC Gain Table */ if (agc_table_en) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table On!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BB Agc Table On!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001); @@ -989,8 +967,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001); } else { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table Off!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BB Agc Table Off!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); @@ -1003,17 +981,19 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist, bool force_exec, bool agc_table_en) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s %s Agc Table\n", - (force_exec ? "force to" : ""), - ((agc_table_en) ? "Enable" : "Disable")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s %s Agc Table\n", + (force_exec ? "force to" : ""), + ((agc_table_en) ? 
"Enable" : "Disable")); coex_dm->cur_agc_table_en = agc_table_en; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", - coex_dm->pre_agc_table_en, - coex_dm->cur_agc_table_en); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, + coex_dm->cur_agc_table_en); if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) return; @@ -1027,20 +1007,22 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -1049,30 +1031,32 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ", - (force_exec ? "force to" : ""), val0x6c0); - btc_alg_dbg(ALGO_TRACE_SW, - "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", - val0x6c4, val0x6c8, val0x6cc); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ", + (force_exec ? 
"force to" : ""), val0x6c0); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + val0x6c4, val0x6c8, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; coex_dm->cur_val0x6cc = val0x6cc; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ", - coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4); - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", - coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n", - coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4); - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", - coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && @@ -1121,14 +1105,15 @@ static void btc8192e2ant_coex_tbl_w_type(struct btc_coexist *btcoexist, static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, bool enable) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; if (enable) h2c_parameter[0] |= BIT0; /* function enable */ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -1136,18 +1121,20 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreIgnoreWlanAct = %d ", - coex_dm->pre_ignore_wlan_act); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "bCurIgnoreWlanAct = %d!!\n", - coex_dm->cur_ignore_wlan_act); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreIgnoreWlanAct = %d ", + coex_dm->pre_ignore_wlan_act); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "bCurIgnoreWlanAct = %d!!\n", + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -1161,6 +1148,8 @@ static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 h2c_parameter[5] = {0}; h2c_parameter[0] = byte1; @@ -1175,11 +1164,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | - h2c_parameter[3] << 8 | h2c_parameter[4]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -1203,20 +1192,22 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist, static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn %s PS TDMA, type=%d\n", - (force_exec ? "force to" : ""), - (turn_on ? "ON" : "OFF"), type); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec ? "force to" : ""), + (turn_on ? 
"ON" : "OFF"), type); coex_dm->cur_ps_tdma_on = turn_on; coex_dm->cur_ps_tdma = type; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", - coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", - coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1340,11 +1331,12 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, u8 sstype) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 mimops = BTC_MIMO_PS_DYNAMIC; u32 disra_mask = 0x0; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], REAL set SS Type = %d\n", sstype); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], REAL set SS Type = %d\n", sstype); disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype, coex_dm->curra_masktype); @@ -1376,9 +1368,11 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist, bool force_exec, u8 new_sstype) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], %s Switch SS Type = %d\n", - (force_exec ? "force to" : ""), new_sstype); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s Switch SS Type = %d\n", + (force_exec ? 
"force to" : ""), new_sstype); coex_dm->cur_sstype = new_sstype; if (!force_exec) { @@ -1440,6 +1434,7 @@ static void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist) static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool common = false, wifi_connected = false, wifi_busy = false; bool bt_hson = false, low_pwr_disable = false; @@ -1459,8 +1454,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non-connected idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non-connected idle!!\n"); if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) || @@ -1496,8 +1491,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - btc_alg_dbg(ALGO_TRACE, - "Wifi connected + BT non connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Wifi connected + BT non connected-idle!!\n"); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2); @@ -1524,8 +1519,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) if (bt_hson) return false; - btc_alg_dbg(ALGO_TRACE, - "Wifi connected + BT connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Wifi connected + BT connected-idle!!\n"); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2); @@ -1550,12 +1545,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - btc_alg_dbg(ALGO_TRACE, - "Wifi Connected-Busy + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Wifi Connected-Busy + BT Busy!!\n"); common = false; } else { - btc_alg_dbg(ALGO_TRACE, - "Wifi Connected-Idle + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Wifi Connected-Idle + BT Busy!!\n"); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); @@ -1581,9 +1576,11 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause, int result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 71) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -1678,8 +1675,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); @@ -1782,9 +1779,11 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause, static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause, int result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); @@ -1873,8 +1872,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause, } } } else 
{ - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); @@ -1968,9 +1967,11 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause, static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause, int result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); @@ -2059,8 +2060,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); @@ -2155,6 +2156,7 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, bool sco_hid, bool tx_pause, u8 max_interval) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static int up, dn, m, n, wait_cnt; /* 0: no change, +1: increase WiFi duration, * -1: decrease WiFi duration @@ -2162,13 +2164,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, int result; u8 retry_cnt = 0; - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjust()\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TdmaDurationAdjust()\n"); if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); if (sco_hid) { if (tx_pause) { if (max_interval == 1) { @@ -2181,11 +2183,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 14); coex_dm->tdma_adj_type = 14; - } else if (max_interval == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; } else { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2203,11 +2200,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 10); coex_dm->tdma_adj_type = 10; - } else if (max_interval == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; } else { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2227,11 +2219,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 6); coex_dm->tdma_adj_type = 6; - } else if (max_interval == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; } else { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2249,11 +2236,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; - } else if (max_interval == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; } else { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2272,11 +2254,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, } else { /* accquire the BT TRx retry count from BT_Info byte2 */ retry_cnt = coex_sta->bt_retry_cnt; - 
btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], retry_cnt = %d\n", retry_cnt); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n", - up, dn, m, n, wait_cnt); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], retry_cnt = %d\n", retry_cnt); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n", + up, dn, m, n, wait_cnt); result = 0; wait_cnt++; /* no retry in the last 2-second duration */ @@ -2293,8 +2275,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex]Increase wifi duration!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex]Increase wifi duration!!\n"); } } else if (retry_cnt <= 3) { up--; @@ -2317,8 +2299,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_cnt = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "Reduce wifi duration for retry<3\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Reduce wifi duration for retry<3\n"); } } else { if (wait_cnt == 1) @@ -2334,12 +2316,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_cnt = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "Decrease wifi duration for retryCounter>3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Decrease wifi duration for retryCounter>3!!\n"); } - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], max Interval = %d\n", max_interval); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], max Interval = %d\n", max_interval); if (max_interval == 1) btc8192e_int1(btcoexist, tx_pause, result); else if (max_interval == 2) @@ -2355,11 +2337,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { bool scan = false, link = false, roam = false; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], PsTdma type dismatch!!!, "); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "curPsTdma=%d, recordPsTdma=%d\n", - coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], PsTdma type dismatch!!!, "); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "curPsTdma=%d, recordPsTdma=%d\n", + coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2370,8 +2352,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, true, coex_dm->tdma_adj_type); else - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); } } @@ -2390,7 +2372,7 @@ static void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist) btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); if ((btrssi_state == BTC_RSSI_STATE_LOW) || (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { @@ -2452,7 +2434,7 @@ static void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist) btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); if ((btrssi_state == 
BTC_RSSI_STATE_LOW) || (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { @@ -2506,7 +2488,7 @@ static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); @@ -2564,19 +2546,20 @@ static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist) /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; bool long_dist = false; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); if ((btrssi_state == BTC_RSSI_STATE_LOW || btrssi_state == BTC_RSSI_STATE_STAY_LOW) && (wifirssi_state == BTC_RSSI_STATE_LOW || wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); long_dist = true; } if (long_dist) { @@ -2656,7 +2639,7 @@ static void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); @@ -2717,7 +2700,7 @@ static void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); @@ -2778,7 +2761,7 @@ static void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); @@ -2836,7 +2819,7 @@ static void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); @@ -2899,7 +2882,7 @@ static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); btcoexist->btc_get(btcoexist, 
BTC_GET_U4_WIFI_BW, &wifi_bw); @@ -2963,7 +2946,7 @@ static void btc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); @@ -3024,7 +3007,7 @@ static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist) u32 wifi_bw; wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); @@ -3079,107 +3062,108 @@ static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist) static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 algorithm = 0; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], return for Manual CTRL <===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], return for Manual CTRL <===\n"); return; } if (coex_sta->under_ips) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], wifi is under IPS !!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi is under IPS !!!\n"); return; } algorithm = halbtc8192e2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT is under inquiry/page scan !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is under inquiry/page scan !!\n"); halbtc8192e2ant_action_bt_inquiry(btcoexist); return; } coex_dm->cur_algorithm = algorithm; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); if (halbtc8192e2ant_is_common_action(btcoexist)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant common\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant common\n"); coex_dm->auto_tdma_adjust = false; } else { if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n", - coex_dm->pre_algorithm, - coex_dm->cur_algorithm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n", + coex_dm->pre_algorithm, + coex_dm->cur_algorithm); coex_dm->auto_tdma_adjust = false; } switch (coex_dm->cur_algorithm) { case BT_8192E_2ANT_COEX_ALGO_SCO: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = SCO\n"); halbtc8192e2ant_action_sco(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_SCO_PAN: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = SCO+PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = SCO+PAN(EDR)\n"); halbtc8192e2ant_action_sco_pan(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = HID\n"); + 
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = HID\n"); halbtc8192e2ant_action_hid(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_A2DP: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = A2DP\n"); halbtc8192e2ant_action_a2dp(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); halbtc8192e2ant_action_a2dp_pan_hs(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = PAN(EDR)\n"); halbtc8192e2ant_action_pan_edr(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANHS: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = HS mode\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = HS mode\n"); halbtc8192e2ant_action_pan_hs(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = PAN+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = PAN+A2DP\n"); halbtc8192e2ant_action_pan_edr_a2dp(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = PAN(EDR)+HID\n"); halbtc8192e2ant_action_pan_edr_hid(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = HID+A2DP+PAN\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = HID+A2DP+PAN\n"); btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID_A2DP: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = HID+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = HID+A2DP\n"); halbtc8192e2ant_action_hid_a2dp(btcoexist); break; default: - btc_alg_dbg(ALGO_TRACE, - "Action 2-Ant, algorithm = unknown!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Action 2-Ant, algorithm = unknown!!\n"); /* halbtc8192e2ant_coex_alloff(btcoexist); */ break; } @@ -3553,6 +3537,7 @@ void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[3] = {0}; u32 wifi_bw; u8 wifi_center_chnl; @@ -3587,10 +3572,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | - h2c_parameter[2]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -3606,6 +3591,7 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, u8 length) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 bt_info = 0; u8 i, rsp_source = 0; bool bt_busy = false, limited_dig = false; @@ 
-3647,8 +3633,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, * because bt is reset and loss of the info. */ if ((coex_sta->bt_info_ext & BIT1)) { - btc_alg_dbg(ALGO_TRACE, - "bit1, send wifi BW&Chnl to BT!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "bit1, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) @@ -3664,8 +3650,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, if ((coex_sta->bt_info_ext & BIT3)) { if (!btcoexist->manual_control && !btcoexist->stop_coex_dm) { - btc_alg_dbg(ALGO_TRACE, - "bit3, BT NOT ignore Wlan active!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "bit3, BT NOT ignore Wlan active!\n"); halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, false); @@ -3723,25 +3709,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Non-Connected idle!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Non-Connected idle!!!\n"); } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) || (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n"); } if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -3777,13 +3763,14 @@ void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist) void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static u8 dis_ver_info_cnt; u32 fw_ver = 0, bt_patch_ver = 0; struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - btc_alg_dbg(ALGO_TRACE, - "=======================Periodical=======================\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "=======================Periodical=======================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; btc_iface_dbg(INTF_INIT, -- cgit v1.2.3 From db8cb0095b0ec9700d6f369f594eb31ff683e322 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:03 -0600 Subject: rtlwifi: rtl8723be: btcoexist: Add single_ant_path Some devices with RTL8723BE wifi/Bluetooth adapters are shipped with only a single antenna. On a subset of these, the EEPROM is incorrectly coded to indicate the wrong connection. The resulting problems have been fixed for wifi. This change handles them for BT. 
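A minimal sketch of how the btcoexist layer could consume the new value (assuming the usual rtlwifi headers are included); only rtl_get_hwpg_single_ant_path() comes from this patch, while the helper name and the main/aux mapping below are illustrative assumptions. A later patch in this series fills single_ant_path from EEPROM bit 0xc3[6].

/* Illustrative only: helper name and BTC_ANT_PATH_* mapping are assumed. */
#define BTC_ANT_PATH_MAIN	0
#define BTC_ANT_PATH_AUX	1

static u8 btc_example_pick_bt_ant_path(struct rtl_priv *rtlpriv)
{
	/* single_ant_path is cached in btc_info by the HW-info read path */
	return rtl_get_hwpg_single_ant_path(rtlpriv) ?
	       BTC_ANT_PATH_AUX : BTC_ANT_PATH_MAIN;
}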
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 5 +++++ drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h | 1 + drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 1 + drivers/net/wireless/realtek/rtlwifi/wifi.h | 1 + 4 files changed, 8 insertions(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 8a0e773e076e..8e382a8e1011 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -146,6 +146,11 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) return chnl; } +u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) +{ + return rtlpriv->btcoexist.btc_info.single_ant_path; +} + static void halbtc_leave_lps(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index ccd5a0f91e3b..65c1c5295237 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -49,6 +49,7 @@ struct rtl_btc_ops *rtl_btc_get_ops_pointer(void); u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv); +u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv); enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index ae2a38ed4ba5..5ef2b9664fdd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2657,6 +2657,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.btcoexist = 0; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; + rtlpriv->btcoexist.btc_info.single_ant_path = 0; } /* override ant_num / ant_path */ diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 310fa90200b2..9a5a8a98c4b9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2454,6 +2454,7 @@ struct rtl_btc_info { u8 bt_type; u8 btcoexist; u8 ant_num; + u8 single_ant_path; }; struct bt_coexist_info { -- cgit v1.2.3 From 0de9b5db9fbf70e8d9a98eae8cbdf33f7c04252c Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:04 -0600 Subject: rtlwifi: move btcoex's ant_num declaration File halbtcoutsrc.c is a better place for routine rtl_get_hwpg_ant_num() than file rtl_btc.c. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 12 ++++++++++++ drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c | 11 ----------- drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 2 ++ 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 8e382a8e1011..39584228f4eb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -151,6 +151,18 @@ u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) return rtlpriv->btcoexist.btc_info.single_ant_path; } +u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) +{ + u8 num; + + if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) + num = 2; + else + num = 1; + + return num; +} + static void halbtc_leave_lps(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index d3fd9211b3a4..54e893cfee7b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -178,17 +178,6 @@ struct rtl_btc_ops *rtl_btc_get_ops_pointer(void) } EXPORT_SYMBOL(rtl_btc_get_ops_pointer); -u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) -{ - u8 num; - - if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) - num = 2; - else - num = 1; - - return num; -} enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw) { diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index 65c1c5295237..bba7e23e63dd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -46,9 +46,9 @@ void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type); struct rtl_btc_ops *rtl_btc_get_ops_pointer(void); -u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv); +u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv); enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 5ef2b9664fdd..08fbe949bf1d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2653,6 +2653,8 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, value = hwinfo[EEPROM_RF_BT_SETTING_8723B]; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); + rtlpriv->btcoexist.btc_info.single_ant_path = + (value & 0x40); /*0xc3[6]*/ } else { rtlpriv->btcoexist.btc_info.btcoexist = 0; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; -- cgit v1.2.3 From 7fe1fe75c311149653c362c607804639492d7166 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:05 -0600 Subject: rtlwifi: rtl8723be: btcoex: add package_type function to btcoex The new code handles the package-type of the chip. 
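As a rough usage sketch (not part of this patch), a coex routine could branch on the exported value; only rtl_get_hwpg_package_type() and the PACKAGE_* constants come from this change, while the function name and the idea of per-package tuning are assumptions.

static void btc_example_apply_package_quirks(struct rtl_priv *rtlpriv)
{
	switch (rtl_get_hwpg_package_type(rtlpriv)) {
	case PACKAGE_QFN68:
		/* hypothetical: apply QFN68-specific coex tuning here */
		break;
	case PACKAGE_TFBGA79:
	case PACKAGE_TFBGA80:
	case PACKAGE_TFBGA90:
	case PACKAGE_DEFAULT:
	default:
		/* default tuning */
		break;
	}
}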
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 7 +++++ .../wireless/realtek/rtlwifi/btcoexist/rtl_btc.h | 2 ++ drivers/net/wireless/realtek/rtlwifi/efuse.c | 5 ++-- drivers/net/wireless/realtek/rtlwifi/efuse.h | 1 + .../net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 33 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtlwifi/wifi.h | 9 ++++++ 6 files changed, 54 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 39584228f4eb..1cc955e39453 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -163,6 +163,13 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) return num; } +u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv) +{ + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + return rtlhal->package_type; +} + static void halbtc_leave_lps(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index bba7e23e63dd..fff5117e1c4e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -50,6 +50,8 @@ u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv); +u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv); + enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index eb58633e674a..ef9acd466cca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -73,8 +73,6 @@ static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata, u8 *targetdata); static u8 enable_efuse_data_write(struct ieee80211_hw *hw, u16 efuse_addr, u8 word_en, u8 *data); -static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, - u8 pwrstate); static u16 efuse_get_current_size(struct ieee80211_hw *hw); static u8 efuse_calculate_word_cnts(u8 word_en); @@ -1124,7 +1122,7 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw, return badworden; } -static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) +void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1210,6 +1208,7 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) } } } +EXPORT_SYMBOL(efuse_power_switch); static u16 efuse_get_current_size(struct ieee80211_hw *hw) { diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h index 1338ae63fe54..952fdc288f0e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h @@ -109,6 +109,7 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw); void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); +void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate); int 
rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, int max_size, u8 *hwinfo, int *params); void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 08fbe949bf1d..ecccada89ea5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2020,6 +2020,37 @@ static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); } +static u8 _rtl8723be_read_package_type(struct ieee80211_hw *hw) +{ + u8 package_type; + u8 value; + + efuse_power_switch(hw, false, true); + if (!efuse_one_byte_read(hw, 0x1FB, &value)) + value = 0; + efuse_power_switch(hw, false, false); + + switch (value & 0x7) { + case 0x4: + package_type = PACKAGE_TFBGA79; + break; + case 0x5: + package_type = PACKAGE_TFBGA90; + break; + case 0x6: + package_type = PACKAGE_QFN68; + break; + case 0x7: + package_type = PACKAGE_TFBGA80; + break; + default: + package_type = PACKAGE_DEFAULT; + break; + } + + return package_type; +} + static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, bool pseudo_test) { @@ -2078,6 +2109,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, rtlefuse->autoload_failflag, hwinfo); + rtlhal->package_type = _rtl8723be_read_package_type(hw); + /* set channel plan from efuse */ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 9a5a8a98c4b9..4dad962f9a61 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -925,6 +925,14 @@ enum wolpattern_type { UNKNOWN_TYPE = 4, }; +enum package_type { + PACKAGE_DEFAULT, + PACKAGE_QFN68, + PACKAGE_TFBGA90, + PACKAGE_TFBGA80, + PACKAGE_TFBGA79 +}; + struct octet_string { u8 *octet; u16 length; @@ -1509,6 +1517,7 @@ struct rtl_hal { u32 version; /*version of chip */ u8 state; /*stop 0, start 1 */ u8 board_type; + u8 package_type; u8 external_pa; u8 pa_mode; -- cgit v1.2.3 From e0215c1420262ee08b43ef7cb3dbd79c9f0d0632 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:06 -0600 Subject: rtlwifi: btcoex: move bt_type declaration Routine rtl_get_hwpg_bt_type() is better in halbtcoutsrc.c than in rtl_btc.c. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 5 +++++ drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 1cc955e39453..9a89ae0df39b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -151,6 +151,11 @@ u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) return rtlpriv->btcoexist.btc_info.single_ant_path; } +u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) +{ + return rtlpriv->btcoexist.btc_info.bt_type; +} + u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) { u8 num; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index 54e893cfee7b..46e0fa6be273 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -198,11 +198,6 @@ u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv) return rtlpriv->btcoexist.btc_info.btcoexist; } -u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) -{ - return rtlpriv->btcoexist.btc_info.bt_type; -} - MODULE_AUTHOR("Page He "); MODULE_AUTHOR("Realtek WlanFAE "); MODULE_AUTHOR("Larry Finger "); -- cgit v1.2.3 From 0ff78adeef117a9aa547e737f635cae8ca2f0c90 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:07 -0600 Subject: rtlwifi: rtl8723be: fix ant_sel code When ant_sel is set, we need to fill single_ant_path to select correct antenna path. Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index ecccada89ea5..e6e5e5bedf63 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2696,9 +2696,13 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, } /* override ant_num / ant_path */ - if (mod_params->ant_sel) + if (mod_params->ant_sel) { rtlpriv->btcoexist.btc_info.ant_num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); + + rtlpriv->btcoexist.btc_info.single_ant_path = + (mod_params->ant_sel == 1 ? 0 : 1); + } } void rtl8723be_bt_reg_init(struct ieee80211_hw *hw) -- cgit v1.2.3 From cceb0a5973202b4a5d649ede937d35786d7ca168 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 6 Feb 2017 21:30:08 -0600 Subject: rtlwifi: Add work queue for c2h cmd. btcoex needs to sleep, thus it must run in thread context. 
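The mechanism added by the diff below boils down to a standard deferred-work pattern: the interrupt path only queues the C2H payload, and a work item parses it later in process context, where sleeping is allowed. A minimal, self-contained sketch of that pattern follows; the names are hypothetical rather than the driver's rtl_c2hcmd_* code, and the real version allocates the payload buffer dynamically, bounds the dequeue loop, and dispatches to the c2h_content_parsing callback.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#define C2H_MAX_LEN	32	/* assumed bound for the example */

struct c2h_item {
	struct list_head list;
	u8 tag;
	u8 len;
	u8 val[C2H_MAX_LEN];
};

static LIST_HEAD(c2h_list);
static DEFINE_SPINLOCK(c2h_lock);
static void c2h_work_fn(struct work_struct *work);
static DECLARE_WORK(c2h_work, c2h_work_fn);

/* Producer: safe to call from IRQ/tasklet context, never sleeps. */
static void c2h_enqueue(u8 tag, const u8 *val, u8 len)
{
	struct c2h_item *item;
	unsigned long flags;

	if (len > C2H_MAX_LEN)
		return;
	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;
	item->tag = tag;
	item->len = len;
	memcpy(item->val, val, len);

	spin_lock_irqsave(&c2h_lock, flags);
	list_add_tail(&item->list, &c2h_list);
	spin_unlock_irqrestore(&c2h_lock, flags);

	schedule_work(&c2h_work);
}

/* Consumer: runs in process context, so the handler may sleep. */
static void c2h_work_fn(struct work_struct *work)
{
	struct c2h_item *item;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&c2h_lock, flags);
		item = list_first_entry_or_null(&c2h_list,
						struct c2h_item, list);
		if (item)
			list_del(&item->list);
		spin_unlock_irqrestore(&c2h_lock, flags);

		if (!item)
			break;
		/* parse item->tag / item->val here (may sleep) */
		kfree(item);
	}
}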
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 93 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtlwifi/base.h | 3 + .../net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 15 +++- .../net/wireless/realtek/rtlwifi/rtl8192ee/fw.h | 3 +- .../net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 1 + .../net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 18 ++++- .../net/wireless/realtek/rtlwifi/rtl8723be/fw.h | 3 +- .../net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 1 + .../net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 18 ++++- .../net/wireless/realtek/rtlwifi/rtl8821ae/fw.h | 3 + .../net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 1 + drivers/net/wireless/realtek/rtlwifi/wifi.h | 14 ++++ 12 files changed, 160 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 01cf0a9aa31b..caea350f05aa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -475,6 +475,8 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) (void *)rtl_swlps_rfon_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, (void *)rtl_fwevt_wq_callback); + INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, + (void *)rtl_c2hcmd_wq_callback); } @@ -489,6 +491,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw) cancel_delayed_work(&rtlpriv->works.ps_work); cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); cancel_delayed_work(&rtlpriv->works.fwevt_wq); + cancel_delayed_work(&rtlpriv->works.c2hcmd_wq); } EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); @@ -556,6 +559,7 @@ int rtl_init_core(struct ieee80211_hw *hw) spin_lock_init(&rtlpriv->locks.rf_lock); spin_lock_init(&rtlpriv->locks.waitq_lock); spin_lock_init(&rtlpriv->locks.entry_list_lock); + spin_lock_init(&rtlpriv->locks.c2hcmd_lock); spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); spin_lock_init(&rtlpriv->locks.check_sendpkt_lock); spin_lock_init(&rtlpriv->locks.fw_ps_lock); @@ -563,6 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw) spin_lock_init(&rtlpriv->locks.iqk_lock); /* <5> init list */ INIT_LIST_HEAD(&rtlpriv->entry_list); + INIT_LIST_HEAD(&rtlpriv->c2hcmd_list); rtlmac->link_state = MAC80211_NOLINK; @@ -575,6 +580,7 @@ EXPORT_SYMBOL_GPL(rtl_init_core); void rtl_deinit_core(struct ieee80211_hw *hw) { + rtl_c2hcmd_launcher(hw, 0); } EXPORT_SYMBOL_GPL(rtl_deinit_core); @@ -1729,6 +1735,93 @@ void rtl_fwevt_wq_callback(void *data) rtlpriv->cfg->ops->c2h_command_handle(hw); } + +void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned long flags; + struct rtl_c2hcmd *c2hcmd; + + c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL); + + if (!c2hcmd) + goto label_err; + + c2hcmd->val = kmalloc(len, GFP_KERNEL); + + if (!c2hcmd->val) + goto label_err2; + + /* fill data */ + c2hcmd->tag = tag; + c2hcmd->len = len; + memcpy(c2hcmd->val, val, len); + + /* enqueue */ + spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); + + list_add_tail(&c2hcmd->list, &rtlpriv->c2hcmd_list); + + spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); + + /* wake up wq */ + queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0); + + return; + +label_err2: + kfree(c2hcmd); + +label_err: + RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING, + "C2H cmd enqueue fail.\n"); +} +EXPORT_SYMBOL(rtl_c2hcmd_enqueue); + +void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec) +{ + struct rtl_priv 
*rtlpriv = rtl_priv(hw); + unsigned long flags; + struct rtl_c2hcmd *c2hcmd; + int i; + + for (i = 0; i < 200; i++) { + /* dequeue a task */ + spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); + + c2hcmd = list_first_entry_or_null(&rtlpriv->c2hcmd_list, + struct rtl_c2hcmd, list); + + if (c2hcmd) + list_del(&c2hcmd->list); + + spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); + + /* do it */ + if (!c2hcmd) + break; + + if (rtlpriv->cfg->ops->c2h_content_parsing && exec) + rtlpriv->cfg->ops->c2h_content_parsing(hw, + c2hcmd->tag, c2hcmd->len, c2hcmd->val); + + /* free */ + kfree(c2hcmd->val); + + kfree(c2hcmd); + } +} + +void rtl_c2hcmd_wq_callback(void *data) +{ + struct rtl_works *rtlworks = container_of_dwork_rtl(data, + struct rtl_works, + c2hcmd_wq); + struct ieee80211_hw *hw = rtlworks->hw; + + rtl_c2hcmd_launcher(hw, 1); +} + void rtl_easy_concurrent_retrytimer_callback(unsigned long data) { struct ieee80211_hw *hw = (struct ieee80211_hw *)data; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 6c770aecebe7..02ff0c5624a7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -136,6 +136,9 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid); void rtl_watchdog_wq_callback(void *data); void rtl_fwevt_wq_callback(void *data); +void rtl_c2hcmd_wq_callback(void *data); +void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec); +void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val); void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 9d7a16c9e74e..9fec345a42a0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -764,8 +764,8 @@ static void _rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, rtl92ee_dm_dynamic_arfb_select(hw, rate, collision_state); } -static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf) +void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, + u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -820,5 +820,14 @@ void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - _rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + switch (c2h_cmd_id) { + case C2H_8192E_BT_INFO: + case C2H_8192E_BT_MP: + rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + break; + default: + rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, + tmp_buf); + break; + } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index 069da1e7e80a..72da3f92f02c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h @@ -185,5 +185,6 @@ void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); - +void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, + u8 
c2h_cmd_len, u8 *tmp_buf); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 554f2dc86bc5..48820bc497d8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -248,6 +248,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .fill_h2c_cmd = rtl92ee_fill_h2c_cmd, .get_btc_status = rtl92ee_get_btc_status, .rx_command_packet = rtl92ee_rx_command_packet, + .c2h_content_parsing = rtl92ee_c2h_content_parsing, }; static struct rtl_mod_params rtl92ee_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index fbf396143985..c7ee9ba5e26e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -585,9 +585,9 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, (u8 *)p2p_ps_offload); } -static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, - u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf) +void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, + u8 c2h_cmd_id, + u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -635,5 +635,15 @@ void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - _rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + switch (c2h_cmd_id) { + case C2H_8723B_BT_INFO: + case C2H_8723B_BT_MP: + rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + break; + + default: + rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, + tmp_buf); + break; + } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h index 067429669bda..c652fa1339a7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h @@ -148,5 +148,6 @@ void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); - +void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, + u8 c2h_cmd_len, u8 *tmp_buf); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index e571b876f0af..92dbfa8f297f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -260,6 +260,7 @@ static struct rtl_hal_ops rtl8723be_hal_ops = { .get_btc_status = rtl8723be_get_btc_status, .rx_command_packet = rtl8723be_rx_command_packet, .is_fw_header = is_fw_header, + .c2h_content_parsing = rtl8723be_c2h_content_parsing, }; static struct rtl_mod_params rtl8723be_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 328c64d465ba..a504dfae4ed3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1740,9 +1740,9 @@ static void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, rtl8821ae_dm_update_init_rate(hw, rate); } -static void _rtl8821ae_c2h_content_parsing(struct 
ieee80211_hw *hw, - u8 c2h_cmd_id, u8 c2h_cmd_len, - u8 *tmp_buf) +void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, + u8 c2h_cmd_id, u8 c2h_cmd_len, + u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1784,5 +1784,15 @@ void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - _rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + + switch (c2h_cmd_id) { + case C2H_8812_BT_INFO: + rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + break; + + default: + rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, + tmp_buf); + break; + } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index 8f5b4aade3c9..90a98ed879f7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -329,4 +329,7 @@ void rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(struct ieee80211_hw *hw, void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw); void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 length); +void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, + u8 c2h_cmd_id, u8 c2h_cmd_len, + u8 *tmp_buf); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index cd2a53b7e053..77cf3b2cd3f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -297,6 +297,7 @@ static struct rtl_hal_ops rtl8821ae_hal_ops = { .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, .get_btc_status = rtl8821ae_get_btc_status, .rx_command_packet = rtl8821ae_rx_command_packet, + .c2h_content_parsing = rtl8821ae_c2h_content_parsing, .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 4dad962f9a61..e6be5641d4da 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2202,6 +2202,8 @@ struct rtl_hal_ops { struct rtl_wow_pattern *rtl_pattern, u8 index); u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx); + void (*c2h_content_parsing)(struct ieee80211_hw *hw, u8 tag, u8 len, + u8 *val); }; struct rtl_intf_ops { @@ -2317,6 +2319,7 @@ struct rtl_locks { spinlock_t waitq_lock; spinlock_t entry_list_lock; spinlock_t usb_lock; + spinlock_t c2hcmd_lock; /*FW clock change */ spinlock_t fw_ps_lock; @@ -2346,6 +2349,7 @@ struct rtl_works { struct workqueue_struct *rtl_wq; struct delayed_work watchdog_wq; struct delayed_work ips_nic_off_wq; + struct delayed_work c2hcmd_wq; /* For SW LPS */ struct delayed_work ps_work; @@ -2553,6 +2557,13 @@ struct proxim { u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type); }; +struct rtl_c2hcmd { + struct list_head list; + u8 tag; + u8 len; + u8 *val; +}; + struct rtl_priv { struct ieee80211_hw *hw; struct completion firmware_loading_complete; @@ -2585,6 +2596,9 @@ struct rtl_priv { /* sta entry list for ap adhoc or mesh */ struct list_head entry_list; + /* c2hcmd list for kthread level access */ + struct list_head c2hcmd_list; + int max_fw_size; /* -- cgit v1.2.3 From 50d55b6d3f1c40485e2e06b2805ef81f881ee9db Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 5 Jan 2017 13:38:35 +0100 Subject: mwifiex: don't include mac80211.h This driver doesn't use mac80211, so it shouldn't 
include mac80211.h, include only the necessary cfg80211.h instead. Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/decl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index bec300b9c2ea..188e4c370836 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -27,7 +27,7 @@ #include #include #include -#include <net/mac80211.h> +#include <net/cfg80211.h> #define MWIFIEX_BSS_COEX_COUNT 2 #define MWIFIEX_MAX_BSS_NUM (3) -- cgit v1.2.3 From 0f83ff69735651cc7a3d150466a5257ff829b62b Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 17 Jan 2017 23:35:50 +0100 Subject: brcmfmac: use wiphy_read_of_freq_limits to respect limits from DT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This new helper reads extra frequency limits specified in DT and disables unavailable channels. This is useful for devices (like home routers) with chipsets limited e.g. by board design. In order to respect info read from DT we simply need to check for IEEE80211_CHAN_DISABLED bit when constructing channel info. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 0e28d0710af5..10098b7586f3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5908,6 +5908,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, continue; } + if (channel->orig_flags & IEEE80211_CHAN_DISABLED) + continue; + /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. */ @@ -6509,6 +6512,9 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->bands[NL80211_BAND_5GHZ] = band; } } + + wiphy_read_of_freq_limits(wiphy); + return 0; } -- cgit v1.2.3 From 6232c17438ed01f43665197db5a98a4a4f77ef47 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Thu, 2 Feb 2017 10:57:40 +0100 Subject: rt2x00: avoid introducing a USB dependency in the rt2x00lib module As reported by Felix: Though protected by an ifdef, introducing a usb symbol dependency in the rt2x00lib module is a major inconvenience for distributions that package kernel modules split into individual packages. Get rid of this unnecessary dependency by calling the usb related function from a more suitable place. 
Cc: Vishal Thanki Reported-by: Felix Fietkau Fixes: 8b4c0009313f ("rt2x00usb: Use usb anchor to manage URB") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 23 ++++++++--------------- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 5 +++++ 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 8fcbc8dc94c1..4b08007f93f7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -1436,21 +1436,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) cancel_work_sync(&rt2x00dev->intf_work); cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); cancel_work_sync(&rt2x00dev->sleep_work); -#if IS_ENABLED(CONFIG_RT2X00_LIB_USB) - if (rt2x00_is_usb(rt2x00dev)) { - usb_kill_anchored_urbs(rt2x00dev->anchor); - hrtimer_cancel(&rt2x00dev->txstatus_timer); - cancel_work_sync(&rt2x00dev->rxdone_work); - cancel_work_sync(&rt2x00dev->txdone_work); - } -#endif - if (rt2x00dev->workqueue) - destroy_workqueue(rt2x00dev->workqueue); - - /* - * Free the tx status fifo. - */ - kfifo_free(&rt2x00dev->txstatus_fifo); /* * Kill the tx status tasklet. @@ -1466,6 +1451,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) */ rt2x00lib_uninitialize(rt2x00dev); + if (rt2x00dev->workqueue) + destroy_workqueue(rt2x00dev->workqueue); + + /* + * Free the tx status fifo. + */ + kfifo_free(&rt2x00dev->txstatus_fifo); + /* * Free extra components */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 838ca58d2dd6..5a2bf9f63cd7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -744,6 +744,11 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; + usb_kill_anchored_urbs(rt2x00dev->anchor); + hrtimer_cancel(&rt2x00dev->txstatus_timer); + cancel_work_sync(&rt2x00dev->rxdone_work); + cancel_work_sync(&rt2x00dev->txdone_work); + queue_for_each(rt2x00dev, queue) rt2x00usb_free_entries(queue); } -- cgit v1.2.3 From a5b60de6972decc6b50a39abb376077c3c3621c8 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Fri, 3 Feb 2017 18:30:22 +0530 Subject: mwifiex: Avoid skipping WEP key deletion for AP This patch fixes the issue specific to AP. AP is started with WEP security and external station is connected to it. Data path works in this case. Now if AP is restarted with WPA/WPA2 security, station is able to connect but ping fails. Driver skips the deletion of WEP keys if interface type is AP. Removing that redundant check resolves the issue. 
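The change in control flow, in simplified form (not the full mwifiex_set_encode(); the real hunk follows below):

	} else {
		/* before: the AP role returned here, so the old WEP keys
		 * were never disabled in firmware and survived the restart */
		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
			return 0;

		/* after: always ask firmware to disable the stale key */
		encrypt_key.key_disable = true;
		if (mac_addr)
			memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
	}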
Fixes: e57f1734d87a ("mwifiex: add key material v2 support") Signed-off-by: Ganapathi Bhat Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 644f3a248741..1532ac9cee0b 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -1159,8 +1159,6 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, encrypt_key.is_rx_seq_valid = true; } } else { - if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) - return 0; encrypt_key.key_disable = true; if (mac_addr) memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); -- cgit v1.2.3 From d5e582523aead22cd1fd62aee232b6f9e4ce82c1 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 3 Feb 2017 11:35:15 -0600 Subject: rtlwifi: rtl8821ae: Fix typo in symbol for bandwidth numbers In several places, "BANDWITH" is used when "BANDWIDTH" should have been used. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 10 +++++----- drivers/net/wireless/realtek/rtlwifi/wifi.h | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index c60f07aa4acf..8da874cbec1a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -988,7 +988,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8 s8 temp_pwrlmt = 0; for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) { - for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) { + for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) { for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) { for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) { temp_pwrlmt = rtlphy->txpwr_limit_5g[regulation] @@ -1163,7 +1163,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211 _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(hw); for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) { - for (bw = 0; bw < MAX_2_4G_BANDWITH_NUM; ++bw) { + for (bw = 0; bw < MAX_2_4G_BANDWIDTH_NUM; ++bw) { for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) { for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) { /* obtain the base dBm values in 2.4G band @@ -1219,7 +1219,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211 } } for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) { - for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) { + for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) { for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) { for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) { /* obtain the base dBm values in 5G band @@ -1296,7 +1296,7 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw) "=====> _rtl8821ae_phy_init_txpower_limit()!\n"); for (i = 0; i < MAX_REGULATION_NUM; ++i) { - for (j = 0; j < MAX_2_4G_BANDWITH_NUM; ++j) + for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j) for (k = 0; k < MAX_RATE_SECTION_NUM; ++k) for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m) for (l = 0; l < MAX_RF_PATH_NUM; ++l) @@ -1305,7 +1305,7 @@ static void 
_rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw) = MAX_POWER_INDEX; } for (i = 0; i < MAX_REGULATION_NUM; ++i) { - for (j = 0; j < MAX_5G_BANDWITH_NUM; ++j) + for (j = 0; j < MAX_5G_BANDWIDTH_NUM; ++j) for (k = 0; k < MAX_RATE_SECTION_NUM; ++k) for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m) for (l = 0; l < MAX_RF_PATH_NUM; ++l) diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index e6be5641d4da..71f766093294 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -157,8 +157,8 @@ enum rtl8192c_h2c_cmd { #define MAX_REGULATION_NUM 4 #define MAX_RF_PATH_NUM 4 #define MAX_RATE_SECTION_NUM 6 -#define MAX_2_4G_BANDWITH_NUM 4 -#define MAX_5G_BANDWITH_NUM 4 +#define MAX_2_4G_BANDWIDTH_NUM 4 +#define MAX_5G_BANDWIDTH_NUM 4 #define MAX_RF_PATH 4 #define MAX_CHNL_GROUP_24G 6 #define MAX_CHNL_GROUP_5G 14 @@ -1265,12 +1265,12 @@ struct rtl_phy { u8 cur_bw40_txpwridx; s8 txpwr_limit_2_4g[MAX_REGULATION_NUM] - [MAX_2_4G_BANDWITH_NUM] + [MAX_2_4G_BANDWIDTH_NUM] [MAX_RATE_SECTION_NUM] [CHANNEL_MAX_NUMBER_2G] [MAX_RF_PATH_NUM]; s8 txpwr_limit_5g[MAX_REGULATION_NUM] - [MAX_5G_BANDWITH_NUM] + [MAX_5G_BANDWIDTH_NUM] [MAX_RATE_SECTION_NUM] [CHANNEL_MAX_NUMBER_5G] [MAX_RF_PATH_NUM]; -- cgit v1.2.3 From 258b93cc391bb781f0ad438cd84adf3666b86922 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 3 Feb 2017 11:35:16 -0600 Subject: rtlwifi: btcoexist: Convert halbtc8192e2ant.c to use standard debugging The routines in btcoexist use different debugging routines than are used in the other drivers. This patch converts halbtc8192e2ant.c to use the standard routines. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 237 +++++++++++---------- 1 file changed, 126 insertions(+), 111 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index 6a42159e5909..ffa1f438424d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -758,13 +758,13 @@ static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s Dec BT power level = %d\n", - (force_exec ? "force to" : ""), dec_btpwr_lvl); + force_exec ? 
"force to" : "", dec_btpwr_lvl); coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl; if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", - coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); } halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr); @@ -3174,11 +3174,12 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, bool backup) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u16 u16tmp = 0; u8 u8tmp = 0; - btc_iface_dbg(INTF_INIT, - "[BTCoex], 2Ant Init HW Config!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], 2Ant Init HW Config!!\n"); if (backup) { /* backup rf 0x1e value */ @@ -3261,8 +3262,10 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Coex Mechanism Init!!\n"); halbtc8192e2ant_init_coex_dm(btcoexist); } @@ -3282,13 +3285,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) u32 fw_ver = 0, bt_patch_ver = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + "\r\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ===========[Under Manual Control]==========="); + "\r\n ===========[Under Manual Control]==========="); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + "\r\n =========================================="); } if (!board_info->bt_exist) { @@ -3297,43 +3300,43 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", + "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", board_info->pg_ant_num, board_info->btdm_ant_num); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", - "BT stack/ hci ext ver", + "BT stack/ hci ext ver", ((stack_info->profile_notified) ? 
"Yes" : "No"), stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); + "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsMode(HsChnl)", - wifi_dot11_chnl, bt_hson, wifi_hs_chnl); + "Dot11 channel / HsMode(HsChnl)", + wifi_dot11_chnl, bt_hson, wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi); + "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "Wifi link/ roam/ scan", link, roam, scan); + "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); @@ -3341,7 +3344,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", - "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), ((!wifi_busy) ? "idle" : @@ -3349,7 +3352,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) "uplink" : "downlink"))); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", - "BT [status/ rssi/ retryCnt]", + "BT [status/ rssi/ retryCnt]", ((btcoexist->bt_info.bt_disabled) ? ("disabled") : ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : @@ -3360,127 +3363,127 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) coex_sta->bt_rssi, coex_sta->bt_retry_cnt); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d", - "SCO/HID/PAN/A2DP", stack_info->sco_exist, + "SCO/HID/PAN/A2DP", stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); bt_info_ext = coex_sta->bt_info_ext; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", + "BT Info A2DP rate", (bt_info_ext&BIT0) ? 
"Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - GLBtInfoSrc8192e2Ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + "\r\n %-35s = %7ph(%d)", + GLBtInfoSrc8192e2Ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", - "PS state, IPS/LPS", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type", - coex_dm->cur_sstype); + coex_dm->cur_sstype); /* Sw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Sw mechanism]============"); + "============[Sw mechanism]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, - coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ", - "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", - coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, - coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask", - btcoexist->bt_info.ra_mask); + btcoexist->bt_info.ra_mask); /* Fw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + "============[Fw mechanism]============"); ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", - "PS TDMA", coex_dm->ps_tdma_para, - ps_tdma_case, coex_dm->auto_tdma_adjust); + "\r\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para, + ps_tdma_case, coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", - "DecBtPwr/ IgnWlanAct", - coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); + "DecBtPwr/ IgnWlanAct", + coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); /* Hw setting */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Hw setting]============"); + "============[Hw setting]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", - "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, - coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit, - coex_dm->backup_ampdu_maxtime); + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit, + coex_dm->backup_ampdu_maxtime); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 
"\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "0x430/0x434/0x42a/0x456", - u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]); + "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778", - u8tmp[0]); + u8tmp[0]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]); + "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x40/ 0x4f", u8tmp[0], u8tmp[1]); + "0x40/ 0x4f", u8tmp[0], u8tmp[1]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)", - u32tmp[0]); + u32tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", - u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770(hp rx[31:16]/tx[15:0])", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); + "0x770(hp rx[31:16]/tx[15:0])", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x774(lp rx[31:16]/tx[15:0])", - coex_sta->low_priority_rx, coex_sta->low_priority_tx); + "0x774(lp rx[31:16]/tx[15:0])", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1) halbtc8192e2ant_monitor_bt_ctr(btcoexist); #endif @@ -3489,49 +3492,57 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_IPS_ENTER == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; halbtc8192e2ant_coex_alloff(btcoexist); } else if (BTC_IPS_LEAVE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; } } void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = 
btcoexist->adapter; + if (BTC_LPS_ENABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_SCAN_START == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN START notify\n"); else if (BTC_SCAN_FINISH == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN FINISH notify\n"); } void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_ASSOCIATE_START == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT START notify\n"); else if (BTC_ASSOCIATE_FINISH == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT FINISH notify\n"); } void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, @@ -3548,11 +3559,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, return; if (BTC_MEDIA_CONNECT == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA connect notify\n"); else - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA disconnect notify\n"); /* only 2.4G we need to inform bt the chnl mask */ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, @@ -3583,9 +3594,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (type == BTC_PACKET_DHCP) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], DHCP Packet notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], DHCP Packet notify\n"); } void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, @@ -3604,19 +3617,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Bt info[%d], length=%d, hex data = [", - rsp_source, length); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Bt info[%d], length=%d, hex data = [", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length-1) - btc_iface_dbg(INTF_NOTIFY, - "0x%02x]\n", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x]\n", tmp_buf[i]); else - btc_iface_dbg(INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x, ", tmp_buf[i]); } if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) { @@ -3755,7 +3768,9 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, void 
ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n"); halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true); ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); @@ -3773,25 +3788,25 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist) "=======================Periodical=======================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - btc_iface_dbg(INTF_INIT, - "************************************************\n"); - btc_iface_dbg(INTF_INIT, - "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, board_info->btdm_ant_num, - board_info->btdm_ant_pos); - btc_iface_dbg(INTF_INIT, - "BT stack/ hci ext ver = %s / %d\n", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - btc_iface_dbg(INTF_INIT, - "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); - btc_iface_dbg(INTF_INIT, - "************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) -- cgit v1.2.3 From dd192494e6ed5fb29e75c82cc8dc40f9e7f1f764 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 3 Feb 2017 11:35:17 -0600 Subject: rtlwifi: btcoexist: Convert halbtc8723b1ant.c to use standard debugging The routines in btcoexist use different debugging routines than are used in the other drivers. This patch converts halbtc8723b1ant.c to use the standard routines. 
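The conversion is mechanical and repeats one pattern throughout the file: fetch rtlpriv from the coexist context and replace the private btc_alg_dbg()/btc_iface_dbg() macros with RT_TRACE() under COMP_BT_COEXIST. A sketch of the pattern (example_notify() is a placeholder name; the real call sites are in the hunks below):

	static void example_notify(struct btc_coexist *btcoexist, u8 type)
	{
		struct rtl_priv *rtlpriv = btcoexist->adapter;

		/* was: btc_iface_dbg(INTF_NOTIFY, "[BTCoex], ... notify\n"); */
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
			 "[BTCoex], ... notify\n");
	}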
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8723b1ant.c | 827 +++++++++++---------- 1 file changed, 443 insertions(+), 384 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index 16add42a62af..e9f0a76107ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -60,9 +60,11 @@ static u32 glcoex_ver_8723b_1ant = 0x47; /*************************************************************** * local function start with halbtc8723b1ant_ ***************************************************************/ -static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, +static u8 halbtc8723b1ant_bt_rssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; s32 bt_rssi = 0; u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; @@ -74,28 +76,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -104,12 +106,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -118,26 +120,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - 
"[BTCoex], BT Rssi state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -151,6 +153,7 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, u8 index, u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; s32 wifi_rssi = 0; u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; @@ -165,28 +168,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -197,12 +200,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -211,26 +214,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + 
"[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -418,15 +421,16 @@ static void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist) static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; h2c_parameter[0] |= BIT0; /* trigger*/ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -513,6 +517,7 @@ static void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist) static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool bt_hs_on = false; u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED; @@ -521,8 +526,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (!bt_link_info->bt_link_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], No BT link exists!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], No BT link exists!!!\n"); return algorithm; } @@ -537,27 +542,29 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) if (numdiffprofile == 1) { if (bt_link_info->sco_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = HID only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = A2DP only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = A2DP only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = PAN(HS) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = PAN(HS) only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = PAN(EDR) only\n"); + 
RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = PAN(EDR) only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR; } @@ -566,21 +573,23 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) } else if (numdiffprofile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } @@ -588,32 +597,36 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) } else { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = HID + A2DP\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP; } @@ -623,31 +636,35 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = 
SCO + HID + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } @@ -657,13 +674,15 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR; } @@ -675,11 +694,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } @@ -693,6 +714,7 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist, bool low_penalty_ra) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[6] = {0}; h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */ @@ -706,9 +728,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */ } - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" : "OFF!!")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" 
: "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -732,20 +754,22 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -754,10 +778,12 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n", - (force_exec ? "force to" : ""), - val0x6c0, val0x6c4, val0x6cc); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? "force to" : ""), + val0x6c0, val0x6c4, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; @@ -823,14 +849,15 @@ static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist, static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist, bool enable) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; if (enable) h2c_parameter[0] |= BIT0; /* function enable */ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -838,16 +865,18 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist, static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -862,6 +891,7 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[5] = {0}; u8 real_byte1 = byte1, real_byte5 = byte5; bool ap_enable = false; @@ -871,8 +901,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, if (ap_enable) { if ((byte1 & BIT4) && !(byte1 & BIT5)) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], FW for 1Ant AP mode\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW for 1Ant AP mode\n"); real_byte1 &= ~BIT4; real_byte1 |= BIT5; @@ -893,8 +923,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = real_byte5; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", h2c_parameter[0], h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | @@ -918,22 +948,24 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, bool force_exec, u8 lps_val, u8 rpwm_val) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", - (force_exec ? "force to" : ""), lps_val, rpwm_val); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", + (force_exec ? 
"force to" : ""), lps_val, rpwm_val); coex_dm->cur_lps = lps_val; coex_dm->cur_rpwm = rpwm_val; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n", - coex_dm->cur_lps, coex_dm->cur_rpwm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n", + coex_dm->cur_lps, coex_dm->cur_rpwm); if ((coex_dm->pre_lps == coex_dm->cur_lps) && (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n", - coex_dm->pre_rpwm, coex_dm->cur_rpwm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n", + coex_dm->pre_rpwm, coex_dm->cur_rpwm); return; } @@ -947,8 +979,10 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist, bool low_penalty_ra) { - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } @@ -1153,6 +1187,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_busy = false; u8 rssi_adjust_val = 0; @@ -1163,13 +1198,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, if (!force_exec) { if (coex_dm->cur_ps_tdma_on) - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], ******** TDMA(on, %d) *********\n", - coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ******** TDMA(on, %d) *********\n", + coex_dm->cur_ps_tdma); else - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], ******** TDMA(off, %d) ********\n", - coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ******** TDMA(off, %d) ********\n", + coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1374,6 +1409,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool commom = false, wifi_connected = false; bool wifi_busy = false; @@ -1383,45 +1419,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist) if (!wifi_connected && BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (wifi_connected && (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi connected + BT non connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (!wifi_connected && (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT 
connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (wifi_connected && (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi connected + BT connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (!wifi_connected && (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else { if (wifi_busy) - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); else - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); commom = false; } @@ -1432,6 +1468,7 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist) static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, u8 wifi_status) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static s32 up, dn, m, n, wait_count; /* 0: no change, +1: increase WiFi duration, * -1: decrease WiFi duration @@ -1440,8 +1477,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, u8 retry_count = 0, bt_info_ext; bool wifi_busy = false; - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjustForAcl()\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TdmaDurationAdjustForAcl()\n"); if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status) wifi_busy = true; @@ -1470,8 +1507,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; @@ -1502,8 +1539,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi duration!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Increase wifi duration!!\n"); } } else if (retry_count <= 3) { up--; @@ -1526,8 +1563,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); } } else { if (wait_count == 1) @@ -1543,8 +1580,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); } if (result == -1) { @@ -1589,9 
+1626,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, } } else { /*no change */ /*if busy / idle change */ - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex],********* TDMA(on, %d) ********\n", - coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex],********* TDMA(on, %d) ********\n", + coex_dm->cur_ps_tdma); } if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && @@ -1807,7 +1844,7 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy( struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(btcoexist, 2, 28, 0); if (bt_link_info->hid_only) { /*HID */ btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status); @@ -1993,19 +2030,20 @@ static void halbtc8723b1ant_action_wifi_connected_special_packet( static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_busy = false; bool scan = false, link = false, roam = false; bool under_4way = false, ap_enable = false; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], CoexForWifiConnect()===>\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexForWifiConnect()===>\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way); if (under_4way) { halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); return; } @@ -2019,8 +2057,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) else halbtc8723b1ant_action_wifi_connected_special_packet( btcoexist); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); return; } @@ -2081,6 +2119,7 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 algorithm = 0; algorithm = halbtc8723b1ant_action_algorithm(btcoexist); @@ -2089,58 +2128,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) if (!halbtc8723b1ant_is_common_action(btcoexist)) { switch (coex_dm->cur_algorithm) { case BT_8723B_1ANT_COEX_ALGO_SCO: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = SCO\n"); halbtc8723b1ant_action_sco(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HID\n"); halbtc8723b1ant_action_hid(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = A2DP\n"); halbtc8723b1ant_action_a2dp(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); 
halbtc8723b1ant_action_a2dp_pan_hs(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = PAN(EDR)\n"); halbtc8723b1ant_action_pan_edr(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HS mode\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HS mode\n"); halbtc8723b1ant_action_pan_hs(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = PAN+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = PAN+A2DP\n"); halbtc8723b1ant_action_pan_edr_a2dp(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); halbtc8723b1ant_action_pan_edr_hid(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP+PAN\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HID+A2DP+PAN\n"); btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_HID_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HID+A2DP\n"); halbtc8723b1ant_action_hid_a2dp(btcoexist); break; default: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = coexist All Off!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = coexist All Off!!\n"); break; } coex_dm->pre_algorithm = coex_dm->cur_algorithm; @@ -2149,6 +2188,7 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool wifi_connected = false, bt_hs_on = false; bool increase_scan_dev_num = false; @@ -2158,24 +2198,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) u32 wifi_link_status = 0; u32 num_of_wifi_link = 0; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); return; } if (btcoexist->stop_coex_dm) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); return; } if (coex_sta->under_ips) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], wifi is under IPS !!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi is under IPS !!!\n"); return; } @@ -2254,8 +2294,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) if (!wifi_connected) { bool scan = false, link = false, roam = false; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], wifi is non connected-idle !!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + 
"[BTCoex], wifi is non connected-idle !!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2288,12 +2328,13 @@ static void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, bool backup) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u32 u32tmp = 0; u8 u8tmp = 0; u32 cnt_bt_cal_chk = 0; - btc_iface_dbg(INTF_INIT, - "[BTCoex], 1Ant Init HW Config!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], 1Ant Init HW Config!!\n"); if (backup) {/* backup rf 0x1e value */ coex_dm->backup_arfr_cnt1 = @@ -2320,13 +2361,13 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d); cnt_bt_cal_chk++; if (u32tmp & BIT0) { - btc_iface_dbg(INTF_INIT, - "[BTCoex], ########### BT calibration(cnt=%d) ###########\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ########### BT calibration(cnt=%d) ###########\n", cnt_bt_cal_chk); mdelay(50); } else { - btc_iface_dbg(INTF_INIT, - "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n", cnt_bt_cal_chk); break; } @@ -2370,8 +2411,10 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Coex Mechanism Init!!\n"); btcoexist->stop_coex_dm = false; @@ -2398,19 +2441,19 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) u32 fw_ver = 0, bt_patch_ver = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + "\r\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[Under Manual Control]=========="); + "\r\n ============[Under Manual Control]=========="); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + "\r\n =========================================="); } if (btcoexist->stop_coex_dm) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[Coex is STOPPED]============"); + "\r\n ============[Coex is STOPPED]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + "\r\n =========================================="); } if (!board_info->bt_exist) { @@ -2419,45 +2462,45 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d", - "Ant PG Num/ Ant Mech/ Ant Pos:", - board_info->pg_ant_num, board_info->btdm_ant_num, - board_info->btdm_ant_pos); + "Ant PG Num/ Ant Mech/ Ant Pos:", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", - "BT stack/ hci ext ver", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant, - fw_ver, bt_patch_ver, bt_patch_ver); + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant, + fw_ver, bt_patch_ver, bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsChnl(HsMode)", - wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", - coex_dm->wifi_chnl_info); + "H2C Wifi inform bt chnl Info", + coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); + "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "Wifi link/ roam/ scan", link, roam, scan); + "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); @@ -2467,106 +2510,106 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) &wifi_traffic_dir); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", - "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), - ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : - (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), - ((!wifi_busy) ? "idle" : - ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? - "uplink" : "downlink"))); + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" : + ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")), + ((!wifi_busy) ? "idle" : + ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? + "uplink" : "downlink"))); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, &wifi_link_status); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d", - "sta/vwifi/hs/p2pGo/p2pGc", - ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0)); + "sta/vwifi/hs/p2pGo/p2pGc", + ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0)); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", - "BT [status/ rssi/ retryCnt]", - ((btcoexist->bt_info.bt_disabled) ? ("disabled") : - ((coex_sta->c2h_bt_inquiry_page) ? 
("inquiry/page scan") : - ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == - coex_dm->bt_status) ? - "non-connected idle" : - ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == - coex_dm->bt_status) ? - "connected-idle" : "busy")))), + "BT [status/ rssi/ retryCnt]", + ((btcoexist->bt_info.bt_disabled) ? ("disabled") : + ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : + ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) ? + "non-connected idle" : + ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) ? + "connected-idle" : "busy")))), coex_sta->bt_rssi, coex_sta->bt_retry_cnt); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d / %d / %d", - "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, - bt_link_info->hid_exist, bt_link_info->pan_exist, - bt_link_info->a2dp_exist); + "\r\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, + bt_link_info->hid_exist, bt_link_info->pan_exist, + bt_link_info->a2dp_exist); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); bt_info_ext = coex_sta->bt_info_ext; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", - (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate"); + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - GLBtInfoSrc8723b1Ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + "\r\n %-35s = %7ph(%d)", + GLBtInfoSrc8723b1Ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s/%s, (0x%x/0x%x)", - "PS state, IPS/LPS, (lps/rpwm)", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")), - btcoexist->bt_info.lps_val, - btcoexist->bt_info.rpwm_val); + "\r\n %-35s = %s/%s, (0x%x/0x%x)", + "PS state, IPS/LPS, (lps/rpwm)", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")), + btcoexist->bt_info.lps_val, + btcoexist->bt_info.rpwm_val); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); if (!btcoexist->manual_control) { /* Sw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Sw mechanism]============"); + "============[Sw mechanism]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/", - "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra); + "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ", - "DelBA/ BtCtrlAgg/ AggSize", + "DelBA/ BtCtrlAgg/ AggSize", (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), (btcoexist->bt_info.bt_ctrl_buf_size ? 
"Yes" : "No"), btcoexist->bt_info.agg_buf_size); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", - "Rate Mask", btcoexist->bt_info.ra_mask); + "Rate Mask", btcoexist->bt_info.ra_mask); /* Fw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + "============[Fw mechanism]============"); pstdmacase = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", + "\r\n %-35s = %5ph case-%d (auto:%d)", "PS TDMA", coex_dm->ps_tdma_para, pstdmacase, coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ", - "IgnWlanAct", coex_dm->cur_ignore_wlan_act); + "IgnWlanAct", coex_dm->cur_ignore_wlan_act); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", - "Latest error condition(should be 0)", + "Latest error condition(should be 0)", coex_dm->error_condition); } /* Hw setting */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Hw setting]============"); + "============[Hw setting]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit, coex_dm->backup_ampdu_max_time); @@ -2575,49 +2618,49 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "0x430/0x434/0x42a/0x456", - u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0], - (u32tmp[1] & 0x3e000000) >> 25); + "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0], + (u32tmp[1] & 0x3e000000) >> 25); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x948/ 0x67[5] / 0x765", - u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); + "0x948/ 0x67[5] / 0x765", + u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", - u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x38[11]/0x40/0x4c[24:23]/0x64[0]", - ((u8tmp[0] & 0x8)>>3), u8tmp[1], - ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + 
"0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8) >> 3), u8tmp[1], + ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); + "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); @@ -2636,22 +2679,22 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) fa_cck = (u8tmp[0] << 8) + u8tmp[1]; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "OFDM-CCA/OFDM-FA/CCK-FA", - u32tmp[0] & 0xffff, fa_ofdm, fa_cck); + "OFDM-CCA/OFDM-FA/CCK-FA", + u32tmp[0] & 0xffff, fa_ofdm, fa_cck); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8(coexTable)", - u32tmp[0], u32tmp[1], u32tmp[2]); + "0x6c0/0x6c4/0x6c8(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2]); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770(high-pri rx/tx)", coex_sta->high_priority_rx, - coex_sta->high_priority_tx); + "0x770(high-pri rx/tx)", coex_sta->high_priority_rx, + coex_sta->high_priority_tx); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, - coex_sta->low_priority_tx); + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1) halbtc8723b1ant_monitor_bt_ctr(btcoexist); #endif @@ -2660,12 +2703,14 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (btcoexist->manual_control || btcoexist->stop_coex_dm) return; if (BTC_IPS_ENTER == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, @@ -2676,8 +2721,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) NORMAL_EXEC, 0); halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); } else if (BTC_IPS_LEAVE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; halbtc8723b1ant_init_hw_config(btcoexist, false); @@ -2688,22 +2733,25 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (btcoexist->manual_control || btcoexist->stop_coex_dm) return; if (BTC_LPS_ENABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS ENABLE notify\n"); 
coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_connected = false, bt_hs_on = false; u32 wifi_link_status = 0; u32 num_of_wifi_link = 0; @@ -2740,15 +2788,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_SCAN_START == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN START notify\n"); if (!wifi_connected) /* non-connected scan */ btc8723b1ant_action_wifi_not_conn_scan(btcoexist); else /* wifi is connected */ btc8723b1ant_action_wifi_conn_scan(btcoexist); } else if (BTC_SCAN_FINISH == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN FINISH notify\n"); if (!wifi_connected) /* non-connected scan */ btc8723b1ant_action_wifi_not_conn(btcoexist); else @@ -2758,6 +2806,7 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_connected = false, bt_hs_on = false; u32 wifi_link_status = 0; u32 num_of_wifi_link = 0; @@ -2789,12 +2838,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_ASSOCIATE_START == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT START notify\n"); btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist); } else if (BTC_ASSOCIATE_FINISH == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT FINISH notify\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -2808,6 +2857,7 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[3] = {0}; u32 wifi_bw; u8 wifiCentralChnl; @@ -2817,11 +2867,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, return; if (BTC_MEDIA_CONNECT == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA connect notify\n"); else - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA disconnect notify\n"); /* only 2.4G we need to inform bt the chnl mask */ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, @@ -2842,10 +2892,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | - h2c_parameter[2]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 
0x66, 3, h2c_parameter); } @@ -2853,6 +2903,7 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool bt_hs_on = false; u32 wifi_link_status = 0; u32 num_of_wifi_link = 0; @@ -2887,8 +2938,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, if (BTC_PACKET_DHCP == type || BTC_PACKET_EAPOL == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], special Packet(%d) notify\n", type); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], special Packet(%d) notify\n", type); halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); } } @@ -2896,6 +2947,7 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, u8 length) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 bt_info = 0; u8 i, rsp_source = 0; bool wifi_connected = false; @@ -2908,19 +2960,19 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Bt info[%d], length=%d, hex data = [", - rsp_source, length); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Bt info[%d], length=%d, hex data = [", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length - 1) - btc_iface_dbg(INTF_NOTIFY, - "0x%02x]\n", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x]\n", tmp_buf[i]); else - btc_iface_dbg(INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x, ", tmp_buf[i]); } if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) { @@ -2937,8 +2989,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, * because bt is reset and loss of the info. 
*/ if (coex_sta->bt_info_ext & BIT1) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) @@ -2952,8 +3004,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, if (coex_sta->bt_info_ext & BIT3) { if (!btcoexist->manual_control && !btcoexist->stop_coex_dm) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n"); halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, false); @@ -3008,30 +3060,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n"); /* connection exists but no busy */ } else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) || (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); } else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) { if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) coex_dm->auto_tdma_adjust = false; coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_MAX; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n"); } if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -3047,7 +3099,9 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n"); btcoexist->stop_coex_dm = true; @@ -3065,11 +3119,13 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist) void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) { - btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n"); if (BTC_WIFI_PNP_SLEEP == pnp_state) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Pnp notify to SLEEP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Pnp notify to SLEEP\n"); btcoexist->stop_coex_dm = true; halbtc8723b1ant_SetAntPath(btcoexist, 
BTC_ANT_PATH_BT, false, true); @@ -3079,8 +3135,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Pnp notify to WAKE UP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Pnp notify to WAKE UP\n"); btcoexist->stop_coex_dm = false; halbtc8723b1ant_init_hw_config(btcoexist, false); halbtc8723b1ant_init_coex_dm(btcoexist); @@ -3090,8 +3146,10 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], *****************Coex DM Reset****************\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], *****************Coex DM Reset****************\n"); halbtc8723b1ant_init_hw_config(btcoexist, false); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -3101,36 +3159,37 @@ void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist) void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; static u8 dis_ver_info_cnt; u32 fw_ver = 0, bt_patch_ver = 0; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], ==========================Periodical===========================\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); - btc_iface_dbg(INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, board_info->btdm_ant_num, - board_info->btdm_ant_pos); - btc_iface_dbg(INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", - stack_info->profile_notified ? "Yes" : "No", - stack_info->hci_version); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + stack_info->profile_notified ? 
"Yes" : "No", + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8723b_1ant, - glcoex_ver_8723b_1ant, fw_ver, - bt_patch_ver, bt_patch_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8723b_1ant, + glcoex_ver_8723b_1ant, fw_ver, + bt_patch_ver, bt_patch_ver); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0) -- cgit v1.2.3 From 2277f5f1154aa101dac87ea06fe4925d6025ddf3 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 3 Feb 2017 11:35:18 -0600 Subject: rtlwifi: btcoexist: Convert halbtc8723b2ant.c to use standard debugging The routines in btcoexist use different debugging routines than are used in the other drivers. This patch converts halbtc8723b2ant.c to use the standard routines. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 993 +++++++++++---------- 1 file changed, 527 insertions(+), 466 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 5f488ecaef70..12125966a911 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -58,9 +58,11 @@ static u32 glcoex_ver_8723b_2ant = 0x3f; /************************************************************** * local function start with btc8723b2ant_ **************************************************************/ -static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, +static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; s32 bt_rssi = 0; u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; @@ -72,28 +74,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + 
"[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -102,12 +104,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -116,26 +118,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -149,6 +151,7 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, u8 index, u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; s32 wifi_rssi = 0; u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; @@ -162,28 +165,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - 
"[BTCoex], wifi RSSI thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -194,12 +197,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -208,26 +211,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -239,6 +242,7 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u32 reg_hp_txrx, reg_lp_txrx, u32tmp; u32 reg_hp_tx = 0, reg_hp_rx = 0; u32 reg_lp_tx = 0, reg_lp_rx = 0; @@ -259,12 +263,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", - reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", - reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); /* reset counter */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); @@ -272,15 +276,16 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist 
*btcoexist) static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; h2c_parameter[0] |= BIT0; /* trigger */ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -386,6 +391,7 @@ static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist) static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool bt_hs_on = false; u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED; @@ -394,8 +400,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (!bt_link_info->bt_link_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], No BT link exists!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], No BT link exists!!!\n"); return algorithm; } @@ -410,27 +416,29 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) if (num_of_diff_profile == 1) { if (bt_link_info->sco_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], HID only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], A2DP only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], A2DP only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], PAN(HS) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], PAN(HS) only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], PAN(EDR) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], PAN(EDR) only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR; } @@ -439,21 +447,23 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) } else if (num_of_diff_profile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO + HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + A2DP ==> SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO + A2DP ==> SCO\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } @@ -461,31 +471,35 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) 
} else { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], HID + A2DP\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], A2DP + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex],A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex],A2DP + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP; } @@ -495,32 +509,36 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO + HID + A2DP ==> HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + HID + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + HID + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + A2DP + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } @@ -530,13 +548,15 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + A2DP + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + A2DP + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } @@ -548,11 +568,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], Error!!! 
SCO + HID + A2DP + PAN(HS)\n"); } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } @@ -564,6 +586,7 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool ret = false; bool bt_hs_on = false, wifi_connected = false; s32 bt_hs_rssi = 0; @@ -577,20 +600,20 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist) if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) return false; - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); if (wifi_connected) { if (bt_hs_on) { if (bt_hs_rssi > 37) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt power for HS mode!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Need to decrease bt power for HS mode!!\n"); ret = true; } } else { if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); ret = true; } } @@ -602,6 +625,7 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist) static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, u8 dac_swing_lvl) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; /* There are several type of dacswing @@ -609,10 +633,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, */ h2c_parameter[0] = dac_swing_lvl; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } @@ -620,6 +644,7 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, bool dec_bt_pwr) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; h2c_parameter[0] = 0; @@ -627,8 +652,8 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, if (dec_bt_pwr) h2c_parameter[0] |= BIT1; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); @@ -637,14 +662,16 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist, bool force_exec, bool dec_bt_pwr) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s Dec BT power = %s\n", + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s Dec BT power = %s\n", force_exec ? "force to" : "", dec_bt_pwr ? 
"ON" : "OFF"); coex_dm->cur_dec_bt_pwr = dec_bt_pwr; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) @@ -658,14 +685,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist, static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, bool force_exec, u8 fw_dac_swing_lvl) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s set FW Dac Swing level = %d\n", + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s set FW Dac Swing level = %d\n", (force_exec ? "force to" : ""), fw_dac_swing_lvl); coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", coex_dm->pre_fw_dac_swing_lvl, coex_dm->cur_fw_dac_swing_lvl); @@ -682,18 +711,20 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, bool rx_rf_shrink_on) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (rx_rf_shrink_on) { /* Shrink RF Rx LPF corner */ - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc); } else { /* Resume RF Rx LPF corner */ /* After initialized, we can use coex_dm->btRf0x1eBackup */ if (btcoexist->initilized) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Resume RF Rx LPF corner!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Resume RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup); @@ -704,15 +735,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist, bool force_exec, bool rx_rf_shrink_on) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn Rx RF Shrink = %s\n", + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Rx RF Shrink = %s\n", (force_exec ? "force to" : ""), (rx_rf_shrink_on ? "ON" : "OFF")); coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", coex_dm->pre_rf_rx_lpf_shrink, coex_dm->cur_rf_rx_lpf_shrink); @@ -729,6 +762,7 @@ static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist, static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist, bool low_penalty_ra) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[6] = {0}; h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/ @@ -742,9 +776,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ } - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" 
: "OFF!!")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -752,18 +786,20 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist, static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist, bool force_exec, bool low_penalty_ra) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + /*return; */ - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn LowPenaltyRA = %s\n", - (force_exec ? "force to" : ""), (low_penalty_ra ? - "ON" : "OFF")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec ? "force to" : ""), (low_penalty_ra ? + "ON" : "OFF")); coex_dm->cur_low_penalty_ra = low_penalty_ra; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", - coex_dm->pre_low_penalty_ra, - coex_dm->cur_low_penalty_ra); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", + coex_dm->pre_low_penalty_ra, + coex_dm->cur_low_penalty_ra); if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) return; @@ -776,9 +812,11 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist, static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, u32 level) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 val = (u8) level; - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Write SwDacSwing = 0x%x\n", level); + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); } @@ -796,20 +834,22 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec, bool dac_swing_on, u32 dac_swing_lvl) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", - (force_exec ? "force to" : ""), - (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", + (force_exec ? "force to" : ""), + (dac_swing_on ? 
"ON" : "OFF"), dac_swing_lvl); coex_dm->cur_dac_swing_on = dac_swing_on; coex_dm->cur_dac_swing_lvl = dac_swing_lvl; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", - coex_dm->pre_dac_swing_on, - coex_dm->pre_dac_swing_lvl, - coex_dm->cur_dac_swing_on, - coex_dm->cur_dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) @@ -826,12 +866,13 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, bool agc_table_en) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 rssi_adjust_val = 0; /* BB AGC Gain Table */ if (agc_table_en) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table On!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BB Agc Table On!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001); @@ -840,8 +881,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001); } else { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table Off!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BB Agc Table Off!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); @@ -854,15 +895,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, /* RF Gain */ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); if (agc_table_en) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table On!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Agc Table On!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38fff); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38ffe); } else { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table Off!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Agc Table Off!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x380c3); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, @@ -873,15 +914,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1); if (agc_table_en) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table On!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Agc Table On!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, 0xfffff, 0x38fff); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, 0xfffff, 0x38ffe); } else { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table Off!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Agc Table Off!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, 0xfffff, 0x380c3); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, @@ -899,17 +940,19 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, static 
void btc8723b2ant_agc_table(struct btc_coexist *btcoexist, bool force_exec, bool agc_table_en) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s %s Agc Table\n", - (force_exec ? "force to" : ""), - (agc_table_en ? "Enable" : "Disable")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s %s Agc Table\n", + (force_exec ? "force to" : ""), + (agc_table_en ? "Enable" : "Disable")); coex_dm->cur_agc_table_en = agc_table_en; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", - coex_dm->pre_agc_table_en, - coex_dm->cur_agc_table_en); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, + coex_dm->cur_agc_table_en); if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) return; @@ -923,20 +966,22 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -945,24 +990,26 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", - force_exec ? "force to" : "", - val0x6c0, val0x6c4, val0x6c8, val0x6cc); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + force_exec ? 
"force to" : "", + val0x6c0, val0x6c4, val0x6c8, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; coex_dm->cur_val0x6cc = val0x6cc; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", - coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, - coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", - coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, - coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && @@ -1043,14 +1090,15 @@ static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist, static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, bool enable) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; if (enable) h2c_parameter[0] |= BIT0;/* function enable*/ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -1058,16 +1106,18 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -1081,6 +1131,7 @@ static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist, static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[5]; h2c_parameter[0] = byte1; @@ -1095,11 +1146,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | - h2c_parameter[3] << 8 | h2c_parameter[4]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -1208,20 +1259,22 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn %s PS TDMA, type=%d\n", - (force_exec ? "force to" : ""), - (turn_on ? "ON" : "OFF"), type); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec ? "force to" : ""), + (turn_on ? 
"ON" : "OFF"), type); coex_dm->cur_ps_tdma_on = turn_on; coex_dm->cur_ps_tdma = type; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", - coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", - coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1405,6 +1458,7 @@ static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist) static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool common = false, wifi_connected = false; bool wifi_busy = false; bool bt_hs_on = false, low_pwr_disable = false; @@ -1419,8 +1473,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non-connected idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non-connected idle!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1443,8 +1497,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi connected + BT non connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1470,8 +1524,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) if (bt_hs_on) return false; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi connected + BT connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1495,15 +1549,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); common = false; } else { if (bt_hs_on) return false; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1539,10 +1593,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, s32 result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + /* Set PS TDMA for max interval == 1 */ if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 71) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -1638,8 +1694,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - 
btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); coex_dm->tdma_adj_type = 71; @@ -1735,10 +1791,12 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, s32 result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + /* Set PS TDMA for max interval == 2 */ if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); coex_dm->tdma_adj_type = 6; @@ -1819,8 +1877,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; @@ -1906,10 +1964,12 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, s32 result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + /* Set PS TDMA for max interval == 3 */ if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); coex_dm->tdma_adj_type = 7; @@ -1990,8 +2050,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; @@ -2078,18 +2138,19 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, bool sco_hid, bool tx_pause, u8 max_interval) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static s32 up, dn, m, n, wait_count; /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/ s32 result; u8 retry_count = 0; - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjust()\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TdmaDurationAdjust()\n"); if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); if (sco_hid) { if (tx_pause) { if (max_interval == 1) { @@ -2102,11 +2163,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 14); coex_dm->tdma_adj_type = 14; - } else if (max_interval == 3) { - btc8723b2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; } else { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2124,11 +2180,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 10); coex_dm->tdma_adj_type = 10; - } else if (max_interval == 3) { - btc8723b2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; } else { btc8723b2ant_ps_tdma(btcoexist, 
NORMAL_EXEC, @@ -2148,11 +2199,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 6); coex_dm->tdma_adj_type = 6; - } else if (max_interval == 3) { - btc8723b2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; } else { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2170,11 +2216,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; - } else if (max_interval == 3) { - btc8723b2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; } else { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2193,11 +2234,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, } else { /*accquire the BT TRx retry count from BT_Info byte2*/ retry_count = coex_sta->bt_retry_cnt; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], retry_count = %d\n", retry_count); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", - up, dn, m, n, wait_count); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], retry_count = %d\n", retry_count); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", + up, dn, m, n, wait_count); result = 0; wait_count++; /* no retry in the last 2-second duration*/ @@ -2214,8 +2255,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi duration!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Increase wifi duration!!\n"); } /* <=3 retry in the last 2-second duration*/ } else if (retry_count <= 3) { up--; @@ -2238,8 +2279,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retry_counter<3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retry_counter<3!!\n"); } } else { if (wait_count == 1) @@ -2255,12 +2296,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retry_counter>3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retry_counter>3!!\n"); } - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], max Interval = %d\n", max_interval); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], max Interval = %d\n", max_interval); if (max_interval == 1) set_tdma_int1(btcoexist, tx_pause, result); else if (max_interval == 2) @@ -2274,9 +2315,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, */ if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { bool scan = false, link = false, roam = false; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", - coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", + coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2286,8 +2327,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, btc8723b2ant_ps_tdma(btcoexist, 
NORMAL_EXEC, true, coex_dm->tdma_adj_type); else - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); } } @@ -2357,7 +2398,7 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2422,7 +2463,7 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, 40, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); @@ -2561,7 +2602,7 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2672,7 +2713,7 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2736,7 +2777,7 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (btc8723b_need_dec_pwr(btcoexist)) @@ -2806,7 +2847,7 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2870,7 +2911,7 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2923,28 +2964,29 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 algorithm = 0; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); return; } if (coex_sta->under_ips) { - btc_alg_dbg(ALGO_TRACE, - 
"[BTCoex], wifi is under IPS !!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi is under IPS !!!\n"); return; } algorithm = btc8723b2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT is under inquiry/page scan !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is under inquiry/page scan !!\n"); btc8723b2ant_action_bt_inquiry(btcoexist); return; } else { @@ -2956,75 +2998,76 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) } coex_dm->cur_algorithm = algorithm; - btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n", - coex_dm->cur_algorithm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Algorithm = %d\n", + coex_dm->cur_algorithm); if (btc8723b2ant_is_common_action(btcoexist)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant common\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant common\n"); coex_dm->auto_tdma_adjust = false; } else { if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n", - coex_dm->pre_algorithm, - coex_dm->cur_algorithm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n", + coex_dm->pre_algorithm, + coex_dm->cur_algorithm); coex_dm->auto_tdma_adjust = false; } switch (coex_dm->cur_algorithm) { case BT_8723B_2ANT_COEX_ALGO_SCO: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = SCO\n"); btc8723b2ant_action_sco(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HID\n"); btc8723b2ant_action_hid(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = A2DP\n"); btc8723b2ant_action_a2dp(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); btc8723b2ant_action_a2dp_pan_hs(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n"); btc8723b2ant_action_pan_edr(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HS mode\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HS mode\n"); btc8723b2ant_action_pan_hs(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n"); btc8723b2ant_action_pan_edr_a2dp(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n"); 
btc8723b2ant_action_pan_edr_hid(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n"); btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_HID_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n"); btc8723b2ant_action_hid_a2dp(btcoexist); break; default: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); btc8723b2ant_coex_alloff(btcoexist); break; } @@ -3050,10 +3093,11 @@ static void btc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist) *********************************************************************/ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u8tmp = 0; - btc_iface_dbg(INTF_INIT, - "[BTCoex], 2Ant Init HW Config!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], 2Ant Init HW Config!!\n"); coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff); @@ -3078,8 +3122,10 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Coex Mechanism Init!!\n"); btc8723b2ant_init_coex_dm(btcoexist); } @@ -3101,13 +3147,13 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) u8 ap_num = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + "\r\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ==========[Under Manual Control]============"); + "\r\n ==========[Under Manual Control]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + "\r\n =========================================="); } if (!board_info->bt_exist) { @@ -3116,21 +3162,21 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", - "Ant PG number/ Ant mechanism:", - board_info->pg_ant_num, board_info->btdm_ant_num); + "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", - "BT stack/ hci ext ver", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, @@ -3138,23 +3184,23 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsChnl(HsMode)", - wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d", - "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num); + "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "Wifi link/ roam/ scan", link, roam, scan); + "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); @@ -3162,112 +3208,112 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", - "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), - ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : - (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), - ((!wifi_busy) ? "idle" : - ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? - "uplink" : "downlink"))); + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" : + (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? + "uplink" : "downlink"))); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d", - "SCO/HID/PAN/A2DP", - bt_link_info->sco_exist, bt_link_info->hid_exist, - bt_link_info->pan_exist, bt_link_info->a2dp_exist); + "SCO/HID/PAN/A2DP", + bt_link_info->sco_exist, bt_link_info->hid_exist, + bt_link_info->pan_exist, bt_link_info->a2dp_exist); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); bt_info_ext = coex_sta->bt_info_ext; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", - (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate"); + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? 
"Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8723b_2ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + "\r\n %-35s = %7ph(%d)", + glbt_info_src_8723b_2ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", - "PS state, IPS/LPS", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); /* Sw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "============[Sw mechanism]============"); + "\r\n %-35s", "============[Sw mechanism]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, - coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ", - "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", - coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, - coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); /* Fw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + "============[Fw mechanism]============"); ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", - "PS TDMA", coex_dm->ps_tdma_para, - ps_tdma_case, coex_dm->auto_tdma_adjust); + "\r\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para, + ps_tdma_case, coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", - "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr, - coex_dm->cur_ignore_wlan_act); + "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr, + coex_dm->cur_ignore_wlan_act); /* Hw setting */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Hw setting]============"); + "============[Hw setting]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", - "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x778/0x880[29:25]", u8tmp[0], - (u32tmp[0]&0x3e000000) >> 25); + "0x778/0x880[29:25]", u8tmp[0], + (u32tmp[0] & 0x3e000000) >> 25); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x948/ 0x67[5] / 0x765", - u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]); + "0x948/ 0x67[5] / 0x765", + u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); 
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", - u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3); + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x38[11]/0x40/0x4c[24:23]/0x64[0]", - ((u8tmp[0] & 0x8)>>3), u8tmp[1], - ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1); + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8) >> 3), u8tmp[1], + ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]); + "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); @@ -3286,24 +3332,24 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) fa_cck = (u8tmp[0] << 8) + u8tmp[1]; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "OFDM-CCA/OFDM-FA/CCK-FA", - u32tmp[0]&0xffff, fa_ofdm, fa_cck); + "OFDM-CCA/OFDM-FA/CCK-FA", + u32tmp[0] & 0xffff, fa_ofdm, fa_cck); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", - u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770(high-pri rx/tx)", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); + "0x770(high-pri rx/tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, - coex_sta->low_priority_tx); + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) btc8723b2ant_monitor_bt_ctr(btcoexist); #endif @@ -3313,16 +3359,18 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_IPS_ENTER == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; btc8723b2ant_wifioff_hwcfg(btcoexist); btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); btc8723b2ant_coex_alloff(btcoexist); } else if (BTC_IPS_LEAVE == type) { - 
btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; ex_btc8723b2ant_init_hwconfig(btcoexist); btc8723b2ant_init_coex_dm(btcoexist); @@ -3332,50 +3380,57 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_LPS_ENABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_SCAN_START == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN START notify\n"); else if (BTC_SCAN_FINISH == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN FINISH notify\n"); } void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_ASSOCIATE_START == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT START notify\n"); else if (BTC_ASSOCIATE_FINISH == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT FINISH notify\n"); } void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[3] = {0}; u32 wifi_bw; u8 wifi_central_chnl; if (BTC_MEDIA_CONNECT == type) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA connect notify\n"); else - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA disconnect notify\n"); /* only 2.4G we need to inform bt the chnl mask */ btcoexist->btc_get(btcoexist, @@ -3396,10 +3451,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66=0x%x\n", - h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | - h2c_parameter[2]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x66=0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -3407,14 +3462,17 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (type == BTC_PACKET_DHCP) - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], DHCP Packet notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], DHCP Packet notify\n"); } void ex_btc8723b2ant_bt_info_notify(struct btc_coexist 
*btcoexist, u8 *tmpbuf, u8 length) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 bt_info = 0; u8 i, rsp_source = 0; bool bt_busy = false, limited_dig = false; @@ -3427,24 +3485,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Bt info[%d], length=%d, hex data=[", - rsp_source, length); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Bt info[%d], length=%d, hex data=[", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i]; if (i == 1) bt_info = tmpbuf[i]; if (i == length-1) - btc_iface_dbg(INTF_NOTIFY, - "0x%02x]\n", tmpbuf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x]\n", tmpbuf[i]); else - btc_iface_dbg(INTF_NOTIFY, - "0x%02x, ", tmpbuf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x, ", tmpbuf[i]); } if (btcoexist->manual_control) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"); return; } @@ -3462,8 +3520,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, because bt is reset and loss of the info. */ if ((coex_sta->bt_info_ext & BIT1)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) @@ -3477,8 +3535,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, } if ((coex_sta->bt_info_ext & BIT3)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, false); } else { @@ -3531,26 +3589,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); /* connection exists but no busy */ } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) || (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); } else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], 
BtInfoNotify(), BT Non-Defined state!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); } if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -3573,7 +3631,9 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n"); btc8723b2ant_wifioff_hwcfg(btcoexist); btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); @@ -3582,36 +3642,37 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist) void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; static u8 dis_ver_info_cnt; u32 fw_ver = 0, bt_patch_ver = 0; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], ==========================Periodical===========================\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); - btc_iface_dbg(INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num, board_info->btdm_ant_num, board_info->btdm_ant_pos); - btc_iface_dbg(INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", stack_info->profile_notified ? "Yes" : "No", stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, fw_ver, bt_patch_ver, bt_patch_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) -- cgit v1.2.3 From bed4ff58af9d9c31da38522ead93403ffb661569 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 3 Feb 2017 11:35:19 -0600 Subject: rtlwifi: btcoexist: Convert halbtc8821a1ant.c to use standard debugging The routines in btcoexist use different debugging routines than are used in the other drivers. This patch converts halbtc8821a1ant.c to use the standard routines. 
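For reference, a minimal sketch of the conversion pattern applied throughout the file (illustrative only, not part of the patch; "example_fn" is a placeholder name, everything else is taken from the driver):

	/* assumes the rtlwifi/btcoexist headers for these types and macros */
	static void example_fn(struct btc_coexist *btcoexist)
	{
		/* the standard tracing is keyed to the per-adapter rtl_priv */
		struct rtl_priv *rtlpriv = btcoexist->adapter;

		/* before: btcoexist-private debug macros
		 *   btc_alg_dbg(ALGO_TRACE, "[BTCoex], example\n");
		 *   btc_iface_dbg(INTF_NOTIFY, "[BTCoex], example\n");
		 */

		/* after: standard rtlwifi tracing */
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
			 "[BTCoex], example\n");
	}

Helpers that previously took no btc_coexist pointer (such as the
bt_rssi_state routines) gain one as a parameter so they can reach
btcoexist->adapter.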
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 898 +++++++++++---------- 1 file changed, 480 insertions(+), 418 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 3ce47c70bfa4..20562f081c96 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -62,9 +62,11 @@ static u32 glcoex_ver_8821a_1ant = 0x41; * local function start with halbtc8821a1ant_ *============================================================ */ -static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, +static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; long bt_rssi = 0; u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; @@ -76,28 +78,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -106,12 +108,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -120,26 +122,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + 
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -152,6 +154,7 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, u8 index, u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; long wifi_rssi = 0; u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; @@ -165,28 +168,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -197,12 +200,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -212,26 +215,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < 
rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -414,15 +417,16 @@ static void halbtc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist) static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; h2c_parameter[0] |= BIT0; /* trigger*/ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -485,6 +489,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool bt_hs_on = false; u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED; @@ -493,8 +498,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (!bt_link_info->bt_link_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], No BT link exists!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], No BT link exists!!!\n"); return algorithm; } @@ -509,26 +514,28 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) if (num_of_diff_profile == 1) { if (bt_link_info->sco_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = HID only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = A2DP only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = A2DP only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = PAN(HS) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = PAN(HS) only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = PAN(EDR) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT 
Profile = PAN(EDR) only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR; } } @@ -536,50 +543,56 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) } else if (num_of_diff_profile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } } else { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = HID + A2DP\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP; } } @@ -588,29 +601,33 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if 
(bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } @@ -619,12 +636,14 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR; } } @@ -635,12 +654,14 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } @@ -652,6 +673,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist, bool enable_auto_report) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; h2c_parameter[0] = 0; @@ -659,10 +681,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist, if (enable_auto_report) h2c_parameter[0] |= BIT0; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_auto_report ? "Enabled!!" : "Disabled!!"), - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_auto_report ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); } @@ -671,14 +693,17 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist, bool force_exec, bool enable_auto_report) { - btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? "force to" : ""), ((enable_auto_report) ? + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), ((enable_auto_report) ? 
"Enabled" : "Disabled")); coex_dm->cur_bt_auto_report = enable_auto_report; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", coex_dm->pre_bt_auto_report, coex_dm->cur_bt_auto_report); @@ -693,6 +718,7 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist, static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist, bool low_penalty_ra) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[6] = {0}; h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/ @@ -706,9 +732,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ } - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" : "OFF!!")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -731,20 +757,22 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -752,8 +780,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist, bool force_exec, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", (force_exec ? 
"force to" : ""), val0x6c0, val0x6c4, val0x6c8, val0x6cc); coex_dm->cur_val_0x6c0 = val0x6c0; @@ -822,14 +852,15 @@ static void halbtc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist, static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, bool enable) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; if (enable) h2c_parameter[0] |= BIT0; /* function enable*/ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -837,16 +868,18 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -861,6 +894,7 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[5] = {0}; h2c_parameter[0] = byte1; @@ -875,13 +909,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1] << 24 | - h2c_parameter[2] << 16 | - h2c_parameter[3] << 8 | - h2c_parameter[4]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | + h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | + h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -898,22 +932,24 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist, static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, bool force_exec, u8 lps_val, u8 rpwm_val) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", - (force_exec ? "force to" : ""), lps_val, rpwm_val); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", + (force_exec ? 
"force to" : ""), lps_val, rpwm_val); coex_dm->cur_lps = lps_val; coex_dm->cur_rpwm = rpwm_val; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n", - coex_dm->cur_lps, coex_dm->cur_rpwm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n", + coex_dm->cur_lps, coex_dm->cur_rpwm); if ((coex_dm->pre_lps == coex_dm->cur_lps) && (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n", - coex_dm->pre_rpwm, coex_dm->cur_rpwm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n", + coex_dm->pre_rpwm, coex_dm->cur_rpwm); return; } @@ -927,8 +963,10 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist, bool low_penalty_ra) { - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } @@ -1017,6 +1055,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 rssi_adjust_val = 0; coex_dm->cur_ps_tdma_on = turn_on; @@ -1024,13 +1063,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, if (!force_exec) { if (coex_dm->cur_ps_tdma_on) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], ********** TDMA(on, %d) **********\n", - coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ********** TDMA(on, %d) **********\n", + coex_dm->cur_ps_tdma); } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], ********** TDMA(off, %d) **********\n", - coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ********** TDMA(off, %d) **********\n", + coex_dm->cur_ps_tdma); } if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1232,6 +1271,7 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool common = false, wifi_connected = false, wifi_busy = false; btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, @@ -1241,50 +1281,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) if (!wifi_connected && BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (wifi_connected && (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi connected + BT non connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (!wifi_connected && (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == 
coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (wifi_connected && (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi connected + BT connected-idle!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (!wifi_connected && (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else { if (wifi_busy) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); } common = false; @@ -1296,13 +1336,14 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, u8 wifi_status) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static long up, dn, m, n, wait_count; /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/ long result; u8 retry_count = 0, bt_info_ext; - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjustForAcl()\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TdmaDurationAdjustForAcl()\n"); if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifi_status) || @@ -1330,8 +1371,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; @@ -1366,8 +1407,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi duration!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Increase wifi duration!!\n"); } } else if (retry_count <= 3) { /* <=3 retry in the last 2-second duration*/ @@ -1397,8 +1438,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); } } else { /* retry count > 3, if retry count > 3 happens once, @@ -1419,8 +1460,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); } if (result == -1) 
{ @@ -1465,9 +1506,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, } } else { /*no change*/ - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], ********** TDMA(on, %d) **********\n", - coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ********** TDMA(on, %d) **********\n", + coex_dm->cur_ps_tdma); } if (coex_dm->cur_ps_tdma != 1 && @@ -1566,6 +1607,7 @@ static void halbtc8821a1ant_action_wifi_only(struct btc_coexist *btcoexist) static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static bool pre_bt_disabled; static u32 bt_disable_cnt; bool bt_active = true, bt_disabled = false; @@ -1589,25 +1631,25 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist) bt_disabled = false; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is enabled !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is enabled !!\n"); } else { bt_disable_cnt++; - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); if (bt_disable_cnt >= 2) { bt_disabled = true; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is disabled !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is disabled !!\n"); halbtc8821a1ant_action_wifi_only(btcoexist); } } if (pre_bt_disabled != bt_disabled) { - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is from %s to %s!!\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is from %s to %s!!\n", (pre_bt_disabled ? "disabled" : "enabled"), (bt_disabled ? 
"disabled" : "enabled")); pre_bt_disabled = bt_disabled; @@ -1740,7 +1782,7 @@ static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bt_rssi_state = halbtc8821a1ant_bt_rssi_state(2, 28, 0); + bt_rssi_state = halbtc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0); if (bt_link_info->hid_only) { /*HID*/ @@ -1879,19 +1921,20 @@ static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist) static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_busy = false; bool scan = false, link = false, roam = false; bool under_4way = false; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], CoexForWifiConnect()===>\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexForWifiConnect()===>\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way); if (under_4way) { btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); return; } @@ -1900,8 +1943,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); if (scan || link || roam) { halbtc8821a1ant_action_wifi_connected_scan(btcoexist); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); return; } @@ -1954,6 +1997,7 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 algorithm = 0; algorithm = halbtc8821a1ant_action_algorithm(btcoexist); @@ -1962,58 +2006,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) if (!halbtc8821a1ant_is_common_action(btcoexist)) { switch (coex_dm->cur_algorithm) { case BT_8821A_1ANT_COEX_ALGO_SCO: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = SCO\n"); halbtc8821a1ant_action_sco(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HID\n"); halbtc8821a1ant_action_hid(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = A2DP\n"); halbtc8821a1ant_action_a2dp(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); halbtc8821a1ant_action_a2dp_pan_hs(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = PAN(EDR)\n"); halbtc8821a1ant_action_pan_edr(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HS mode\n"); + 
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HS mode\n"); halbtc8821a1ant_action_pan_hs(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = PAN+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = PAN+A2DP\n"); halbtc8821a1ant_action_pan_edr_a2dp(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); halbtc8821a1ant_action_pan_edr_hid(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP+PAN\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HID+A2DP+PAN\n"); btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = HID+A2DP\n"); halbtc8821a1ant_action_hid_a2dp(btcoexist); break; default: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action algorithm = coexist All Off!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action algorithm = coexist All Off!!\n"); /*halbtc8821a1ant_coex_all_off(btcoexist);*/ break; } @@ -2023,6 +2067,7 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool wifi_connected = false, bt_hs_on = false; bool increase_scan_dev_num = false; @@ -2031,31 +2076,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; bool wifi_under_5g = false; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); return; } if (btcoexist->stop_coex_dm) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); return; } if (coex_sta->under_ips) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], wifi is under IPS !!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi is under IPS !!!\n"); return; } btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); if (wifi_under_5g) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); halbtc8821a1ant_coex_under_5g(btcoexist); return; } @@ -2121,8 +2166,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) if (!wifi_connected) { bool scan = false, link = false, roam = false; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], wifi is non connected-idle !!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi is non connected-idle !!!\n"); 
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2151,11 +2196,12 @@ static void halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist, bool back_up) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u1_tmp = 0; bool wifi_under_5g = false; - btc_iface_dbg(INTF_INIT, - "[BTCoex], 1Ant Init HW Config!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], 1Ant Init HW Config!!\n"); if (back_up) { coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist, @@ -2206,8 +2252,10 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Coex Mechanism Init!!\n"); btcoexist->stop_coex_dm = false; @@ -2233,19 +2281,19 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) u32 fw_ver = 0, bt_patch_ver = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + "\r\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[Under Manual Control]============"); + "\r\n ============[Under Manual Control]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + "\r\n =========================================="); } if (btcoexist->stop_coex_dm) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[Coex is STOPPED]============"); + "\r\n ============[Coex is STOPPED]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + "\r\n =========================================="); } if (!board_info->bt_exist) { @@ -2254,27 +2302,27 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d", - "Ant PG Num/ Ant Mech/ Ant Pos:", - board_info->pg_ant_num, - board_info->btdm_ant_num, - board_info->btdm_ant_pos); + "\r\n %-35s = %d/ %d/ %d", + "Ant PG Num/ Ant Mech/ Ant Pos:", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8821a_1ant, - glcoex_ver_8821a_1ant, - fw_ver, bt_patch_ver, - bt_patch_ver); + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8821a_1ant, + glcoex_ver_8821a_1ant, + fw_ver, bt_patch_ver, + bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); @@ -2283,27 +2331,27 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsChnl(HsMode)", - wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", - coex_dm->wifi_chnl_info); + "\r\n %-35s = %3ph ", + "H2C Wifi inform bt chnl Info", + coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", - (int)wifi_rssi, (int)bt_hs_rssi); + "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", + (int)wifi_rssi, (int)bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", - link, roam, scan); + "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", + link, roam, scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); @@ -2314,13 +2362,13 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %s/ %s ", "Wifi status", - (wifi_under_5g ? "5G" : "2.4G"), - ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : - (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), - ((!wifi_busy) ? "idle" : - ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? - "uplink" : "downlink"))); + "\r\n %-35s = %s / %s/ %s ", "Wifi status", + (wifi_under_5g ? "5G" : "2.4G"), + ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" : + (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? + "uplink" : "downlink"))); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", ((btcoexist->bt_info.bt_disabled) ? 
("disabled") : @@ -2334,161 +2382,162 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) coex_sta->bt_rssi, coex_sta->bt_retry_cnt); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", - bt_link_info->sco_exist, - bt_link_info->hid_exist, - bt_link_info->pan_exist, - bt_link_info->a2dp_exist); + "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", + bt_link_info->sco_exist, + bt_link_info->hid_exist, + bt_link_info->pan_exist, + bt_link_info->a2dp_exist); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); bt_info_ext = coex_sta->bt_info_ext; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s", - "BT Info A2DP rate", - (bt_info_ext&BIT0) ? - "Basic rate" : "EDR rate"); + "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? + "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8821a_1ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + "\r\n %-35s = %7ph(%d)", + glbt_info_src_8821a_1ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s/%s, (0x%x/0x%x)", - "PS state, IPS/LPS, (lps/rpwm)", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")), - btcoexist->bt_info.lps_val, - btcoexist->bt_info.rpwm_val); + "\r\n %-35s = %s/%s, (0x%x/0x%x)", + "PS state, IPS/LPS, (lps/rpwm)", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")), + btcoexist->bt_info.lps_val, + btcoexist->bt_info.rpwm_val); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); if (!btcoexist->manual_control) { /* Sw mechanism*/ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "============[Sw mechanism]============"); + "\r\n %-35s", + "============[Sw mechanism]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d", "SM[LowPenaltyRA]", - coex_dm->cur_low_penalty_ra); + "\r\n %-35s = %d", "SM[LowPenaltyRA]", + coex_dm->cur_low_penalty_ra); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s/ %s/ %d ", - "DelBA/ BtCtrlAgg/ AggSize", - (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), - (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"), - btcoexist->bt_info.agg_buf_size); + "\r\n %-35s = %s/ %s/ %d ", + "DelBA/ BtCtrlAgg/ AggSize", + (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), + (btcoexist->bt_info.bt_ctrl_buf_size ? 
"Yes" : "No"), + btcoexist->bt_info.agg_buf_size); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x ", "Rate Mask", - btcoexist->bt_info.ra_mask); + "\r\n %-35s = 0x%x ", "Rate Mask", + btcoexist->bt_info.ra_mask); /* Fw mechanism*/ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + "============[Fw mechanism]============"); ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", - "PS TDMA", - coex_dm->ps_tdma_para, - ps_tdma_case, - coex_dm->auto_tdma_adjust); + "\r\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", + coex_dm->ps_tdma_para, + ps_tdma_case, + coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x ", - "Latest error condition(should be 0)", + "\r\n %-35s = 0x%x ", + "Latest error condition(should be 0)", coex_dm->error_condition); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d ", "IgnWlanAct", - coex_dm->cur_ignore_wlan_act); + "\r\n %-35s = %d ", "IgnWlanAct", + coex_dm->cur_ignore_wlan_act); } /* Hw setting*/ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "============[Hw setting]============"); + "\r\n %-35s", "============[Hw setting]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "backup ARFR1/ARFR2/RL/AMaxTime", - coex_dm->backup_arfr_cnt1, - coex_dm->backup_arfr_cnt2, - coex_dm->backup_retry_limit, - coex_dm->backup_ampdu_max_time); + "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", + coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, + coex_dm->backup_retry_limit, + coex_dm->backup_ampdu_max_time); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "0x430/0x434/0x42a/0x456", - u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]); + "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]", - u1_tmp[0], (u4_tmp[0]&0x3e000000) >> 25); + "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]", + u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x", "0x8db[6:5]", - ((u1_tmp[0]&0x60)>>5)); + "\r\n %-35s = 0x%x", "0x8db[6:5]", + ((u1_tmp[0] & 0x60) >> 5)); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]", - (u4_tmp[0] & 0x30000000)>>28, - u4_tmp[0] & 0xff, - u1_tmp[0] & 0x3); + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]", + (u4_tmp[0] & 0x30000000) >> 28, + u4_tmp[0] & 0xff, + u1_tmp[0] & 0x3); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x40/0x4c[24:23]/0x64[0]", - u1_tmp[0], ((u4_tmp[0]&0x01800000)>>23), u1_tmp[1]&0x1); + 
"\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x40/0x4c[24:23]/0x64[0]", + u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", - u4_tmp[0], u1_tmp[0]); + "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", + u4_tmp[0], u1_tmp[0]); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x", "0xc50(dig)", - u4_tmp[0]&0xff); + "\r\n %-35s = 0x%x", "0xc50(dig)", + u4_tmp[0] & 0xff); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d); u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA", - u4_tmp[0], (u1_tmp[0]<<8) + u1_tmp[1]); + "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA", + u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); + "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", - coex_sta->low_priority_rx, coex_sta->low_priority_tx); + "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1) halbtc8821a1ant_monitor_bt_ctr(btcoexist); #endif @@ -2497,12 +2546,14 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (btcoexist->manual_control || btcoexist->stop_coex_dm) return; if (BTC_IPS_ENTER == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true); @@ -2511,8 +2562,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); } else if (BTC_IPS_LEAVE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; halbtc8821a1ant_run_coexist_mechanism(btcoexist); @@ -2521,22 +2572,25 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (btcoexist->manual_control || btcoexist->stop_coex_dm) return; if (BTC_LPS_ENABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + 
"[BTCoex], LPS ENABLE notify\n"); coex_sta->under_Lps = true; } else if (BTC_LPS_DISABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_Lps = false; } } void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_connected = false, bt_hs_on = false; if (btcoexist->manual_control || @@ -2560,8 +2614,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_SCAN_START == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN START notify\n"); if (!wifi_connected) { /* non-connected scan*/ btc8821a1ant_act_wifi_not_conn_scan(btcoexist); @@ -2570,8 +2624,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) halbtc8821a1ant_action_wifi_connected_scan(btcoexist); } } else if (BTC_SCAN_FINISH == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN FINISH notify\n"); if (!wifi_connected) { /* non-connected scan*/ halbtc8821a1ant_action_wifi_not_connected(btcoexist); @@ -2583,6 +2637,7 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_connected = false, bt_hs_on = false; if (btcoexist->manual_control || @@ -2600,12 +2655,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_ASSOCIATE_START == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT START notify\n"); btc8821a1ant_act_wifi_not_conn_scan(btcoexist); } else if (BTC_ASSOCIATE_FINISH == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT FINISH notify\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -2621,6 +2676,7 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[3] = {0}; u32 wifi_bw; u8 wifi_central_chnl; @@ -2631,11 +2687,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, return; if (BTC_MEDIA_CONNECT == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA connect notify\n"); } else { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA disconnect notify\n"); } /* only 2.4G we need to inform bt the chnl mask*/ @@ -2658,11 +2714,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0] << 16 | - h2c_parameter[1] << 8 | - h2c_parameter[2]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | + h2c_parameter[1] << 8 | + h2c_parameter[2]); 
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -2670,6 +2726,7 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool bt_hs_on = false; if (btcoexist->manual_control || @@ -2690,8 +2747,8 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, if (BTC_PACKET_DHCP == type || BTC_PACKET_EAPOL == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], special Packet(%d) notify\n", type); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], special Packet(%d) notify\n", type); btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); } } @@ -2699,6 +2756,7 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, u8 length) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 bt_info = 0; u8 i, rsp_source = 0; bool wifi_connected = false; @@ -2715,19 +2773,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Bt info[%d], length = %d, hex data = [", - rsp_source, length); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Bt info[%d], length = %d, hex data = [", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length-1) { - btc_iface_dbg(INTF_NOTIFY, - "0x%02x]\n", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x]\n", tmp_buf[i]); } else { - btc_iface_dbg(INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x, ", tmp_buf[i]); } } @@ -2744,8 +2802,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, /* Here we need to resend some wifi info to BT*/ /* because bt is reset and loss of the info.*/ if (coex_sta->bt_info_ext & BIT1) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -2761,8 +2819,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) { if (!btcoexist->manual_control && !btcoexist->stop_coex_dm) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, false); @@ -2770,8 +2828,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, } #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) if (!(coex_sta->bt_info_ext & BIT4)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"); halbtc8821a1ant_bt_auto_report(btcoexist, FORCE_EXEC, true); } @@ -2816,28 +2874,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE; - 
btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) { /* connection exists but no busy*/ coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) || (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) { if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) coex_dm->auto_tdma_adjust = false; coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); } if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -2854,8 +2912,10 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Halt notify\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Halt notify\n"); btcoexist->stop_coex_dm = true; @@ -2873,20 +2933,22 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist) void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Pnp notify\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Pnp notify\n"); if (BTC_WIFI_PNP_SLEEP == pnp_state) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Pnp notify to SLEEP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Pnp notify to SLEEP\n"); btcoexist->stop_coex_dm = true; halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Pnp notify to WAKE UP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Pnp notify to WAKE UP\n"); btcoexist->stop_coex_dm = false; halbtc8821a1ant_init_hw_config(btcoexist, false); halbtc8821a1ant_init_coex_dm(btcoexist); @@ -2894,41 +2956,41 @@ void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) } } -void -ex_halbtc8821a1ant_periodical( - struct btc_coexist *btcoexist) { +void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; static u8 dis_ver_info_cnt; u32 fw_ver = 0, bt_patch_ver = 0; struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], 
==========================Periodical===========================\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); - btc_iface_dbg(INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num, board_info->btdm_ant_num, board_info->btdm_ant_pos); - btc_iface_dbg(INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", stack_info->profile_notified ? "Yes" : "No", stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", glcoex_ver_date_8821a_1ant, glcoex_ver_8821a_1ant, fw_ver, bt_patch_ver, bt_patch_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) -- cgit v1.2.3 From 10468c3b438c36a40e237bab86d15d2b87716046 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 3 Feb 2017 11:35:20 -0600 Subject: rtlwifi: btcoexist: Convert halbtc8821a2ant.c to use standard debugging The routines in btcoexist use different debugging routines than are used in the other drivers. This patch converts halbtc8821a2ant.c to use the standard routines. 
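
As a minimal sketch of the conversion pattern this patch applies (illustrative only, not code taken from the patch; the example function name is hypothetical), each converted routine looks up the rtl_priv instance through btcoexist->adapter and logs via RT_TRACE() with the COMP_BT_COEXIST component at the DBG_LOUD level, in place of the driver-private btc_alg_dbg()/btc_iface_dbg() wrappers:

	/* Sketch of the conversion pattern used throughout this patch.
	 * The function name is illustrative; only the rtlpriv lookup and
	 * the logging call reflect the actual change.
	 */
	static void example_coex_notify(struct btc_coexist *btcoexist, u8 type)
	{
		struct rtl_priv *rtlpriv = btcoexist->adapter;

		/* before this patch:
		 *	btc_iface_dbg(INTF_NOTIFY,
		 *		      "[BTCoex], notify type = %d\n", type);
		 */
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
			 "[BTCoex], notify type = %d\n", type);
	}

The bulk of the diff below is this same substitution repeated for every debug statement in the file, plus declaring the local rtlpriv pointer in each function and, where a helper had no access to the adapter (for example halbtc8821a2ant_bt_rssi_state()), passing btcoexist in as a new first parameter.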
Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 1075 +++++++++++--------- 1 file changed, 571 insertions(+), 504 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 81f843bba771..962fbaf3ba1d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -65,9 +65,11 @@ static u32 glcoex_ver_8821a_2ant = 0x5050; * local function start with halbtc8821a2ant_ *============================================================ */ -static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, +static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; long bt_rssi = 0; u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; @@ -80,28 +82,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT; if (bt_rssi >= tmp) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -110,12 +112,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -125,26 +127,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to High\n"); } else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, 
DBG_LOUD, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -158,6 +160,7 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, u8 index, u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) { + struct rtl_priv *rtlpriv = btcoexist->adapter; long wifi_rssi = 0; u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; @@ -171,28 +174,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI thresh error!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -203,12 +206,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -217,26 +220,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < 
rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - btc_alg_dbg(ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -247,6 +250,7 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static bool pre_bt_disabled; static u32 bt_disable_cnt; bool bt_active = true, bt_disabled = false; @@ -268,32 +272,33 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist) bt_disabled = false; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is enabled !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is enabled !!\n"); } else { bt_disable_cnt++; - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); if (bt_disable_cnt >= 2) { bt_disabled = true; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is disabled !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is disabled !!\n"); } } if (pre_bt_disabled != bt_disabled) { - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], BT is from %s to %s!!\n", - (pre_bt_disabled ? "disabled" : "enabled"), - (bt_disabled ? "disabled" : "enabled")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? 
"disabled" : "enabled")); pre_bt_disabled = bt_disabled; } } static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u32 reg_hp_txrx, reg_lp_txrx, u4tmp; u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; @@ -313,12 +318,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); - btc_alg_dbg(ALGO_BT_MONITOR, - "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", - reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); /* reset counter */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); @@ -326,21 +331,23 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist) { - u8 h2c_parameter[1] = {0}; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; h2c_parameter[0] |= BIT0; /* trigger */ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_stack_info *stack_info = &btcoexist->stack_info; bool bt_hs_on = false; u8 algorithm = BT_8821A_2ANT_COEX_ALGO_UNDEFINED; @@ -357,8 +364,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) stack_info->bt_link_exist = coex_sta->bt_link_exist; if (!coex_sta->bt_link_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], No profile exists!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], No profile exists!!!\n"); return algorithm; } @@ -373,26 +380,28 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) if (num_of_diff_profile == 1) { if (coex_sta->sco_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } else { if (coex_sta->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], HID only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID; } else if (coex_sta->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], A2DP only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], A2DP only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP; } else if (coex_sta->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], PAN(HS) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], PAN(HS) only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], PAN(EDR) only\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], PAN(EDR) only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR; } } @@ 
-400,50 +409,56 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) } else if (num_of_diff_profile == 2) { if (coex_sta->sco_exist) { if (coex_sta->hid_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO + HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else if (coex_sta->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + A2DP ==> SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO + A2DP ==> SCO\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else if (coex_sta->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } } else { if (coex_sta->hid_exist && coex_sta->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], HID + A2DP\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; } else if (coex_sta->hid_exist && coex_sta->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } else if (coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], A2DP + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], A2DP + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP; } } @@ -452,29 +467,33 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) if (coex_sta->sco_exist) { if (coex_sta->hid_exist && coex_sta->a2dp_exist) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCO + HID + A2DP ==> HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else if (coex_sta->hid_exist && coex_sta->pan_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + HID + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + HID + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } else if (coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + A2DP + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } 
@@ -483,12 +502,14 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + A2DP + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], HID + A2DP + PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], HID + A2DP + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } } @@ -499,12 +520,14 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_LOUD, + "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } @@ -515,6 +538,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool ret = false; bool bt_hs_on = false, wifi_connected = false; long bt_hs_rssi = 0; @@ -528,20 +552,20 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) return false; - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); if (wifi_connected) { if (bt_hs_on) { if (bt_hs_rssi > 37) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt power for HS mode!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Need to decrease bt power for HS mode!!\n"); ret = true; } } else { if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); ret = true; } } @@ -552,17 +576,18 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist, u8 dac_swing_lvl) { - u8 h2c_parameter[1] = {0}; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 h2c_parameter[1] = {0}; /* There are several type of dacswing * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */ h2c_parameter[0] = dac_swing_lvl; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } @@ -570,16 +595,17 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, bool dec_bt_pwr) { - u8 h2c_parameter[1] = {0}; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 
h2c_parameter[1] = {0}; h2c_parameter[0] = 0; if (dec_bt_pwr) h2c_parameter[0] |= BIT1; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n", - (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n", + (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } @@ -587,15 +613,17 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist, bool force_exec, bool dec_bt_pwr) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s Dec BT power = %s\n", + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s Dec BT power = %s\n", (force_exec ? "force to" : ""), ((dec_bt_pwr) ? "ON" : "OFF")); coex_dm->cur_dec_bt_pwr = dec_bt_pwr; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n", coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) @@ -609,6 +637,7 @@ static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist, static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist, bool bt_lna_cons_on) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[2] = {0}; h2c_parameter[0] = 0x3; /* opCode, 0x3 = BT_SET_LNA_CONSTRAIN */ @@ -616,10 +645,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist, if (bt_lna_cons_on) h2c_parameter[1] |= BIT0; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n", - bt_lna_cons_on ? "ON!!" : "OFF!!", - h2c_parameter[0] << 8 | h2c_parameter[1]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n", + bt_lna_cons_on ? "ON!!" : "OFF!!", + h2c_parameter[0] << 8 | h2c_parameter[1]); btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); } @@ -627,15 +656,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist, static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist, bool force_exec, bool bt_lna_cons_on) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s BT Constrain = %s\n", - (force_exec ? "force" : ""), - ((bt_lna_cons_on) ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s BT Constrain = %s\n", + (force_exec ? "force" : ""), + ((bt_lna_cons_on) ? 
"ON" : "OFF")); coex_dm->cur_bt_lna_constrain = bt_lna_cons_on; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n", coex_dm->pre_bt_lna_constrain, coex_dm->cur_bt_lna_constrain); @@ -652,16 +683,17 @@ static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist, u8 bt_psd_mode) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[2] = {0}; h2c_parameter[0] = 0x2; /* opCode, 0x2 = BT_SET_PSD_MODE */ h2c_parameter[1] = bt_psd_mode; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n", - h2c_parameter[1], - h2c_parameter[0] << 8 | h2c_parameter[1]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n", + h2c_parameter[1], + h2c_parameter[0] << 8 | h2c_parameter[1]); btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); } @@ -669,15 +701,17 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist, bool force_exec, u8 bt_psd_mode) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s BT PSD mode = 0x%x\n", - (force_exec ? "force" : ""), bt_psd_mode); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s BT PSD mode = 0x%x\n", + (force_exec ? "force" : ""), bt_psd_mode); coex_dm->cur_bt_psd_mode = bt_psd_mode; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n", - coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n", + coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode); if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode) return; @@ -691,6 +725,7 @@ static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist, bool enable_auto_report) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; h2c_parameter[0] = 0; @@ -698,10 +733,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist, if (enable_auto_report) h2c_parameter[0] |= BIT0; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_auto_report ? "Enabled!!" : "Disabled!!"), - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_auto_report ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); } @@ -710,15 +745,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist, bool force_exec, bool enable_auto_report) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? "force to" : ""), - ((enable_auto_report) ? "Enabled" : "Disabled")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), + ((enable_auto_report) ? 
"Enabled" : "Disabled")); coex_dm->cur_bt_auto_report = enable_auto_report; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", coex_dm->pre_bt_auto_report, coex_dm->cur_bt_auto_report); @@ -735,16 +772,18 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, bool force_exec, u8 fw_dac_swing_lvl) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s set FW Dac Swing level = %d\n", - (force_exec ? "force to" : ""), fw_dac_swing_lvl); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? "force to" : ""), fw_dac_swing_lvl); coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n", - coex_dm->pre_fw_dac_swing_lvl, - coex_dm->cur_fw_dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); if (coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl) @@ -760,10 +799,12 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, bool rx_rf_shrink_on) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (rx_rf_shrink_on) { /* Shrink RF Rx LPF corner */ - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc); } else { @@ -771,8 +812,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, * After initialized, we can use coex_dm->bt_rf0x1e_backup */ if (btcoexist->initilized) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Resume RF Rx LPF corner!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Resume RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup); @@ -783,17 +824,19 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist, bool force_exec, bool rx_rf_shrink_on) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn Rx RF Shrink = %s\n", + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Rx RF Shrink = %s\n", (force_exec ? "force to" : ""), ((rx_rf_shrink_on) ? 
"ON" : "OFF")); coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n", - coex_dm->pre_rf_rx_lpf_shrink, - coex_dm->cur_rf_rx_lpf_shrink); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); if (coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink) @@ -808,6 +851,7 @@ static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist, static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, bool low_penalty_ra) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[6] = {0}; h2c_parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */ @@ -824,9 +868,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; } - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" : "OFF!!")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -834,17 +878,19 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist, bool force_exec, bool low_penalty_ra) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + /*return;*/ - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn LowPenaltyRA = %s\n", - (force_exec ? "force to" : ""), - ((low_penalty_ra) ? "ON" : "OFF")); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec ? "force to" : ""), + ((low_penalty_ra) ? "ON" : "OFF")); coex_dm->cur_low_penalty_ra = low_penalty_ra; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n", - coex_dm->pre_low_penalty_ra, + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n", + coex_dm->pre_low_penalty_ra, coex_dm->cur_low_penalty_ra); if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) @@ -859,10 +905,11 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, u32 level) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 val = (u8)level; - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], Write SwDacSwing = 0x%x\n", level); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val); } @@ -880,21 +927,23 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec, bool dac_swing_on, u32 dac_swing_lvl) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n", - (force_exec ? "force to" : ""), - ((dac_swing_on) ? "ON" : "OFF"), - dac_swing_lvl); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n", + (force_exec ? "force to" : ""), + ((dac_swing_on) ? 
"ON" : "OFF"), + dac_swing_lvl); coex_dm->cur_dac_swing_on = dac_swing_on; coex_dm->cur_dac_swing_lvl = dac_swing_lvl; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n", - coex_dm->pre_dac_swing_on, - coex_dm->pre_dac_swing_lvl, - coex_dm->cur_dac_swing_on, - coex_dm->cur_dac_swing_lvl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && (coex_dm->pre_dac_swing_lvl == @@ -912,13 +961,15 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist, bool adc_back_off) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (adc_back_off) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], BB BackOff Level On!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BB BackOff Level On!\n"); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3); } else { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], BB BackOff Level Off!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BB BackOff Level Off!\n"); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1); } } @@ -926,17 +977,19 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist, static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist, bool force_exec, bool adc_back_off) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s turn AdcBackOff = %s\n", - (force_exec ? "force to" : ""), - ((adc_back_off) ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn AdcBackOff = %s\n", + (force_exec ? "force to" : ""), + ((adc_back_off) ? 
"ON" : "OFF")); coex_dm->cur_adc_back_off = adc_back_off; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n", - coex_dm->pre_adc_back_off, - coex_dm->cur_adc_back_off); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n", + coex_dm->pre_adc_back_off, + coex_dm->cur_adc_back_off); if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) return; @@ -950,20 +1003,22 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - btc_alg_dbg(ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -971,28 +1026,30 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist, bool force_exec, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - btc_alg_dbg(ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", - (force_exec ? "force to" : ""), - val0x6c0, val0x6c4, val0x6c8, val0x6cc); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? 
"force to" : ""), + val0x6c0, val0x6c4, val0x6c8, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; coex_dm->cur_val0x6cc = val0x6cc; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n", - coex_dm->pre_val0x6c0, - coex_dm->pre_val0x6c4, - coex_dm->pre_val0x6c8, - coex_dm->pre_val0x6cc); - btc_alg_dbg(ALGO_TRACE_SW_DETAIL, - "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n", - coex_dm->cur_val0x6c0, - coex_dm->cur_val0x6c4, - coex_dm->cur_val0x6c8, - coex_dm->cur_val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n", + coex_dm->pre_val0x6c0, + coex_dm->pre_val0x6c4, + coex_dm->pre_val0x6c8, + coex_dm->pre_val0x6cc); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n", + coex_dm->cur_val0x6c0, + coex_dm->cur_val0x6c4, + coex_dm->cur_val0x6c8, + coex_dm->cur_val0x6cc); if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && @@ -1012,14 +1069,15 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, bool enable) { + struct rtl_priv *rtlpriv = btcoex->adapter; u8 h2c_parameter[1] = {0}; if (enable) h2c_parameter[0] |= BIT0;/* function enable */ - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter); } @@ -1027,16 +1085,18 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -1051,6 +1111,7 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[5]; h2c_parameter[0] = byte1; @@ -1065,13 +1126,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1] << 24 | - h2c_parameter[2] << 16 | - h2c_parameter[3] << 8 | - h2c_parameter[4]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | + h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | + h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -1165,20 +1226,22 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist, static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], %s turn %s PS TDMA, type = %d\n", - (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), - type); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], %s turn %s PS TDMA, type = %d\n", + (force_exec ? "force to" : ""), (turn_on ? 
"ON" : "OFF"), + type); coex_dm->cur_ps_tdma_on = turn_on; coex_dm->cur_ps_tdma = type; if (!force_exec) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", - coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", - coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1348,6 +1411,7 @@ static void halbtc8821a2ant_bt_inquiry_page(struct btc_coexist *btcoexist) static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool common = false, wifi_connected = false, wifi_busy = false; bool low_pwr_disable = false; @@ -1364,8 +1428,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi IPS + BT IPS!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi IPS + BT IPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -1382,13 +1446,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Busy + BT IPS!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Busy + BT IPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi LPS + BT IPS!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi LPS + BT IPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } @@ -1406,8 +1470,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi IPS + BT LPS!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi IPS + BT LPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -1423,13 +1487,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); if (wifi_busy) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Busy + BT LPS!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Busy + BT LPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi LPS + BT LPS!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi LPS + BT LPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } @@ -1448,8 +1512,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi IPS + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi IPS + BT Busy!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, 
NORMAL_EXEC, false, 1); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -1468,12 +1532,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi Busy + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Busy + BT Busy!!\n"); common = false; } else { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Wifi LPS + BT Busy!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi LPS + BT Busy!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 21); @@ -1494,9 +1558,11 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause, int result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 71) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -1591,8 +1657,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); @@ -1695,9 +1761,11 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause, static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause, int result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); @@ -1786,8 +1854,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); @@ -1881,9 +1949,11 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause, static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause, int result) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (tx_pause) { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); @@ -1972,8 +2042,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); @@ -2068,6 +2138,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, bool sco_hid, bool tx_pause, u8 max_interval) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static long up, dn, m, n, wait_count; /* 0: no change, +1: increase WiFi duration, * -1: decrease WiFi duration @@ -2075,13 +2146,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, int result; u8 retry_count = 0; - btc_alg_dbg(ALGO_TRACE_FW, - "[BTCoex], 
TdmaDurationAdjust()\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], TdmaDurationAdjust()\n"); if (coex_dm->reset_tdma_adjust) { coex_dm->reset_tdma_adjust = false; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); if (sco_hid) { if (tx_pause) { if (max_interval == 1) { @@ -2094,11 +2165,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 14); coex_dm->tdma_adj_type = 14; - } else if (max_interval == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; } else { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2116,11 +2182,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 10); coex_dm->tdma_adj_type = 10; - } else if (max_interval == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; } else { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2140,11 +2201,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 6); coex_dm->tdma_adj_type = 6; - } else if (max_interval == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; } else { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2162,11 +2218,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; - } else if (max_interval == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; } else { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -2185,10 +2236,10 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, } else { /* accquire the BT TRx retry count from BT_Info byte2 */ retry_count = coex_sta->bt_retry_cnt; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], retry_count = %d\n", retry_count); - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], retry_count = %d\n", retry_count); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n", (int)up, (int)dn, (int)m, (int)n, (int)wait_count); result = 0; wait_count++; @@ -2210,8 +2261,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi duration!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Increase wifi duration!!\n"); } } else if (retry_count <= 3) { /* <=3 retry in the last 2-second duration */ @@ -2240,8 +2291,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); } } else { /* retry count > 3, if retry count > 3 happens once, @@ -2262,12 +2313,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); } - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, 
- "[BTCoex], max Interval = %d\n", max_interval); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], max Interval = %d\n", max_interval); if (max_interval == 1) btc8821a2_int1(btcoexist, tx_pause, result); else if (max_interval == 2) @@ -2283,8 +2334,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { bool scan = false, link = false, roam = false; - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); @@ -2295,8 +2346,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, coex_dm->tdma_adj_type); } else { - btc_alg_dbg(ALGO_TRACE_FW_DETAIL, - "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); } } @@ -2311,7 +2362,7 @@ static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4); @@ -2395,7 +2446,7 @@ static void halbtc8821a2ant_action_hid(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -2475,7 +2526,7 @@ static void halbtc8821a2ant_action_a2dp(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); /* fw dac swing is called in btc8821a2ant_tdma_dur_adj() * halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -2543,7 +2594,7 @@ static void halbtc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) bt_info_ext = coex_sta->bt_info_ext; wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); /*fw dac swing is called in btc8821a2ant_tdma_dur_adj() *halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -2612,7 +2663,7 @@ static void halbtc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -2692,7 +2743,7 @@ static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -2768,7 +2819,7 @@ static void 
halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) bt_info_ext = coex_sta->bt_info_ext; wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -2875,7 +2926,7 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -2958,7 +3009,7 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) bt_info_ext = coex_sta->bt_info_ext; wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -3066,7 +3117,7 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) bt_info_ext = coex_sta->bt_info_ext; wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); @@ -3167,12 +3218,13 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_under_5g = false; u8 algorithm = 0; if (btcoexist->manual_control) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Manual control!!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Manual control!!!\n"); return; } @@ -3180,8 +3232,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); if (wifi_under_5g) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n"); halbtc8821a2ant_coex_under_5g(btcoexist); return; } @@ -3189,82 +3241,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) algorithm = halbtc8821a2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], BT is under inquiry/page scan !!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT is under inquiry/page scan !!\n"); halbtc8821a2ant_bt_inquiry_page(btcoexist); return; } coex_dm->cur_algorithm = algorithm; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); if (halbtc8821a2ant_is_common_action(btcoexist)) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant common\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant common\n"); coex_dm->reset_tdma_adjust = true; } else { if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n", 
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n", coex_dm->pre_algorithm, coex_dm->cur_algorithm); coex_dm->reset_tdma_adjust = true; } switch (coex_dm->cur_algorithm) { case BT_8821A_2ANT_COEX_ALGO_SCO: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = SCO\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = SCO\n"); halbtc8821a2ant_action_sco(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HID\n"); halbtc8821a2ant_action_hid(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = A2DP\n"); halbtc8821a2ant_action_a2dp(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); halbtc8821a2ant_action_a2dp_pan_hs(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n"); halbtc8821a2ant_action_pan_edr(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANHS: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HS mode\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HS mode\n"); halbtc8821a2ant_action_pan_hs(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n"); halbtc8821a2ant_action_pan_edr_a2dp(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n"); halbtc8821a2ant_action_pan_edr_hid(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n"); btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_HID_A2DP: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n"); halbtc8821a2ant_action_hid_a2dp(btcoexist); break; default: - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); halbtc8821a2ant_coex_all_off(btcoexist); break; } @@ -3281,10 +3333,11 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) */ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u1tmp = 0; - btc_iface_dbg(INTF_INIT, - "[BTCoex], 2Ant Init HW Config!!\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], 2Ant Init HW Config!!\n"); /* 
backup rf 0x1e value */ coex_dm->bt_rf0x1e_backup = @@ -3312,13 +3365,12 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist) btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); } -void -ex_halbtc8821a2ant_init_coex_dm( - struct btc_coexist *btcoexist - ) +void ex_halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Coex Mechanism Init!!\n"); halbtc8821a2ant_init_coex_dm(btcoexist); } @@ -3341,7 +3393,7 @@ ex_halbtc8821a2ant_display_coex_info( u32 fw_ver = 0, bt_patch_ver = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + "\r\n ============[BT Coexist info]============"); if (!board_info->bt_exist) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); @@ -3349,23 +3401,23 @@ ex_halbtc8821a2ant_display_coex_info( } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", - board_info->pg_ant_num, board_info->btdm_ant_num); + "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); if (btcoexist->manual_control) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "[Action Manual control]!!"); + "\r\n %-35s", "[Action Manual control]!!"); } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", + "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", ((stack_info->profile_notified) ? "Yes" : "No"), stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", + "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, fw_ver, bt_patch_ver, bt_patch_ver); @@ -3377,26 +3429,26 @@ ex_halbtc8821a2ant_display_coex_info( btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d(%d)", + "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsMode(HsChnl)", wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %3ph ", + "\r\n %-35s = %3ph ", "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi", + "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", + "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist, @@ -3408,7 +3460,7 @@ ex_halbtc8821a2ant_display_coex_info( btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %s/ %s ", "Wifi status", + "\r\n %-35s = %s / %s/ %s ", "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : (((BTC_WIFI_BW_HT40 == wifi_bw) ? 
"HT40" : "HT20"))), @@ -3417,7 +3469,7 @@ ex_halbtc8821a2ant_display_coex_info( "uplink" : "downlink"))); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", + "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : ((BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status) ? "idle" : ((BT_8821A_2ANT_BT_STATUS_CON_IDLE == @@ -3426,7 +3478,7 @@ ex_halbtc8821a2ant_display_coex_info( if (stack_info->profile_notified) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", + "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist); @@ -3436,117 +3488,117 @@ ex_halbtc8821a2ant_display_coex_info( bt_info_ext = coex_sta->bt_info_ext; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", + "BT Info A2DP rate", (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8821a_2ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + "\r\n %-35s = %7ph(%d)", + glbt_info_src_8821a_2ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", - "PS state, IPS/LPS", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); /* Sw mechanism*/ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Sw mechanism]============"); + "============[Sw mechanism]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d/ %d ", - "SM1[ShRf/ LpRA/ LimDig/ btLna]", - coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, - coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain); + "\r\n %-35s = %d/ %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig/ btLna]", + coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, + coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d(0x%x) ", - "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", - coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, - coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + "\r\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); /* Fw mechanism*/ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + "============[Fw mechanism]============"); if (!btcoexist->manual_control) { ps_tdma_case = coex_dm->cur_ps_tdma; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d", - "PS TDMA", - coex_dm->ps_tdma_para, ps_tdma_case); + "\r\n %-35s = %5ph case-%d", + "PS TDMA", + coex_dm->ps_tdma_para, ps_tdma_case); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", - coex_dm->cur_dec_bt_pwr, - coex_dm->cur_ignore_wlan_act); + "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", + coex_dm->cur_dec_bt_pwr, + coex_dm->cur_ignore_wlan_act); } /* Hw setting*/ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", 
"============[Hw setting]============"); + "\r\n %-35s", "============[Hw setting]============"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", - coex_dm->bt_rf0x1e_backup); + "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", + coex_dm->bt_rf0x1e_backup); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ", - "0x778 (W_Act)/ 0x6cc (CoTab Sel)", - u1tmp[0], u1tmp[1]); + "0x778 (W_Act)/ 0x6cc (CoTab Sel)", + u1tmp[0], u1tmp[1]); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db); u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x8db(ADC)/0xc5b[29:25](DAC)", - ((u1tmp[0]&0x60)>>5), ((u1tmp[1]&0x3e)>>1)); + "0x8db(ADC)/0xc5b[29:25](DAC)", + ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1)); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)", - u4tmp[0]&0xff, ((u4tmp[0]&0x30000000)>>28)); + "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)", + u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28)); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x40/ 0x4c[24:23]/ 0x974", - u1tmp[0], ((u4tmp[0]&0x01800000)>>23), u4tmp[1]); + "0x40/ 0x4c[24:23]/ 0x974", + u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", - u4tmp[0], u1tmp[0]); + "0x550(bcn ctrl)/0x522", + u4tmp[0], u1tmp[0]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xc50(DIG)/0xa0a(CCK-TH)", - u4tmp[0], u1tmp[0]); + "0xc50(DIG)/0xa0a(CCK-TH)", + u4tmp[0], u1tmp[0]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b); u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "OFDM-FA/ CCK-FA", - u4tmp[0], (u1tmp[0]<<8) + u1tmp[1]); + "OFDM-FA/ CCK-FA", + u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8", - u4tmp[0], u4tmp[1], u4tmp[2]); + "0x6c0/0x6c4/0x6c8", + u4tmp[0], u4tmp[1], u4tmp[2]); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770 (hi-pri Rx/Tx)", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); + "0x770 (hi-pri Rx/Tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", "0x774(low-pri Rx/Tx)", coex_sta->low_priority_rx, coex_sta->low_priority_tx); @@ -3554,22 +3606,24 @@ ex_halbtc8821a2ant_display_coex_info( /* Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang*/ u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", - 
"0x41b (mgntQ hang chk == 0xf)", - u1tmp[0]); + "0x41b (mgntQ hang chk == 0xf)", + u1tmp[0]); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); } void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_IPS_ENTER == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; halbtc8821a2ant_coex_all_off(btcoexist); } else if (BTC_IPS_LEAVE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; /*halbtc8821a2ant_init_coex_dm(btcoexist);*/ } @@ -3577,52 +3631,59 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_LPS_ENABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_SCAN_START == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN START notify\n"); } else if (BTC_SCAN_FINISH == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], SCAN FINISH notify\n"); } } void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (BTC_ASSOCIATE_START == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT START notify\n"); } else if (BTC_ASSOCIATE_FINISH == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CONNECT FINISH notify\n"); } } void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[3] = {0}; u32 wifi_bw; u8 wifi_central_chnl; if (BTC_MEDIA_CONNECT == type) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA connect notify\n"); } else { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], MEDIA disconnect notify\n"); } /* only 2.4G we need to inform bt the chnl mask*/ @@ -3643,26 +3704,29 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - btc_alg_dbg(ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0] << 16 | - h2c_parameter[1] << 8 | - h2c_parameter[2]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | + h2c_parameter[1] << 8 | 
+ h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + if (type == BTC_PACKET_DHCP) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], DHCP Packet notify\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], DHCP Packet notify\n"); } } void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, u8 length) { + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 bt_info = 0; u8 i, rsp_source = 0; static u32 set_bt_lna_cnt, set_bt_psd_mode; @@ -3676,19 +3740,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Bt info[%d], length = %d, hex data = [", + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Bt info[%d], length = %d, hex data = [", rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length-1) { - btc_iface_dbg(INTF_NOTIFY, - "0x%02x]\n", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x]\n", tmp_buf[i]); } else { - btc_iface_dbg(INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "0x%02x, ", tmp_buf[i]); } } @@ -3814,8 +3878,10 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist) { - btc_iface_dbg(INTF_NOTIFY, - "[BTCoex], Halt notify\n"); + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Halt notify\n"); halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); @@ -3823,36 +3889,37 @@ void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist) void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; static u8 dis_ver_info_cnt; u32 fw_ver = 0, bt_patch_ver = 0; struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - btc_alg_dbg(ALGO_TRACE, - "[BTCoex], ==========================Periodical===========================\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); - btc_iface_dbg(INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, - board_info->btdm_ant_num, - board_info->btdm_ant_pos); - btc_iface_dbg(INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", - stack_info->profile_notified ? "Yes" : "No", - stack_info->hci_version); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + stack_info->profile_notified ? 
"Yes" : "No", + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); - btc_iface_dbg(INTF_INIT, - "[BTCoex], ****************************************************************\n"); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], ****************************************************************\n"); } halbtc8821a2ant_query_bt_info(btcoexist); -- cgit v1.2.3 From 41880bb38a558fab804d826488bbaef332f179a2 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 3 Feb 2017 11:35:21 -0600 Subject: rtlwifi: btcoexist: Convert halbtcoutsrc.c to use standard debugging The routines in btcoexist use different debugging routines than are used in the other drivers. This patch converts halbtcoutsrc.c to use the standard routines. It also deletes the definitions of the now-unused debugging macros, and turns on compilation of all the routines in btcoexist. Signed-off-by: Larry Finger Cc: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile | 7 +++++-- .../net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 4 ++-- .../net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 12 ------------ 3 files changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile index 0ef0d310fc24..20582df0465c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile @@ -1,5 +1,8 @@ -btcoexist-objs := halbtc8723b2ant.o \ - halbtc8192e2ant.o \ +btcoexist-objs := halbtc8192e2ant.o \ + halbtc8723b1ant.o \ + halbtc8723b2ant.o \ + halbtc8821a1ant.o \ + halbtc8821a2ant.o \ halbtcoutsrc.o \ rtl_btc.o diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 9a89ae0df39b..150aeb8e79d1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -141,8 +141,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) if (rtlphy->current_channel != 0) chnl = rtlphy->current_channel; - btc_alg_dbg(ALGO_TRACE, - "static halbtc_get_wifi_central_chnl:%d\n", chnl); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "static halbtc_get_wifi_central_chnl:%d\n", chnl); return chnl; } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index f9e7c5e77f51..601bbe1d22b3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -116,18 +116,6 @@ extern u32 btc_dbg_type[]; #define WIFI_P2P_GO_CONNECTED BIT3 #define WIFI_P2P_GC_CONNECTED BIT4 -#define btc_alg_dbg(dbgflag, fmt, ...) \ -do { \ - if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag)) \ - printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ -} while (0) -#define btc_iface_dbg(dbgflag, fmt, ...) 
\ -do { \ - if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag)) \ - printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ -} while (0) - - #define BTC_RSSI_HIGH(_rssi_) \ ((_rssi_ == BTC_RSSI_STATE_HIGH || \ _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false) -- cgit v1.2.3 From 6773386f977ce5af339f9678fa2918909a946c6b Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sun, 5 Feb 2017 10:24:22 -0600 Subject: rtlwifi: rtl8192c-common: Fix "BUG: KASAN: Kernels built with CONFIG_KASAN=y report the following BUG for rtl8192cu and rtl8192c-common: ================================================================== BUG: KASAN: slab-out-of-bounds in rtl92c_dm_bt_coexist+0x858/0x1e40 [rtl8192c_common] at addr ffff8801c90edb08 Read of size 1 by task kworker/0:1/38 page:ffffea0007243800 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0 flags: 0x8000000000004000(head) page dumped because: kasan: bad access detected CPU: 0 PID: 38 Comm: kworker/0:1 Not tainted 4.9.7-gentoo #3 Hardware name: Gigabyte Technology Co., Ltd. To be filled by O.E.M./Z77-DS3H, BIOS F11a 11/13/2013 Workqueue: rtl92c_usb rtl_watchdog_wq_callback [rtlwifi] 0000000000000000 ffffffff829eea33 ffff8801d7f0fa30 ffff8801c90edb08 ffffffff824c0f09 ffff8801d4abee80 0000000000000004 0000000000000297 ffffffffc070b57c ffff8801c7aa7c48 ffff880100000004 ffffffff000003e8 Call Trace: [] ? dump_stack+0x5c/0x79 [] ? kasan_report_error+0x4b9/0x4e0 [] ? _usb_read_sync+0x15c/0x280 [rtl_usb] [] ? __asan_report_load1_noabort+0x45/0x50 [] ? rtl92c_dm_bt_coexist+0x858/0x1e40 [rtl8192c_common] [] ? rtl92c_dm_bt_coexist+0x858/0x1e40 [rtl8192c_common] [] ? rtl92c_dm_rf_saving+0x96e/0x1330 [rtl8192c_common] ... The problem is due to rtl8192ce and rtl8192cu sharing routines, and having different layouts of struct rtl_pci_priv, which is used by rtl8192ce, and struct rtl_usb_priv, which is used by rtl8192cu. The problem was resolved by placing the struct bt_coexist_info at the head of each of those private areas. 
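To make the failure mode concrete, here is a minimal sketch of the layout hazard. The struct names and sizes below are invented for illustration and are not the real rtlwifi definitions; only the idea of bt_coexist_info sitting at different offsets in the two private areas is taken from the patch.

    /* Illustrative only -- not the rtlwifi structures. */
    struct bt_coexist_info_sketch { int bt_coex_enable; };

    struct sketch_pci_priv {
            char pci_state[512];                       /* large bus-specific block */
            struct bt_coexist_info_sketch bt_coexist;  /* ends up at offset 512 */
    };

    struct sketch_usb_priv {
            char usb_state[64];                        /* much smaller block */
            struct bt_coexist_info_sketch bt_coexist;  /* ends up at offset 64 */
    };

    /* The rtl8192c code shared by both drivers only sees an opaque priv
     * pointer.  If it is written against the PCI layout but handed the USB
     * private area, it dereferences offset 512 inside a ~68 byte object,
     * which is the slab-out-of-bounds read KASAN flags.  With
     * bt_coexist_info placed first in both structs the offset is 0 in
     * either case, so a shared accessor like this is valid for both buses.
     */
    static inline struct bt_coexist_info_sketch *sketch_get_btc(void *priv)
    {
            return &((struct sketch_pci_priv *)priv)->bt_coexist;
    }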
Reported-and-tested-by: Dmitry Osipenko Signed-off-by: Larry Finger Cc: Stable # 4.0+ Cc: Dmitry Osipenko Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/pci.h | 4 ++-- drivers/net/wireless/realtek/rtlwifi/usb.h | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h index 578b1d900bfb..d9039ea10ba4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.h +++ b/drivers/net/wireless/realtek/rtlwifi/pci.h @@ -271,10 +271,10 @@ struct mp_adapter { }; struct rtl_pci_priv { + struct bt_coexist_info bt_coexist; + struct rtl_led_ctl ledctl; struct rtl_pci dev; struct mp_adapter ndis_adapter; - struct rtl_led_ctl ledctl; - struct bt_coexist_info bt_coexist; }; #define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv)) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h index a6d43d2ecd36..cdb9e06db89e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.h +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h @@ -146,8 +146,9 @@ struct rtl_usb { }; struct rtl_usb_priv { - struct rtl_usb dev; + struct bt_coexist_info bt_coexist; struct rtl_led_ctl ledctl; + struct rtl_usb dev; }; #define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv)) -- cgit v1.2.3 From b2d60415684663d9dfc616ed97a6f78b3c73c29a Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Thu, 2 Feb 2017 12:47:45 +0200 Subject: ath10k: prefer unsigned int over just unsigned Fixes new checkpatch warnings: drivers/net/wireless/ath/ath10k/htt.h:1639: Prefer 'unsigned int' to bare use of 'unsigned' drivers/net/wireless/ath/ath10k/htt.h:1660: Prefer 'unsigned int' to bare use of 'unsigned' Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 44b25cf00553..dfe9627df7d0 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1636,7 +1636,7 @@ struct ath10k_htt { int size; /* size - 1 */ - unsigned size_mask; + unsigned int size_mask; /* how many rx buffers to keep in the ring */ int fill_level; @@ -1657,7 +1657,7 @@ struct ath10k_htt { /* where HTT SW has processed bufs filled by rx MAC DMA */ struct { - unsigned msdu_payld; + unsigned int msdu_payld; } sw_rd_idx; /* -- cgit v1.2.3 From 019e4280fac9bf9b382ed3623a574b6946e2870c Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Thu, 2 Feb 2017 12:47:56 +0200 Subject: ath10k: use names in function definition arguments Fixes new checkpatch warnings: drivers/net/wireless/ath/ath10k/htt.h:1823: function definition argument 'struct sk_buff *' should also have an identifier name drivers/net/wireless/ath/ath10k/wmi.h:6607: function definition argument 'struct wmi_start_scan_arg *' should also have an identifier name Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 2 +- drivers/net/wireless/ath/ath10k/wmi.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index dfe9627df7d0..90c2f72666b8 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1820,7 +1820,7 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt 
*htt, u16 msdu_id); -int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); +int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu); int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 861c2d8c6e8c..1333cbe48693 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6604,7 +6604,7 @@ struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len); int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); -void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); +void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg); void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, struct ath10k_fw_stats_pdev *dst); -- cgit v1.2.3 From 39bfe9f7c0c9ac84025d26a073d2a5de4f9b1c99 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Thu, 2 Feb 2017 12:48:08 +0200 Subject: ath10k: few whitespace fixes Fixes checkpatch warnings: drivers/net/wireless/ath/ath10k/pci.c:1593: Statements should start on a tabstop drivers/net/wireless/ath/ath10k/ce.c:962: Alignment should match open parenthesis Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 6 +++--- drivers/net/wireless/ath/ath10k/pci.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index da466ab2d823..4045657e0a6e 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -959,9 +959,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, */ dest_ring->base_addr_owner_space_unaligned = dma_zalloc_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + - CE_DESC_RING_ALIGN), - &base_addr, GFP_KERNEL); + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); if (!dest_ring->base_addr_owner_space_unaligned) { kfree(dest_ring); return ERR_PTR(-ENOMEM); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 79e61459bc6c..cbe64c50e4b0 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1590,7 +1590,7 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) /* TODO: Find appropriate register configuration for QCA99X0 * to mask irq/MSI. */ - break; + break; } } -- cgit v1.2.3 From ee8b08a1be82edca6cfc3cb7e86f674e0ede9de2 Mon Sep 17 00:00:00 2001 From: Maharaja Kennadyrajan Date: Thu, 2 Feb 2017 08:32:19 +0200 Subject: ath10k: add debugfs support to get per peer tids log via tracing This patch provides support to get per peer tids log. echo 1 > /sys/kernel/debug/ieee80211/phyX/netdev\:wlanX/stations/ XX:XX/peer_debug_trigger These logs will be the part of FWLOGS which we collect the logs via tracing interface. Here we will get the peer tigd logs only once(not repeatedly) when we write 1 to the debugfs file. 
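For context, a typical collection flow looks roughly like the transcript below; the phy/interface/station names are placeholders, and trace-cmd is only one possible consumer of the ath10k tracepoints, not the required tool:

    # capture ath10k firmware log trace events (placeholder names)
    trace-cmd record -e ath10k &
    # request a one-shot dump of the per-peer tid state for this station
    echo 1 > /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/stations/aa:bb:cc:dd:ee:ff/peer_debug_trigger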
Signed-off-by: Maharaja Kennadyrajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debugfs_sta.c | 65 +++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi.h | 1 + 2 files changed, 66 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index fce6f8137d33..7353e7ea88f1 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -306,6 +306,69 @@ static const struct file_operations fops_delba = { .llseek = default_llseek, }; +static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + char buf[8]; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, + "Write 1 to once trigger the debug logs\n"); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t +ath10k_dbg_sta_write_peer_debug_trigger(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + u8 peer_debug_trigger; + int ret; + + if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger)) + return -EINVAL; + + if (peer_debug_trigger != 1) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto out; + } + + ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr, + WMI_PEER_DEBUG, peer_debug_trigger); + if (ret) { + ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n", + ret); + goto out; + } +out: + mutex_unlock(&ar->conf_mutex); + return count; +} + +static const struct file_operations fops_peer_debug_trigger = { + .open = simple_open, + .read = ath10k_dbg_sta_read_peer_debug_trigger, + .write = ath10k_dbg_sta_write_peer_debug_trigger, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { @@ -314,4 +377,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba); debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp); debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba); + debugfs_create_file("peer_debug_trigger", 0600, dir, sta, + &fops_peer_debug_trigger); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 1333cbe48693..cec713c99931 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -5811,6 +5811,7 @@ enum wmi_peer_param { WMI_PEER_CHAN_WIDTH = 0x4, WMI_PEER_NSS = 0x5, WMI_PEER_USE_4ADDR = 0x6, + WMI_PEER_DEBUG = 0xa, WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ }; -- cgit v1.2.3 From cb4281528b62207918b1e95827cad7527aa4dbaa Mon Sep 17 00:00:00 2001 From: Tamizh chelvam Date: Thu, 2 Feb 2017 08:32:18 +0200 Subject: ath10k: fix boot failure in UTF mode/testmode Rx filter reset and the dynamic tx switch mode (EXT_RESOURCE_CFG) configuration are causing the following errors when UTF firmware is loaded to the target. 
Error message 1: [ 598.015629] ath10k_pci 0001:01:00.0: failed to ping firmware: -110 [ 598.020828] ath10k_pci 0001:01:00.0: failed to reset rx filter: -110 [ 598.141556] ath10k_pci 0001:01:00.0: failed to start core (testmode): -110 Error message 2: [ 668.615839] ath10k_ahb a000000.wifi: failed to send ext resource cfg command : -95 [ 668.618902] ath10k_ahb a000000.wifi: failed to start core (testmode): -95 Avoiding these configurations while bringing the target in testmode is solving the problem. Cc: stable@vger.kernel.org Signed-off-by: Tamizh chelvam Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c2afcca6fd60..c27e7ea38a65 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1996,7 +1996,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ar->hw->wiphy->fw_version); - if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) { + if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) && + mode == ATH10K_FIRMWARE_MODE_NORMAL) { val = 0; if (ath10k_peer_stats_enabled(ar)) val = WMI_10_4_PEER_STATS; @@ -2049,10 +2050,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, * possible to implicitly make it correct by creating a dummy vdev and * then deleting it. */ - status = ath10k_core_reset_rx_filter(ar); - if (status) { - ath10k_err(ar, "failed to reset rx filter: %d\n", status); - goto err_hif_stop; + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_core_reset_rx_filter(ar); + if (status) { + ath10k_err(ar, + "failed to reset rx filter: %d\n", status); + goto err_hif_stop; + } } /* If firmware indicates Full Rx Reorder support it must be used in a -- cgit v1.2.3 From 7f622593cc5add77a99cd39404e8a851be9de792 Mon Sep 17 00:00:00 2001 From: Ashok Raj Nagarajan Date: Wed, 1 Feb 2017 00:06:51 +0530 Subject: ath10k: fix reading sram contents for QCA4019 With QCA4019 platform, SRAM address can be accessed directly from host but currently, we are assuming sram addresses cannot be accessed directly and hence we convert the addresses. While there, clean up growing hw checks during conversion of target CPU address to CE address. Now we have function pointer pertaining to different chips. 
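For reference, the effect of the new QCA4019 hook can be worked through with a couple of example addresses. The arithmetic below follows the helper added in this patch; the BAR value itself is simply whatever PCIE_BAR_REG_ADDRESS reads back at runtime:

    /* region = addr & 0xfffff; the SRAM window after masking is
     * 0xc0000..0xfffff (256 KB).
     *
     * addr 0x000c1000 -> region 0xc1000, inside the SRAM window, so
     *      ce_addr = BAR | 0xc1000 and the host accesses SRAM directly,
     *      with no extra conversion.
     *
     * addr 0x00401234 -> region 0x01234, outside the window, so
     *      ce_addr = BAR | 0x100000 | 0x01234, i.e. the usual indirect
     *      conversion still applies for non-SRAM addresses.
     */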
Signed-off-by: Ashok Raj Nagarajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 23 ++++++++++++++++ drivers/net/wireless/ath/ath10k/pci.c | 51 ++++++++++++++++++++++------------- drivers/net/wireless/ath/ath10k/pci.h | 5 ++++ 3 files changed, 60 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 766c63bf05c4..45226dbee5ce 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = { MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match); +#define QCA4019_SRAM_ADDR 0x000C0000 +#define QCA4019_SRAM_LEN 0x00040000 /* 256 kb */ + static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar) { return &((struct ath10k_pci *)ar->drv_priv)->ahb[0]; @@ -699,6 +702,25 @@ out: return ret; } +static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) +{ + u32 val = 0, region = addr & 0xfffff; + + val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); + + if (region >= QCA4019_SRAM_ADDR && region <= + (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) { + /* SRAM contents for QCA4019 can be directly accessed and + * no conversions are required + */ + val |= region; + } else { + val |= 0x100000 | region; + } + + return val; +} + static const struct ath10k_hif_ops ath10k_ahb_hif_ops = { .tx_sg = ath10k_pci_hif_tx_sg, .diag_read = ath10k_pci_hif_diag_read, @@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev) ar_pci->mem_len = ar_ahb->mem_len; ar_pci->ar = ar; ar_pci->bus_ops = &ath10k_ahb_bus_ops; + ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr; ret = ath10k_pci_setup_resource(ar); if (ret) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index cbe64c50e4b0..5d2f9b9922d3 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -840,31 +840,35 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr) ath10k_pci_rx_post(ar); } -static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) +static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) { - u32 val = 0; + u32 val = 0, region = addr & 0xfffff; - switch (ar->hw_rev) { - case ATH10K_HW_QCA988X: - case ATH10K_HW_QCA9887: - case ATH10K_HW_QCA6174: - case ATH10K_HW_QCA9377: - val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - CORE_CTRL_ADDRESS) & - 0x7ff) << 21; - break; - case ATH10K_HW_QCA9888: - case ATH10K_HW_QCA99X0: - case ATH10K_HW_QCA9984: - case ATH10K_HW_QCA4019: - val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); - break; - } + val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) + & 0x7ff) << 21; + val |= 0x100000 | region; + return val; +} + +static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) +{ + u32 val = 0, region = addr & 0xfffff; - val |= 0x100000 | (addr & 0xfffff); + val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); + val |= 0x100000 | region; return val; } +static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr)) + return -ENOTSUPP; + + return ar_pci->targ_cpu_to_ce_addr(ar, addr); +} + /* * Diagnostic read/write access is provided for startup/config/debug usage. 
* Caller must guarantee proper alignment, when applicable, and single user @@ -3170,6 +3174,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, bool pci_ps; int (*pci_soft_reset)(struct ath10k *ar); int (*pci_hard_reset)(struct ath10k *ar); + u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); switch (pci_dev->device) { case QCA988X_2_0_DEVICE_ID: @@ -3177,12 +3182,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev, pci_ps = false; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca988x_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; break; case QCA9887_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9887; pci_ps = false; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca988x_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; break; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: @@ -3190,30 +3197,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev, pci_ps = true; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca6174_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; break; case QCA99X0_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA99X0; pci_ps = false; pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; break; case QCA9984_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9984; pci_ps = false; pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; break; case QCA9888_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9888; pci_ps = false; pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; break; case QCA9377_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9377; pci_ps = true; pci_soft_reset = NULL; pci_hard_reset = ath10k_pci_qca6174_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; break; default: WARN_ON(1); @@ -3240,6 +3252,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->bus_ops = &ath10k_pci_bus_ops; ar_pci->pci_soft_reset = pci_soft_reset; ar_pci->pci_hard_reset = pci_hard_reset; + ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index c76789d5de99..c1e08ad63940 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -233,6 +233,11 @@ struct ath10k_pci { /* Chip specific pci full reset function */ int (*pci_hard_reset)(struct ath10k *ar); + /* chip specific methods for converting target CPU virtual address + * space to CE address space + */ + u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); + /* Keep this entry in the last, memory for struct ath10k_ahb is * allocated (ahb support enabled case) in the continuation of * this struct. -- cgit v1.2.3 From 03c95dbef64264b6d86d50c0f0d90fdf989e528e Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 1 Feb 2017 07:27:47 -0800 Subject: wcn36xx: Implement cancel_hw_scan In the even that the wcn36xx interface is brought down while a hw_scan is active we must abort and wait for the ongoing scan to signal completion to mac80211. 
Reported-by: Mart Raudsepp Fixes: 886039036c20 ("wcn36xx: Implement firmware assisted scan") Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 25 ++++++++++++++++++++++++- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 1 + 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 0002190c9041..7a0c2e7da7f6 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -574,6 +574,7 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work) struct cfg80211_scan_request *req = wcn->scan_req; u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX]; struct cfg80211_scan_info scan_info = {}; + bool aborted = false; int i; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels); @@ -585,6 +586,13 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work) wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN); for (i = 0; i < req->n_channels; i++) { + mutex_lock(&wcn->scan_lock); + aborted = wcn->scan_aborted; + mutex_unlock(&wcn->scan_lock); + + if (aborted) + break; + wcn->scan_freq = req->channels[i]->center_freq; wcn->scan_band = req->channels[i]->band; @@ -596,7 +604,7 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work) } wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN); - scan_info.aborted = false; + scan_info.aborted = aborted; ieee80211_scan_completed(wcn->hw, &scan_info); mutex_lock(&wcn->scan_lock); @@ -615,6 +623,8 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw, mutex_unlock(&wcn->scan_lock); return -EBUSY; } + + wcn->scan_aborted = false; wcn->scan_req = &hw_req->req; mutex_unlock(&wcn->scan_lock); @@ -623,6 +633,18 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw, return 0; } +static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wcn36xx *wcn = hw->priv; + + mutex_lock(&wcn->scan_lock); + wcn->scan_aborted = true; + mutex_unlock(&wcn->scan_lock); + + cancel_work_sync(&wcn->scan_work); +} + static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, enum nl80211_band band) { @@ -1034,6 +1056,7 @@ static const struct ieee80211_ops wcn36xx_ops = { .tx = wcn36xx_tx, .set_key = wcn36xx_set_key, .hw_scan = wcn36xx_hw_scan, + .cancel_hw_scan = wcn36xx_cancel_hw_scan, .bss_info_changed = wcn36xx_bss_info_changed, .set_rts_threshold = wcn36xx_set_rts_threshold, .sta_add = wcn36xx_sta_add, diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 35a6590c3ee5..7423998ddeb4 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -220,6 +220,7 @@ struct wcn36xx { int scan_freq; int scan_band; struct mutex scan_lock; + bool scan_aborted; /* DXE channels */ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */ -- cgit v1.2.3 From d63ffc45c5d3df15f6fc8c73079458ce4a111995 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 2 Feb 2017 10:14:50 +0100 Subject: ath9k: rename tx_complete_work to hw_check_work Also include common MAC alive check. 
This should make the hang checks more reliable for modes where beacons are not sent and is used as a starting point for further hang check improvements Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ath9k.h | 6 ++--- drivers/net/wireless/ath/ath9k/init.c | 1 + drivers/net/wireless/ath/ath9k/link.c | 46 ++++++++++++++++++---------------- drivers/net/wireless/ath/ath9k/main.c | 10 +++++--- drivers/net/wireless/ath/ath9k/xmit.c | 2 -- 5 files changed, 35 insertions(+), 30 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 331947b6a667..b2fa44adc60e 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -108,7 +108,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_AGGR_MIN_QDEPTH 2 /* minimum h/w qdepth for non-aggregated traffic */ #define ATH_NON_AGGR_MIN_QDEPTH 8 -#define ATH_TX_COMPLETE_POLL_INT 1000 +#define ATH_HW_CHECK_POLL_INT 1000 #define ATH_TXFIFO_DEPTH 8 #define ATH_TX_ERROR 0x01 @@ -745,7 +745,7 @@ void ath9k_csa_update(struct ath_softc *sc); #define ATH_PAPRD_TIMEOUT 100 /* msecs */ #define ATH_PLL_WORK_INTERVAL 100 -void ath_tx_complete_poll_work(struct work_struct *work); +void ath_hw_check_work(struct work_struct *work); void ath_reset_work(struct work_struct *work); bool ath_hw_check(struct ath_softc *sc); void ath_hw_pll_work(struct work_struct *work); @@ -1053,7 +1053,7 @@ struct ath_softc { #ifdef CONFIG_ATH9K_DEBUGFS struct ath9k_debug debug; #endif - struct delayed_work tx_complete_work; + struct delayed_work hw_check_work; struct delayed_work hw_pll_work; struct timer_list sleep_timer; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 084ad1bd495f..a2f7f48bbb92 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -681,6 +681,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, INIT_WORK(&sc->hw_reset_work, ath_reset_work); INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work); + INIT_DELAYED_WORK(&sc->hw_check_work, ath_hw_check_work); ath9k_init_channel_context(sc); diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 5ad0feeebc86..27c50562dc47 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -20,20 +20,13 @@ * TX polling - checks if the TX engine is stuck somewhere * and issues a chip reset if so. 
*/ -void ath_tx_complete_poll_work(struct work_struct *work) +static bool ath_tx_complete_check(struct ath_softc *sc) { - struct ath_softc *sc = container_of(work, struct ath_softc, - tx_complete_work.work); struct ath_txq *txq; int i; - bool needreset = false; - - if (sc->tx99_state) { - ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, - "skip tx hung detection on tx99\n"); - return; - } + if (sc->tx99_state) + return true; for (i = 0; i < IEEE80211_NUM_ACS; i++) { txq = sc->tx.txq_map[i]; @@ -41,25 +34,36 @@ void ath_tx_complete_poll_work(struct work_struct *work) ath_txq_lock(sc, txq); if (txq->axq_depth) { if (txq->axq_tx_inprogress) { - needreset = true; ath_txq_unlock(sc, txq); - break; - } else { - txq->axq_tx_inprogress = true; + goto reset; } + + txq->axq_tx_inprogress = true; } ath_txq_unlock(sc, txq); } - if (needreset) { - ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, - "tx hung, resetting the chip\n"); - ath9k_queue_reset(sc, RESET_TYPE_TX_HANG); + return true; + +reset: + ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, + "tx hung, resetting the chip\n"); + ath9k_queue_reset(sc, RESET_TYPE_TX_HANG); + return false; + +} + +void ath_hw_check_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, + hw_check_work.work); + + if (!ath_hw_check(sc) || + !ath_tx_complete_check(sc)) return; - } - ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, - msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT)); + ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work, + msecs_to_jiffies(ATH_HW_CHECK_POLL_INT)); } /* diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 58f06ce9a4cf..0345d6ec564f 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -181,7 +181,7 @@ void ath9k_ps_restore(struct ath_softc *sc) static void __ath_cancel_work(struct ath_softc *sc) { cancel_work_sync(&sc->paprd_work); - cancel_delayed_work_sync(&sc->tx_complete_work); + cancel_delayed_work_sync(&sc->hw_check_work); cancel_delayed_work_sync(&sc->hw_pll_work); #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT @@ -198,7 +198,8 @@ void ath_cancel_work(struct ath_softc *sc) void ath_restart_work(struct ath_softc *sc) { - ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); + ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work, + ATH_HW_CHECK_POLL_INT); if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, @@ -2091,7 +2092,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop, int timeout; bool drain_txq; - cancel_delayed_work_sync(&sc->tx_complete_work); + cancel_delayed_work_sync(&sc->hw_check_work); if (ah->ah_flags & AH_UNPLUGGED) { ath_dbg(common, ANY, "Device has been unplugged!\n"); @@ -2129,7 +2130,8 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop, ath9k_ps_restore(sc); } - ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); + ieee80211_queue_delayed_work(hw, &sc->hw_check_work, + ATH_HW_CHECK_POLL_INT); } static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index c35a192861ab..11073cf87909 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2872,8 +2872,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) return error; } - INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) error = 
ath_tx_edma_init(sc); -- cgit v1.2.3 From a34d0a0da1abae46a5f6ebd06fb0ec484ca099d9 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 2 Feb 2017 10:14:51 +0100 Subject: ath9k_hw: check if the chip failed to wake up In an RFC patch, Sven Eckelmann and Simon Wunderlich reported: "QCA 802.11n chips (especially AR9330/AR9340) sometimes end up in a state in which a read of AR_CFG always returns 0xdeadbeef. This should not happen when the power_mode of the device is ATH9K_PM_AWAKE." Include the check for the default register state in the existing MAC hang check. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hw.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index ac36873d6da4..8c5c2dd8fa7f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) int count = 50; u32 reg, last_val; + /* Check if chip failed to wake up */ + if (REG_READ(ah, AR_CFG) == 0xdeadbeef) + return false; + if (AR_SREV_9300(ah)) return !ath9k_hw_detect_mac_hang(ah); -- cgit v1.2.3 From 3a5e969bb2f6692a256352649355d56d018d6b88 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 2 Feb 2017 10:14:52 +0100 Subject: ath9k: fix race condition in enabling/disabling IRQs The code currently relies on refcounting to disable IRQs from within the IRQ handler and re-enable them after the tasklet has run. However, due to race conditions sometimes the IRQ handler might be called twice, or the tasklet may not run at all (if interrupted in the middle of a reset). This can cause nasty imbalances in the irq-disable refcount, which will get the driver permanently stuck until the entire radio has been stopped and started again (ath_reset will not recover from this). Instead of using this fragile logic, change the code to ensure that running the irq handler during tasklet processing is safe, and leave the refcount untouched.
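The core of the fix is an accumulate-and-consume pattern: the IRQ handler only ORs new status bits into a word protected by a dedicated lock, and the tasklet atomically takes and clears that word before re-enabling interrupts. A minimal, self-contained userspace analogue of that pattern (plain C with pthreads; an illustrative sketch only, not driver code - the patch below uses sc->intr_lock and sc->intrstatus for the same purpose):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t intr_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t intrstatus;

/* "IRQ handler": accumulate status bits, never overwrite them, so a second
 * invocation before the bottom half runs cannot lose events.
 */
static void isr(uint32_t status)
{
	pthread_mutex_lock(&intr_lock);
	intrstatus |= status;
	pthread_mutex_unlock(&intr_lock);
}

/* "tasklet": take and clear everything seen so far in one atomic step */
static uint32_t take_status(void)
{
	uint32_t status;

	pthread_mutex_lock(&intr_lock);
	status = intrstatus;
	intrstatus = 0;
	pthread_mutex_unlock(&intr_lock);
	return status;
}

int main(void)
{
	isr(0x1);
	isr(0x4);	/* a second "interrupt" before the bottom half ran */
	printf("handled status 0x%x\n", take_status());	/* prints 0x5 */
	return 0;
}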
Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ath9k.h | 1 + drivers/net/wireless/ath/ath9k/init.c | 1 + drivers/net/wireless/ath/ath9k/mac.c | 44 ++++++++++++++++++++++++++-------- drivers/net/wireless/ath/ath9k/mac.h | 1 + drivers/net/wireless/ath/ath9k/main.c | 27 +++++++++------------ 5 files changed, 48 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index b2fa44adc60e..cf076719c27e 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -998,6 +998,7 @@ struct ath_softc { struct survey_info *cur_survey; struct survey_info survey[ATH9K_NUM_CHANNELS]; + spinlock_t intr_lock; struct tasklet_struct intr_tq; struct tasklet_struct bcon_tasklet; struct ath_hw *sc_ah; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index a2f7f48bbb92..fa4b3cc1ba22 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -669,6 +669,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, common->bt_ant_diversity = 1; spin_lock_init(&common->cc_lock); + spin_lock_init(&sc->intr_lock); spin_lock_init(&sc->sc_serial_rw); spin_lock_init(&sc->sc_pm_lock); spin_lock_init(&sc->chan_lock); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index bba85d1a6cd1..d937c39b3a0b 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_disable_interrupts); -void ath9k_hw_enable_interrupts(struct ath_hw *ah) +static void __ath9k_hw_enable_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u32 sync_default = AR_INTR_SYNC_DEFAULT; u32 async_mask; - if (!(ah->imask & ATH9K_INT_GLOBAL)) - return; - - if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { - ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", - atomic_read(&ah->intr_ref_cnt)); - return; - } - if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; @@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah) ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); } + +void ath9k_hw_resume_interrupts(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (!(ah->imask & ATH9K_INT_GLOBAL)) + return; + + if (atomic_read(&ah->intr_ref_cnt) != 0) { + ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", + atomic_read(&ah->intr_ref_cnt)); + return; + } + + __ath9k_hw_enable_interrupts(ah); +} +EXPORT_SYMBOL(ath9k_hw_resume_interrupts); + +void ath9k_hw_enable_interrupts(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (!(ah->imask & ATH9K_INT_GLOBAL)) + return; + + if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { + ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", + atomic_read(&ah->intr_ref_cnt)); + return; + } + + __ath9k_hw_enable_interrupts(ah); +} EXPORT_SYMBOL(ath9k_hw_enable_interrupts); void ath9k_hw_set_interrupts(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 3bab01435a86..770fc11b41d1 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -744,6 +744,7 @@ void 
ath9k_hw_set_interrupts(struct ath_hw *ah); void ath9k_hw_enable_interrupts(struct ath_hw *ah); void ath9k_hw_disable_interrupts(struct ath_hw *ah); void ath9k_hw_kill_interrupts(struct ath_hw *ah); +void ath9k_hw_resume_interrupts(struct ath_hw *ah); void ar9002_hw_attach_mac_ops(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 0345d6ec564f..9e65d14e7b1e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -374,21 +374,20 @@ void ath9k_tasklet(unsigned long data) struct ath_common *common = ath9k_hw_common(ah); enum ath_reset_type type; unsigned long flags; - u32 status = sc->intrstatus; + u32 status; u32 rxmask; + spin_lock_irqsave(&sc->intr_lock, flags); + status = sc->intrstatus; + sc->intrstatus = 0; + spin_unlock_irqrestore(&sc->intr_lock, flags); + ath9k_ps_wakeup(sc); spin_lock(&sc->sc_pcu_lock); if (status & ATH9K_INT_FATAL) { type = RESET_TYPE_FATAL_INT; ath9k_queue_reset(sc, type); - - /* - * Increment the ref. counter here so that - * interrupts are enabled in the reset routine. - */ - atomic_inc(&ah->intr_ref_cnt); ath_dbg(common, RESET, "FATAL: Skipping interrupts\n"); goto out; } @@ -404,11 +403,6 @@ void ath9k_tasklet(unsigned long data) type = RESET_TYPE_BB_WATCHDOG; ath9k_queue_reset(sc, type); - /* - * Increment the ref. counter here so that - * interrupts are enabled in the reset routine. - */ - atomic_inc(&ah->intr_ref_cnt); ath_dbg(common, RESET, "BB_WATCHDOG: Skipping interrupts\n"); goto out; @@ -421,7 +415,6 @@ void ath9k_tasklet(unsigned long data) if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) { type = RESET_TYPE_TX_GTT; ath9k_queue_reset(sc, type); - atomic_inc(&ah->intr_ref_cnt); ath_dbg(common, RESET, "GTT: Skipping interrupts\n"); goto out; @@ -478,7 +471,7 @@ void ath9k_tasklet(unsigned long data) ath9k_btcoex_handle_interrupt(sc, status); /* re-enable hardware interrupt */ - ath9k_hw_enable_interrupts(ah); + ath9k_hw_resume_interrupts(ah); out: spin_unlock(&sc->sc_pcu_lock); ath9k_ps_restore(sc); @@ -542,7 +535,9 @@ irqreturn_t ath_isr(int irq, void *dev) return IRQ_NONE; /* Cache the status */ - sc->intrstatus = status; + spin_lock(&sc->intr_lock); + sc->intrstatus |= status; + spin_unlock(&sc->intr_lock); if (status & SCHED_INTR) sched = true; @@ -588,7 +583,7 @@ chip_reset: if (sched) { /* turn off every interrupt */ - ath9k_hw_disable_interrupts(ah); + ath9k_hw_kill_interrupts(ah); tasklet_schedule(&sc->intr_tq); } -- cgit v1.2.3 From bddb2afcb6c52a545f18fb9bcd4829828ebf4a3a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 7 Feb 2017 10:40:50 +0100 Subject: mac80211: add back lost debugfs files Somehow these files were never present or lost, but the code is there and they seem somewhat useful, so add them back. 
Signed-off-by: Johannes Berg --- net/mac80211/debugfs.c | 1 + net/mac80211/debugfs_sta.c | 1 + 2 files changed, 2 insertions(+) diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index f62cd0e13c58..47bea0babe78 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -356,6 +356,7 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(wep_iv); + DEBUGFS_ADD(rate_ctrl_alg); DEBUGFS_ADD(queues); DEBUGFS_ADD(misc); #ifdef CONFIG_PM diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index f6003b8c2c33..42601820db20 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -522,6 +522,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) return; DEBUGFS_ADD(flags); + DEBUGFS_ADD(aid); DEBUGFS_ADD(num_ps_buf_frames); DEBUGFS_ADD(last_seq_ctrl); DEBUGFS_ADD(agg_status); -- cgit v1.2.3 From 473153291b8a131c86787fd9b076a529573e2c19 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 2 Feb 2017 19:14:32 -0800 Subject: virtio_net: wrap rtnl_lock in test for calling with lock already held For the XDP use case and to allow ethtool reset tests it is useful to be able to use reset paths from contexts where rtnl lock is already held. This requires updating virtnet_set_queues and free_receive_bufs, the two places where rtnl_lock is taken in virtio_net. To do this we use the following pattern, _foo(...) { do stuff } foo(...) { rtnl_lock(); _foo(...); rtnl_unlock()}; this allows us to use the freeze()/restore() flow from both contexts. Signed-off-by: John Fastabend Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 29982c7f6080..bc814c079506 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1305,7 +1305,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi) rtnl_unlock(); } -static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) +static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct net_device *dev = vi->dev; @@ -1331,6 +1331,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) return 0; } +static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) +{ + int err; + + rtnl_lock(); + err = _virtnet_set_queues(vi, queue_pairs); + rtnl_unlock(); + return err; +} + static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -1583,7 +1593,7 @@ static int virtnet_set_channels(struct net_device *dev, return -EINVAL; get_online_cpus(); - err = virtnet_set_queues(vi, queue_pairs); + err = _virtnet_set_queues(vi, queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); @@ -1715,7 +1725,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) return -ENOMEM; } - err = virtnet_set_queues(vi, curr_qp + xdp_qp); + err = _virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) { dev_warn(&dev->dev, "XDP Device queue allocation failure.\n"); return err; } @@ -1724,7 +1734,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) if (prog) { prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); if (IS_ERR(prog)) { - virtnet_set_queues(vi, curr_qp); + _virtnet_set_queues(vi, curr_qp); return PTR_ERR(prog); } } @@ -1840,12 +1850,11 @@ static void
virtnet_free_queues(struct virtnet_info *vi) kfree(vi->sq); } -static void free_receive_bufs(struct virtnet_info *vi) +static void _free_receive_bufs(struct virtnet_info *vi) { struct bpf_prog *old_prog; int i; - rtnl_lock(); for (i = 0; i < vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); @@ -1855,6 +1864,12 @@ static void free_receive_bufs(struct virtnet_info *vi) if (old_prog) bpf_prog_put(old_prog); } +} + +static void free_receive_bufs(struct virtnet_info *vi) +{ + rtnl_lock(); + _free_receive_bufs(vi); rtnl_unlock(); } @@ -2293,9 +2308,7 @@ static int virtnet_probe(struct virtio_device *vdev) goto free_unregister_netdev; } - rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); - rtnl_unlock(); /* Assume link up if device can't report link status, otherwise get link status from config. */ @@ -2404,9 +2417,7 @@ static int virtnet_restore(struct virtio_device *vdev) netif_device_attach(vi->dev); - rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); - rtnl_unlock(); err = virtnet_cpu_notif_add(vi); if (err) -- cgit v1.2.3 From 0354e4d19cd5de51d6096a07ba44eda0ee61a6e9 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 2 Feb 2017 19:15:01 -0800 Subject: virtio_net: factor out xdp handler for readability At this point the do_xdp_prog is mostly if/else branches handling the different modes of virtio_net. So remove it and handle running the program in the per mode handlers. Signed-off-by: John Fastabend Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 86 ++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 51 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bc814c079506..68221655740e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -398,52 +398,6 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi, return true; } -static u32 do_xdp_prog(struct virtnet_info *vi, - struct receive_queue *rq, - struct bpf_prog *xdp_prog, - void *data, int len) -{ - int hdr_padded_len; - struct xdp_buff xdp; - void *buf; - unsigned int qp; - u32 act; - - if (vi->mergeable_rx_bufs) { - hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); - xdp.data = data + hdr_padded_len; - xdp.data_end = xdp.data + (len - vi->hdr_len); - buf = data; - } else { /* small buffers */ - struct sk_buff *skb = data; - - xdp.data = skb->data; - xdp.data_end = xdp.data + len; - buf = skb->data; - } - - act = bpf_prog_run_xdp(xdp_prog, &xdp); - switch (act) { - case XDP_PASS: - return XDP_PASS; - case XDP_TX: - qp = vi->curr_queue_pairs - - vi->xdp_queue_pairs + - smp_processor_id(); - xdp.data = buf; - if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, - data))) - trace_xdp_exception(vi->dev, xdp_prog, act); - return XDP_TX; - default: - bpf_warn_invalid_xdp_action(act); - case XDP_ABORTED: - trace_xdp_exception(vi->dev, xdp_prog, act); - case XDP_DROP: - return XDP_DROP; - } -} - static struct sk_buff *receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -459,19 +413,34 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; + struct xdp_buff xdp; + unsigned int qp; u32 act; if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) goto err_xdp; - act = do_xdp_prog(vi, rq, xdp_prog, skb, len); + + xdp.data = skb->data; + xdp.data_end = xdp.data + len; + act = bpf_prog_run_xdp(xdp_prog, 
&xdp); + switch (act) { case XDP_PASS: break; case XDP_TX: + qp = vi->curr_queue_pairs - + vi->xdp_queue_pairs + + smp_processor_id(); + if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], + &xdp, skb))) + trace_xdp_exception(vi->dev, xdp_prog, act); rcu_read_unlock(); goto xdp_xmit; - case XDP_DROP: default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); + case XDP_DROP: goto err_xdp; } } @@ -589,6 +558,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { struct page *xdp_page; + struct xdp_buff xdp; + unsigned int qp; + void *data; u32 act; /* This happens when rx buffer size is underestimated */ @@ -611,8 +583,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (unlikely(hdr->hdr.gso_type)) goto err_xdp; - act = do_xdp_prog(vi, rq, xdp_prog, - page_address(xdp_page) + offset, len); + data = page_address(xdp_page) + offset; + xdp.data = data + vi->hdr_len; + xdp.data_end = xdp.data + (len - vi->hdr_len); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { case XDP_PASS: /* We can only create skb based on xdp_page. */ @@ -626,13 +601,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: + qp = vi->curr_queue_pairs - + vi->xdp_queue_pairs + + smp_processor_id(); + if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], + &xdp, data))) + trace_xdp_exception(vi->dev, xdp_prog, act); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); if (unlikely(xdp_page != page)) goto err_xdp; rcu_read_unlock(); goto xdp_xmit; - case XDP_DROP: default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); + case XDP_DROP: if (unlikely(xdp_page != page)) __free_pages(xdp_page, 0); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); -- cgit v1.2.3 From 722d82830a04ccff6c7390535fcdfb1e4d69d126 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 2 Feb 2017 19:15:32 -0800 Subject: virtio_net: remove duplicate queue pair binding in XDP Factor out qp assignment. Signed-off-by: John Fastabend Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 68221655740e..3b3c5c571b6d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -340,15 +340,19 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, static bool virtnet_xdp_xmit(struct virtnet_info *vi, struct receive_queue *rq, - struct send_queue *sq, struct xdp_buff *xdp, void *data) { struct virtio_net_hdr_mrg_rxbuf *hdr; unsigned int num_sg, len; + struct send_queue *sq; + unsigned int qp; void *xdp_sent; int err; + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); + sq = &vi->sq[qp]; + /* Free up any pending old buffers before queueing new ones. 
*/ while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { if (vi->mergeable_rx_bufs) { @@ -414,7 +418,6 @@ static struct sk_buff *receive_small(struct net_device *dev, if (xdp_prog) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; struct xdp_buff xdp; - unsigned int qp; u32 act; if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) @@ -428,11 +431,7 @@ static struct sk_buff *receive_small(struct net_device *dev, case XDP_PASS: break; case XDP_TX: - qp = vi->curr_queue_pairs - - vi->xdp_queue_pairs + - smp_processor_id(); - if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], - &xdp, skb))) + if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb))) trace_xdp_exception(vi->dev, xdp_prog, act); rcu_read_unlock(); goto xdp_xmit; @@ -559,7 +558,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (xdp_prog) { struct page *xdp_page; struct xdp_buff xdp; - unsigned int qp; void *data; u32 act; @@ -601,11 +599,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: - qp = vi->curr_queue_pairs - - vi->xdp_queue_pairs + - smp_processor_id(); - if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], - &xdp, data))) + if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, data))) trace_xdp_exception(vi->dev, xdp_prog, act); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); if (unlikely(xdp_page != page)) -- cgit v1.2.3 From 9fe7bfce8b3e112e8e08c40deb72ee7e24c6f072 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 2 Feb 2017 19:16:01 -0800 Subject: virtio_net: refactor freeze/restore logic into virtnet reset logic For XDP we will need to reset the queues to allow for buffer headroom to be configured. In order to do this we need to essentially run the freeze()/restore() code path. Unfortunately the locking requirements between the freeze/restore and reset paths are different however so we can not simply reuse the code. This patch refactors the code path and adds a reset helper routine. Signed-off-by: John Fastabend Acked-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 75 +++++++++++++++++++++++++++++------------------- drivers/virtio/virtio.c | 42 +++++++++++++++------------ include/linux/virtio.h | 4 +++ 3 files changed, 73 insertions(+), 48 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3b3c5c571b6d..4cc3da1e4070 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1661,6 +1661,49 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .set_settings = virtnet_set_settings, }; +static void virtnet_freeze_down(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int i; + + /* Make sure no work handler is accessing the device */ + flush_work(&vi->config_work); + + netif_device_detach(vi->dev); + cancel_delayed_work_sync(&vi->refill); + + if (netif_running(vi->dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); + } +} + +static int init_vqs(struct virtnet_info *vi); + +static int virtnet_restore_up(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int err, i; + + err = init_vqs(vi); + if (err) + return err; + + virtio_device_ready(vdev); + + if (netif_running(vi->dev)) { + for (i = 0; i < vi->curr_queue_pairs; i++) + if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_napi_enable(&vi->rq[i]); + } + + netif_device_attach(vi->dev); + return err; +} + static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) { unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); @@ -2353,21 +2396,9 @@ static void virtnet_remove(struct virtio_device *vdev) static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int i; virtnet_cpu_notif_remove(vi); - - /* Make sure no work handler is accessing the device */ - flush_work(&vi->config_work); - - netif_device_detach(vi->dev); - cancel_delayed_work_sync(&vi->refill); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) - napi_disable(&vi->rq[i].napi); - } - + virtnet_freeze_down(vdev); remove_vq_common(vi); return 0; @@ -2376,25 +2407,11 @@ static int virtnet_freeze(struct virtio_device *vdev) static int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int err, i; + int err; - err = init_vqs(vi); + err = virtnet_restore_up(vdev); if (err) return err; - - virtio_device_ready(vdev); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - - for (i = 0; i < vi->max_queue_pairs; i++) - virtnet_napi_enable(&vi->rq[i]); - } - - netif_device_attach(vi->dev); - virtnet_set_queues(vi, vi->curr_queue_pairs); err = virtnet_cpu_notif_add(vi); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 7062bb0975a5..400d70b69379 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -100,11 +100,6 @@ static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) dev->id.device, dev->id.vendor); } -static void add_status(struct virtio_device *dev, unsigned status) -{ - dev->config->set_status(dev, dev->config->get_status(dev) | status); -} - void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit) { @@ -145,14 +140,15 @@ void virtio_config_changed(struct virtio_device *dev) } EXPORT_SYMBOL_GPL(virtio_config_changed); -static void virtio_config_disable(struct 
virtio_device *dev) +void virtio_config_disable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); dev->config_enabled = false; spin_unlock_irq(&dev->config_lock); } +EXPORT_SYMBOL_GPL(virtio_config_disable); -static void virtio_config_enable(struct virtio_device *dev) +void virtio_config_enable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); dev->config_enabled = true; @@ -161,8 +157,15 @@ static void virtio_config_enable(struct virtio_device *dev) dev->config_change_pending = false; spin_unlock_irq(&dev->config_lock); } +EXPORT_SYMBOL_GPL(virtio_config_enable); + +void virtio_add_status(struct virtio_device *dev, unsigned int status) +{ + dev->config->set_status(dev, dev->config->get_status(dev) | status); +} +EXPORT_SYMBOL_GPL(virtio_add_status); -static int virtio_finalize_features(struct virtio_device *dev) +int virtio_finalize_features(struct virtio_device *dev) { int ret = dev->config->finalize_features(dev); unsigned status; @@ -173,7 +176,7 @@ static int virtio_finalize_features(struct virtio_device *dev) if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) return 0; - add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); + virtio_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); status = dev->config->get_status(dev); if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { dev_err(&dev->dev, "virtio: device refuses features: %x\n", @@ -182,6 +185,7 @@ static int virtio_finalize_features(struct virtio_device *dev) } return 0; } +EXPORT_SYMBOL_GPL(virtio_finalize_features); static int virtio_dev_probe(struct device *_d) { @@ -193,7 +197,7 @@ static int virtio_dev_probe(struct device *_d) u64 driver_features_legacy; /* We have a driver! */ - add_status(dev, VIRTIO_CONFIG_S_DRIVER); + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); /* Figure out what features the device supports. */ device_features = dev->config->get_features(dev); @@ -247,7 +251,7 @@ static int virtio_dev_probe(struct device *_d) return 0; err: - add_status(dev, VIRTIO_CONFIG_S_FAILED); + virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; } @@ -265,7 +269,7 @@ static int virtio_dev_remove(struct device *_d) WARN_ON_ONCE(dev->config->get_status(dev)); /* Acknowledge the device's existence again. */ - add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); return 0; } @@ -316,7 +320,7 @@ int register_virtio_device(struct virtio_device *dev) dev->config->reset(dev); /* Acknowledge that we've seen the device. */ - add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); INIT_LIST_HEAD(&dev->vqs); @@ -325,7 +329,7 @@ int register_virtio_device(struct virtio_device *dev) err = device_register(&dev->dev); out: if (err) - add_status(dev, VIRTIO_CONFIG_S_FAILED); + virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; } EXPORT_SYMBOL_GPL(register_virtio_device); @@ -365,18 +369,18 @@ int virtio_device_restore(struct virtio_device *dev) dev->config->reset(dev); /* Acknowledge that we've seen the device. */ - add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); /* Maybe driver failed before freeze. * Restore the failed status, for debugging. */ if (dev->failed) - add_status(dev, VIRTIO_CONFIG_S_FAILED); + virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); if (!drv) return 0; /* We have a driver! 
*/ - add_status(dev, VIRTIO_CONFIG_S_DRIVER); + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); ret = virtio_finalize_features(dev); if (ret) @@ -389,14 +393,14 @@ int virtio_device_restore(struct virtio_device *dev) } /* Finally, tell the device we're all set */ - add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); virtio_config_enable(dev); return 0; err: - add_status(dev, VIRTIO_CONFIG_S_FAILED); + virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); return ret; } EXPORT_SYMBOL_GPL(virtio_device_restore); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index d5eb5479a425..04b0d3f95043 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -132,12 +132,16 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev) return container_of(_dev, struct virtio_device, dev); } +void virtio_add_status(struct virtio_device *dev, unsigned int status); int register_virtio_device(struct virtio_device *dev); void unregister_virtio_device(struct virtio_device *dev); void virtio_break_device(struct virtio_device *dev); void virtio_config_changed(struct virtio_device *dev); +void virtio_config_disable(struct virtio_device *dev); +void virtio_config_enable(struct virtio_device *dev); +int virtio_finalize_features(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); -- cgit v1.2.3 From 2de2f7f40ef92313d76c3df7f545be5d0899b1aa Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 2 Feb 2017 19:16:29 -0800 Subject: virtio_net: XDP support for adjust_head Add support for XDP adjust head by allocating a 256B header region that XDP programs can grow into. This is only enabled when an XDP program is loaded. In order to ensure that we do not have to unwind queue headroom, push queue setup below bpf_prog_add. It reads better to do a prog ref unwind vs another queue setup call. At the moment this code must do a full reset to ensure old buffers without headroom on program add or with headroom on program removal are not used incorrectly in the datapath. Ideally we would only have to disable/enable the RX queues being updated, but there is no API to do this at the moment in virtio, so use the big hammer. In practice it is likely not that big of a problem, as this will only happen when XDP is enabled/disabled; changing programs does not require the reset. There is some risk that the driver may either have an allocation failure or for some reason fail to correctly negotiate with the underlying backend; in this case the driver will be left uninitialized. I have not seen this ever happen on my test systems, and for what it's worth the same failure case can occur from probe and other contexts in the virtio framework. Signed-off-by: John Fastabend Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 154 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 125 insertions(+), 29 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4cc3da1e4070..11e28530c83c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -41,6 +41,9 @@ module_param(gso, bool, 0444); #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 +/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ +#define VIRTIO_XDP_HEADROOM 256 + /* RX packet size EWMA.
The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -367,6 +370,7 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi, } if (vi->mergeable_rx_bufs) { + xdp->data -= sizeof(struct virtio_net_hdr_mrg_rxbuf); /* Zero header and leave csum up to XDP layers */ hdr = xdp->data; memset(hdr, 0, vi->hdr_len); @@ -383,7 +387,9 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi, num_sg = 2; sg_init_table(sq->sg, 2); sg_set_buf(sq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); + skb_to_sgvec(skb, sq->sg + 1, + xdp->data - xdp->data_hard_start, + xdp->data_end - xdp->data); } err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, data, GFP_ATOMIC); @@ -411,7 +417,6 @@ static struct sk_buff *receive_small(struct net_device *dev, struct bpf_prog *xdp_prog; len -= vi->hdr_len; - skb_trim(skb, len); rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -423,12 +428,16 @@ static struct sk_buff *receive_small(struct net_device *dev, if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) goto err_xdp; - xdp.data = skb->data; + xdp.data_hard_start = skb->data; + xdp.data = skb->data + VIRTIO_XDP_HEADROOM; xdp.data_end = xdp.data + len; act = bpf_prog_run_xdp(xdp_prog, &xdp); switch (act) { case XDP_PASS: + /* Recalculate length in case bpf program changed it */ + __skb_pull(skb, xdp.data - xdp.data_hard_start); + len = xdp.data_end - xdp.data; break; case XDP_TX: if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb))) @@ -445,6 +454,7 @@ static struct sk_buff *receive_small(struct net_device *dev, } rcu_read_unlock(); + skb_trim(skb, len); return skb; err_xdp: @@ -493,7 +503,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, unsigned int *len) { struct page *page = alloc_page(GFP_ATOMIC); - unsigned int page_off = 0; + unsigned int page_off = VIRTIO_XDP_HEADROOM; if (!page) return NULL; @@ -529,7 +539,8 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, put_page(p); } - *len = page_off; + /* Headroom does not contribute to packet length */ + *len = page_off - VIRTIO_XDP_HEADROOM; return page; err_buf: __free_pages(page, 0); @@ -568,7 +579,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, page, offset, &len); if (!xdp_page) goto err_xdp; - offset = 0; + offset = VIRTIO_XDP_HEADROOM; } else { xdp_page = page; } @@ -581,19 +592,30 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (unlikely(hdr->hdr.gso_type)) goto err_xdp; + /* Allow consuming headroom but reserve enough space to push + * the descriptor on if we get an XDP_TX return code. + */ data = page_address(xdp_page) + offset; + xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len; xdp.data = data + vi->hdr_len; xdp.data_end = xdp.data + (len - vi->hdr_len); act = bpf_prog_run_xdp(xdp_prog, &xdp); switch (act) { case XDP_PASS: + /* recalculate offset to account for any header + * adjustments. Note other cases do not build an + * skb and avoid using offset + */ + offset = xdp.data - + page_address(xdp_page) - vi->hdr_len; + /* We can only create skb based on xdp_page. 
*/ if (unlikely(xdp_page != page)) { rcu_read_unlock(); put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, - 0, len, PAGE_SIZE); + offset, len, PAGE_SIZE); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); return head_skb; } @@ -760,23 +782,30 @@ frame_err: dev_kfree_skb(skb); } +static unsigned int virtnet_get_headroom(struct virtnet_info *vi) +{ + return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; +} + static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { + int headroom = GOOD_PACKET_LEN + virtnet_get_headroom(vi); + unsigned int xdp_headroom = virtnet_get_headroom(vi); struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; int err; - skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); + skb = __netdev_alloc_skb_ip_align(vi->dev, headroom, gfp); if (unlikely(!skb)) return -ENOMEM; - skb_put(skb, GOOD_PACKET_LEN); + skb_put(skb, headroom); hdr = skb_vnet_hdr(skb); sg_init_table(rq->sg, 2); sg_set_buf(rq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); + skb_to_sgvec(skb, rq->sg + 1, xdp_headroom, skb->len - xdp_headroom); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) @@ -844,24 +873,27 @@ static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) return ALIGN(len, MERGEABLE_BUFFER_ALIGN); } -static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) +static int add_recvbuf_mergeable(struct virtnet_info *vi, + struct receive_queue *rq, gfp_t gfp) { struct page_frag *alloc_frag = &rq->alloc_frag; + unsigned int headroom = virtnet_get_headroom(vi); char *buf; unsigned long ctx; int err; unsigned int len, hole; len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); - if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) + if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) return -ENOMEM; buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + buf += headroom; /* advance address leaving hole at front of pkt */ ctx = mergeable_buf_to_ctx(buf, len); get_page(alloc_frag->page); - alloc_frag->offset += len; + alloc_frag->offset += len + headroom; hole = alloc_frag->size - alloc_frag->offset; - if (hole < len) { + if (hole < len + headroom) { /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to * the current buffer. 
This extra space is not included in @@ -895,7 +927,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp |= __GFP_COLD; do { if (vi->mergeable_rx_bufs) - err = add_recvbuf_mergeable(rq, gfp); + err = add_recvbuf_mergeable(vi, rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(vi, rq, gfp); else @@ -1679,6 +1711,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev) } static int init_vqs(struct virtnet_info *vi); +static void _remove_vq_common(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { @@ -1704,19 +1737,47 @@ static int virtnet_restore_up(struct virtio_device *vdev) return err; } +static int virtnet_reset(struct virtnet_info *vi) +{ + struct virtio_device *dev = vi->vdev; + int ret; + + virtio_config_disable(dev); + dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED; + virtnet_freeze_down(dev); + _remove_vq_common(vi); + + dev->config->reset(dev); + virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); + + ret = virtio_finalize_features(dev); + if (ret) + goto err; + + ret = virtnet_restore_up(dev); + if (ret) + goto err; + ret = _virtnet_set_queues(vi, vi->curr_queue_pairs); + if (ret) + goto err; + + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + virtio_config_enable(dev); + return 0; +err: + virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); + return ret; +} + static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) { unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); struct virtnet_info *vi = netdev_priv(dev); struct bpf_prog *old_prog; - u16 xdp_qp = 0, curr_qp; + u16 oxdp_qp, xdp_qp = 0, curr_qp; int i, err; - if (prog && prog->xdp_adjust_head) { - netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || @@ -1746,21 +1807,32 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) return -ENOMEM; } + if (prog) { + prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + err = _virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) { dev_warn(&dev->dev, "XDP Device queue allocation failure.\n"); - return err; + goto virtio_queue_err; } - if (prog) { - prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); - if (IS_ERR(prog)) { - _virtnet_set_queues(vi, curr_qp); - return PTR_ERR(prog); - } + oxdp_qp = vi->xdp_queue_pairs; + + /* Changing the headroom in buffers is a disruptive operation because + * existing buffers must be flushed and reallocated. This will happen + * when a xdp program is initially added or xdp is disabled by removing + * the xdp program resulting in number of XDP queues changing. + */ + if (vi->xdp_queue_pairs != xdp_qp) { + vi->xdp_queue_pairs = xdp_qp; + err = virtnet_reset(vi); + if (err) + goto virtio_reset_err; } - vi->xdp_queue_pairs = xdp_qp; netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); for (i = 0; i < vi->max_queue_pairs; i++) { @@ -1771,6 +1843,21 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) } return 0; + +virtio_reset_err: + /* On reset error do our best to unwind XDP changes inflight and return + * error up to user space for resolution. The underlying reset hung on + * us so not much we can do here. 
+ */ + dev_warn(&dev->dev, "XDP reset failure and queues unstable\n"); + vi->xdp_queue_pairs = oxdp_qp; +virtio_queue_err: + /* On queue set error we can unwind bpf ref count and user space can + * retry this is most likely an allocation failure. + */ + if (prog) + bpf_prog_sub(prog, vi->max_queue_pairs - 1); + return err; } static bool virtnet_xdp_query(struct net_device *dev) @@ -2361,6 +2448,15 @@ free: return err; } +static void _remove_vq_common(struct virtnet_info *vi) +{ + vi->vdev->config->reset(vi->vdev); + free_unused_bufs(vi); + _free_receive_bufs(vi); + free_receive_page_frags(vi); + virtnet_del_vqs(vi); +} + static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); -- cgit v1.2.3 From 55601a880690cdeccdb5923c2493f0e3736f8f6b Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 4 Feb 2017 20:02:49 +0100 Subject: net: phy: Add 2000base-x, 2500base-x and rxaui modes The mv88e6390 ports 9 and 10 support some additional PHY modes. Add these modes to the PHY core so they can be used in the binding. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/ethernet.txt | 3 +++ include/linux/phy.h | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt index 05150957ecfd..3a6916909d90 100644 --- a/Documentation/devicetree/bindings/net/ethernet.txt +++ b/Documentation/devicetree/bindings/net/ethernet.txt @@ -29,6 +29,9 @@ The following properties are common to the Ethernet controllers: * "smii" * "xgmii" * "trgmii" + * "2000base-x", + * "2500base-x", + * "rxaui" - phy-connection-type: the same as "phy-mode" property but described in ePAPR; - phy-handle: phandle, specifies a reference to a node representing a PHY device; this property is described in ePAPR and so preferred; diff --git a/include/linux/phy.h b/include/linux/phy.h index 43474f39ef65..28ae9eafec19 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -81,6 +81,9 @@ typedef enum { PHY_INTERFACE_MODE_MOCA, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, + PHY_INTERFACE_MODE_1000BASEX, + PHY_INTERFACE_MODE_2500BASEX, + PHY_INTERFACE_MODE_RXAUI, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -141,6 +144,12 @@ static inline const char *phy_modes(phy_interface_t interface) return "qsgmii"; case PHY_INTERFACE_MODE_TRGMII: return "trgmii"; + case PHY_INTERFACE_MODE_1000BASEX: + return "1000base-x"; + case PHY_INTERFACE_MODE_2500BASEX: + return "2500base-x"; + case PHY_INTERFACE_MODE_RXAUI: + return "rxaui"; default: return "unknown"; } -- cgit v1.2.3 From f39908d3b1c45208c6898550167bfa766fdd6bb8 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 4 Feb 2017 20:02:50 +0100 Subject: net: dsa: mv88e6xxx: Set the CMODE for mv88e6390 ports 9 & 10 Unlike most ports, ports 9 and 10 of the 6390X family have configurable PHY modes. Set the mode as part of adjust_link(). Ordering is important, because the SERDES interfaces connected to ports 9 and 10 can be split and assigned to other ports. The CMODE has to be correctly set before the SERDES interface on another port can be configured. Such configuration is likely to be performed in port_enable() and port_disable(), called on slave_open() and slave_close(). The simple case is when ports 9 and 10 are used for 'CPU' or 'DSA'. In this case, the CMODE is set via a phy-mode in dsa_cpu_dsa_setup(), which is called early in the switch setup.
When ports 9 or 10 are used as user ports, and have a fixed-phy, when the fixed-phy is attached, dsa_slave_adjust_link() is called, which results in the adjust_link function being called, setting the cmode. The port_enable() for other ports will be called much later. When ports 9 or 10 are used as user ports and have a real phy attached which does not use all the available SERDES interface, e.g. a 1Gbps SGMII, there is currently no mechanism in place to set the CMODE of the port from software. It must be hoped the strapping resistors are correct. At the same time, add a function to get the cmode. This will be needed when configuring the SERDES interfaces. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 8 +++++ drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 9 +++++ drivers/net/dsa/mv88e6xxx/port.c | 64 +++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/port.h | 3 ++ 4 files changed, 84 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 22ce57256d34..e62d1476b63d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -749,6 +749,12 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, goto restore_link; } + if (chip->info->ops->port_set_cmode) { + err = chip->info->ops->port_set_cmode(chip, port, mode); + if (err && err != -EOPNOTSUPP) + goto restore_link; + } + err = 0; restore_link: if (chip->info->ops->port_set_link(chip, port, link)) @@ -3520,6 +3526,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_pause_config = mv88e6390_port_pause_config, + .port_set_cmode = mv88e6390x_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3738,6 +3745,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6390_port_pause_config, + .port_set_cmode = mv88e6390x_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 9c5c0472b211..47ba2f73b879 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -58,6 +58,9 @@ #define PORT_STATUS_CMODE_100BASE_X 0x8 #define PORT_STATUS_CMODE_1000BASE_X 0x9 #define PORT_STATUS_CMODE_SGMII 0xa +#define PORT_STATUS_CMODE_2500BASEX 0xb +#define PORT_STATUS_CMODE_XAUI 0xc +#define PORT_STATUS_CMODE_RXAUI 0xd #define PORT_PCS_CTRL 0x01 #define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) #define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) @@ -838,6 +841,12 @@ struct mv88e6xxx_ops { int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port); int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port); + /* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc. + * Some chips allow this to be configured on specific ports. + */ + int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); + /* Snapshot the statistics for a port.
The statistics can then * be read back a leisure but still with a consistent view. */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index d380a93b092c..d543a6817d61 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -11,6 +11,7 @@ * (at your option) any later version. */ +#include <linux/phy.h> #include "mv88e6xxx.h" #include "port.h" @@ -304,6 +305,69 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) return mv88e6xxx_port_set_speed(chip, port, speed, true, true); } +int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + u16 reg; + u16 cmode; + int err; + + if (mode == PHY_INTERFACE_MODE_NA) + return 0; + + if (port != 9 && port != 10) + return -EOPNOTSUPP; + + switch (mode) { + case PHY_INTERFACE_MODE_1000BASEX: + cmode = PORT_STATUS_CMODE_1000BASE_X; + break; + case PHY_INTERFACE_MODE_SGMII: + cmode = PORT_STATUS_CMODE_SGMII; + break; + case PHY_INTERFACE_MODE_2500BASEX: + cmode = PORT_STATUS_CMODE_2500BASEX; + break; + case PHY_INTERFACE_MODE_XGMII: + cmode = PORT_STATUS_CMODE_XAUI; + break; + case PHY_INTERFACE_MODE_RXAUI: + cmode = PORT_STATUS_CMODE_RXAUI; + break; + default: + cmode = 0; + } + + if (cmode) { + err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg); + if (err) + return err; + + reg &= ~PORT_STATUS_CMODE_MASK; + reg |= cmode; + + err = mv88e6xxx_port_write(chip, port, PORT_STATUS, reg); + if (err) + return err; + } + + return 0; +} + +int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) +{ + int err; + u16 reg; + + err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg); + if (err) + return err; + + *cmode = reg & PORT_STATUS_CMODE_MASK; + + return 0; +} + /* Offset 0x02: Pause Control * * Do not limit the period of time that this port can be paused for by diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 7b3bacaacbfe..cb871e306f62 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -67,5 +67,8 @@ int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port); int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port); +int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); +int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); #endif /* _MV88E6XXX_PORT_H */ -- cgit v1.2.3 From cf3e80df13e534eb34a85835d5357c78d8689199 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 4 Feb 2017 20:12:24 +0100 Subject: net: dsa: mv88e6xxx: Implement Clause 45 access to SMI devices The mv88e6390 MDIO bus controllers can support clause 45 accesses. The internal SERDES interfaces need this, and it is likely external 10GHz PHYs will be clause 45. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S.
Miller --- drivers/net/dsa/mv88e6xxx/global2.c | 122 +++++++++++++++++++++++++++++++--- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 7 ++ 2 files changed, 120 insertions(+), 9 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 353e26bea3c3..50e4e0be4227 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -501,18 +501,110 @@ static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd) return mv88e6xxx_g2_smi_phy_wait(chip); } +static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip, + int addr, int device, int reg, + bool external) +{ + int cmd = SMI_CMD_OP_45_WRITE_ADDR | (addr << 5) | device; + int err; + + if (external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + + err = mv88e6xxx_g2_smi_phy_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, reg); + if (err) + return err; + + return mv88e6xxx_g2_smi_phy_cmd(chip, cmd); +} + +int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr, + int reg_c45, u16 *val, bool external) +{ + int device = (reg_c45 >> 16) & 0x1f; + int reg = reg_c45 & 0xffff; + int err; + u16 cmd; + + err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg, + external); + if (err) + return err; + + cmd = GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA | (addr << 5) | device; + + if (external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + + err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd); + if (err) + return err; + + err = mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val); + if (err) + return err; + + err = *val; + + return 0; +} + +int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 *val, bool external) +{ + u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; + int err; + + if (external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + + err = mv88e6xxx_g2_smi_phy_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd); + if (err) + return err; + + return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val); +} + int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int addr, int reg, u16 *val) { - u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + bool external = mdio_bus->external; + + if (reg & MII_ADDR_C45) + return mv88e6xxx_g2_smi_phy_read_c45(chip, addr, reg, val, + external); + return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external); +} + +int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr, + int reg_c45, u16 val, bool external) +{ + int device = (reg_c45 >> 16) & 0x1f; + int reg = reg_c45 & 0xffff; int err; + u16 cmd; - if (mdio_bus->external) + err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg, + external); + if (err) + return err; + + cmd = GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA | (addr << 5) | device; + + if (external) cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; - err = mv88e6xxx_g2_smi_phy_wait(chip); + err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val); if (err) return err; @@ -520,18 +612,16 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, if (err) return err; - return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val); + return 0; } -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, - struct mii_bus *bus, - int addr, int reg, u16 val) +int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val, bool external) { u16 cmd = 
GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; - struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; int err; - if (mdio_bus->external) + if (external) cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; err = mv88e6xxx_g2_smi_phy_wait(chip); @@ -545,6 +635,20 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_smi_phy_cmd(chip, cmd); } +int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) +{ + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + bool external = mdio_bus->external; + + if (reg & MII_ADDR_C45) + return mv88e6xxx_g2_smi_phy_write_c45(chip, addr, reg, val, + external); + + return mv88e6xxx_g2_smi_phy_write_c22(chip, addr, reg, val, external); +} + static void mv88e6xxx_g2_irq_mask(struct irq_data *d) { struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d); diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 47ba2f73b879..c1d43ee11842 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -400,6 +400,13 @@ #define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA ((0x2 << 10) | \ GLOBAL2_SMI_PHY_CMD_MODE_22 | \ GLOBAL2_SMI_PHY_CMD_BUSY) +#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_ADDR ((0x0 << 10) | \ + GLOBAL2_SMI_PHY_CMD_BUSY) +#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA ((0x1 << 10) | \ + GLOBAL2_SMI_PHY_CMD_BUSY) +#define GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA ((0x3 << 10) | \ + GLOBAL2_SMI_PHY_CMD_BUSY) + #define GLOBAL2_SMI_PHY_DATA 0x19 #define GLOBAL2_SCRATCH_MISC 0x1a #define GLOBAL2_SCRATCH_BUSY BIT(15) -- cgit v1.2.3 From a23b296198cbf2b78101e7e745f9c45663c3f82e Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 4 Feb 2017 20:15:28 +0100 Subject: net: dsa: mv88e6xxx: Refactor remaining port setup Move the remaining port configuration code which varies per device into port.c, using ops where necessary. This makes mv88e6xxx_6185_family() and mv88e6xxx_6095_family() unused, so remove them. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 51 +++++++++++++---------------- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 7 +++++ drivers/net/dsa/mv88e6xxx/port.c | 48 +++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/port.h | 6 ++++- 4 files changed, 78 insertions(+), 34 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index e62d1476b63d..7b4e40b286e4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -677,11 +677,6 @@ static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, return err; } -static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6095; -} - static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip) { return chip->info->family == MV88E6XXX_FAMILY_6097; @@ -692,11 +687,6 @@ static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip) return chip->info->family == MV88E6XXX_FAMILY_6165; } -static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6185; -} - static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) { return chip->info->family == MV88E6XXX_FAMILY_6320; @@ -2585,31 +2575,23 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) * received packets as usual, disable ARP mirroring and don't send a * copy of all transmitted/received frames on this port to the CPU.
*/ - reg = 0; - if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || - mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || - mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) || - mv88e6xxx_6185_family(chip) || mv88e6xxx_6341_family(chip)) - reg = PORT_CONTROL_2_MAP_DA; - - if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) { - /* Set the upstream port this port should use */ - reg |= dsa_upstream_port(ds); - /* enable forwarding of unknown multicast addresses to - * the upstream port - */ - if (port == dsa_upstream_port(ds)) - reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; - } - - reg |= PORT_CONTROL_2_8021Q_DISABLED; + err = mv88e6xxx_port_set_map_da(chip, port); + if (err) + return err; - if (reg) { - err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg); + reg = 0; + if (chip->info->ops->port_set_upstream_port) { + err = chip->info->ops->port_set_upstream_port( + chip, port, dsa_upstream_port(ds)); if (err) return err; } + err = mv88e6xxx_port_set_8021q_mode(chip, port, + PORT_CONTROL_2_8021Q_DISABLED); + if (err) + return err; + if (chip->info->ops->port_jumbo_config) { err = chip->info->ops->port_jumbo_config(chip, port); if (err) @@ -3144,7 +3126,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, .port_set_frame_mode = mv88e6085_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns, + .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, + .port_set_upstream_port = mv88e6095_port_set_upstream_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3210,8 +3193,9 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_set_upstream_port = mv88e6095_port_set_upstream_port, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, @@ -3387,8 +3371,9 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, .port_set_frame_mode = mv88e6085_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns, + .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, + .port_set_upstream_port = mv88e6095_port_set_upstream_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index c1d43ee11842..8a21800374f3 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -168,6 +168,7 @@ #define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6) #define PORT_CONTROL_2_EGRESS_MONITOR BIT(5) #define PORT_CONTROL_2_INGRESS_MONITOR BIT(4) +#define PORT_CONTROL_2_UPSTREAM_MASK 0x0f #define PORT_RATE_CONTROL 0x09 #define PORT_RATE_CONTROL_2 0x0a #define PORT_ASSOC_VECTOR 0x0b @@ -854,6 
+855,12 @@ struct mv88e6xxx_ops { int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); + /* Some devices have a per port register indicating what is + * the upstream port this port should forward to. + */ + int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port, + int upstream_port); + /* Snapshot the statistics for a port. The statistics can then * be read back a leisure but still with a consistent view. */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index d543a6817d61..8875784c4718 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -672,6 +672,40 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = { [PORT_CONTROL_2_8021Q_SECURE] = "Secure", }; +int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, + bool on) +{ + int err; + u16 reg; + + err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, ®); + if (err) + return err; + + if (on) + reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; + else + reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN; + + return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg); +} + +int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port) +{ + int err; + u16 reg; + + err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, ®); + if (err) + return err; + + reg &= ~PORT_CONTROL_2_UPSTREAM_MASK; + reg |= upstream_port; + + return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg); +} + int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port, u16 mode) { @@ -695,6 +729,20 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port, return 0; } +int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, ®); + if (err) + return err; + + reg |= PORT_CONTROL_2_MAP_DA; + + return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg); +} + int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port) { u16 reg; diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index cb871e306f62..c83cbb3f4491 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -58,6 +58,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port, enum mv88e6xxx_frame_mode mode); int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, bool on); +int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, + bool on); int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, @@ -70,5 +72,7 @@ int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port); int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); - +int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); +int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port); #endif /* _MV88E6XXX_PORT_H */ -- cgit v1.2.3 From 14b89f36eed2993670906a3991bca496a5ebf1a6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 4 Feb 2017 13:02:42 -0800 Subject: net: dsa: Rename and export dev_to_net_device() In preparation for using this function in net/dsa/dsa2.c, rename the function to make its scope DSA specific, and export it. 
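
For reference, a minimal sketch of how a caller that only holds a struct device pointer from platform data would use the newly exported helper. The function name example_find_master() is invented for illustration; the NULL check mapping to -EPROBE_DEFER and the dev_put() balancing the reference taken by dsa_dev_to_net_device() mirror the dsa2.c caller added in the following patch.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/dsa.h>

static int example_find_master(struct device *netdev_dev)
{
	struct net_device *master;

	master = dsa_dev_to_net_device(netdev_dev);
	if (!master)
		return -EPROBE_DEFER;	/* Ethernet MAC not probed yet */

	/* ... wire up 'master' as the CPU port's net_device here ... */

	/* the helper took a reference on 'master'; drop it when done */
	dev_put(master);

	return 0;
}

The DT path keeps resolving the CPU port via of_find_net_device_by_node(); the platform-data path goes through this helper instead.
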
Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 1 + net/dsa/dsa.c | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index e9c940c8936f..2a21fa80f898 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -445,6 +445,7 @@ struct dsa_switch_driver { void register_switch_driver(struct dsa_switch_driver *type); void unregister_switch_driver(struct dsa_switch_driver *type); struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); +struct net_device *dsa_dev_to_net_device(struct device *dev); static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) { diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 22e44f691ab9..b6d4f6a23f06 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -492,7 +492,7 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev) } EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus); -static struct net_device *dev_to_net_device(struct device *dev) +struct net_device *dsa_dev_to_net_device(struct device *dev) { struct device *d; @@ -509,6 +509,7 @@ static struct net_device *dev_to_net_device(struct device *dev) return NULL; } +EXPORT_SYMBOL_GPL(dsa_dev_to_net_device); #ifdef CONFIG_OF static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, @@ -817,7 +818,7 @@ static int dsa_probe(struct platform_device *pdev) dev = pd->of_netdev; dev_hold(dev); } else { - dev = dev_to_net_device(pd->netdev); + dev = dsa_dev_to_net_device(pd->netdev); } if (dev == NULL) { ret = -EPROBE_DEFER; -- cgit v1.2.3 From 71e0bbde0d88047f66b25721f69a441d46083748 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 4 Feb 2017 13:02:43 -0800 Subject: net: dsa: Add support for platform data Allow drivers to use the new DSA API with platform data. Most of the code in net/dsa/dsa2.c does not rely so much on device_nodes and can get the same information from platform_data instead. We purposely do not support distributed configurations with platform data, so drivers should be providing a pointer to a 'struct dsa_chip_data' structure if they wish to communicate per-port layout. Multiple CPUs port could potentially be supported and dsa_chip_data is extended to receive up to one reference to an upstream network device per port described by a dsa_chip_data structure. dsa_dev_to_net_device() increments the network device's reference count, so we intentionally call dev_put() to be consistent with the DT-enabled path, until we have a generic notifier based solution. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/net/dsa.h | 6 ++++ net/dsa/dsa2.c | 102 ++++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 90 insertions(+), 18 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index 2a21fa80f898..b49b2004891e 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -45,6 +45,11 @@ struct dsa_chip_data { struct device *host_dev; int sw_addr; + /* + * Reference to network devices + */ + struct device *netdev[DSA_MAX_PORTS]; + /* set to size of eeprom if supported by the switch */ int eeprom_len; @@ -170,6 +175,7 @@ struct dsa_mall_tc_entry { struct dsa_port { struct dsa_switch *ds; unsigned int index; + const char *name; struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 1c546b6621ee..6f5f0a2ad256 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -78,19 +78,28 @@ static void dsa_dst_del_ds(struct dsa_switch_tree *dst, kref_put(&dst->refcount, dsa_free_dst); } +/* For platform data configurations, we need to have a valid name argument to + * differentiate a disabled port from an enabled one + */ static bool dsa_port_is_valid(struct dsa_port *port) { - return !!port->dn; + return !!(port->dn || port->name); } static bool dsa_port_is_dsa(struct dsa_port *port) { - return !!of_parse_phandle(port->dn, "link", 0); + if (port->name && !strcmp(port->name, "dsa")) + return true; + else + return !!of_parse_phandle(port->dn, "link", 0); } static bool dsa_port_is_cpu(struct dsa_port *port) { - return !!of_parse_phandle(port->dn, "ethernet", 0); + if (port->name && !strcmp(port->name, "cpu")) + return true; + else + return !!of_parse_phandle(port->dn, "ethernet", 0); } static bool dsa_ds_find_port_dn(struct dsa_switch *ds, @@ -250,10 +259,11 @@ static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index, static int dsa_user_port_apply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { - const char *name; + const char *name = port->name; int err; - name = of_get_property(port->dn, "label", NULL); + if (port->dn) + name = of_get_property(port->dn, "label", NULL); if (!name) name = "eth%d"; @@ -444,11 +454,16 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index, struct net_device *ethernet_dev; struct device_node *ethernet; - ethernet = of_parse_phandle(port->dn, "ethernet", 0); - if (!ethernet) - return -EINVAL; + if (port->dn) { + ethernet = of_parse_phandle(port->dn, "ethernet", 0); + if (!ethernet) + return -EINVAL; + ethernet_dev = of_find_net_device_by_node(ethernet); + } else { + ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]); + dev_put(ethernet_dev); + } - ethernet_dev = of_find_net_device_by_node(ethernet); if (!ethernet_dev) return -EPROBE_DEFER; @@ -551,6 +566,33 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds) return 0; } +static int dsa_parse_ports(struct dsa_chip_data *cd, struct dsa_switch *ds) +{ + bool valid_name_found = false; + unsigned int i; + + for (i = 0; i < DSA_MAX_PORTS; i++) { + if (!cd->port_names[i]) + continue; + + ds->ports[i].name = cd->port_names[i]; + + /* Initialize enabled_port_mask now for drv->setup() + * to have access to a correct value, just like what + * net/dsa/dsa.c::dsa_switch_setup_one does. 
+ */ + if (!dsa_port_is_cpu(&ds->ports[i])) + ds->enabled_port_mask |= 1 << i; + + valid_name_found = true; + } + + if (!valid_name_found && i == DSA_MAX_PORTS) + return -EINVAL; + + return 0; +} + static int dsa_parse_member_dn(struct device_node *np, u32 *tree, u32 *index) { int err; @@ -575,6 +617,18 @@ static int dsa_parse_member_dn(struct device_node *np, u32 *tree, u32 *index) return 0; } +static int dsa_parse_member(struct dsa_chip_data *pd, u32 *tree, u32 *index) +{ + if (!pd) + return -ENODEV; + + /* We do not support complex trees with dsa_chip_data */ + *tree = 0; + *index = 0; + + return 0; +} + static struct device_node *dsa_get_ports(struct dsa_switch *ds, struct device_node *np) { @@ -591,23 +645,34 @@ static struct device_node *dsa_get_ports(struct dsa_switch *ds, static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev) { + struct dsa_chip_data *pdata = dev->platform_data; struct device_node *np = dev->of_node; struct dsa_switch_tree *dst; struct device_node *ports; u32 tree, index; int i, err; - err = dsa_parse_member_dn(np, &tree, &index); - if (err) - return err; + if (np) { + err = dsa_parse_member_dn(np, &tree, &index); + if (err) + return err; - ports = dsa_get_ports(ds, np); - if (IS_ERR(ports)) - return PTR_ERR(ports); + ports = dsa_get_ports(ds, np); + if (IS_ERR(ports)) + return PTR_ERR(ports); - err = dsa_parse_ports_dn(ports, ds); - if (err) - return err; + err = dsa_parse_ports_dn(ports, ds); + if (err) + return err; + } else { + err = dsa_parse_member(pdata, &tree, &index); + if (err) + return err; + + err = dsa_parse_ports(pdata, ds); + if (err) + return err; + } dst = dsa_get_dst(tree); if (!dst) { @@ -623,6 +688,7 @@ static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev) ds->dst = dst; ds->index = index; + ds->cd = pdata; /* Initialize the routing table */ for (i = 0; i < DSA_MAX_SWITCHES; ++i) -- cgit v1.2.3 From 648ea0134069cda7d4940f397bcc6901fb88752a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 4 Feb 2017 13:02:44 -0800 Subject: net: phy: Allow pre-declaration of MDIO devices Allow board support code to collect pre-declarations for MDIO devices by registering them with mdiobus_register_board_info(). SPI and I2C buses have a similar feature, we were missing this for MDIO devices, but this is particularly useful for e.g: MDIO-connected switches which need to provide their port layout (often board-specific) to a MDIO Ethernet switch driver. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/Makefile | 3 +- drivers/net/phy/mdio-boardinfo.c | 86 ++++++++++++++++++++++++++++++++++++++++ drivers/net/phy/mdio-boardinfo.h | 19 +++++++++ drivers/net/phy/mdio_bus.c | 4 ++ drivers/net/phy/mdio_device.c | 11 +++++ include/linux/mdio.h | 3 ++ include/linux/mod_devicetable.h | 1 + include/linux/phy.h | 19 +++++++++ 8 files changed, 145 insertions(+), 1 deletion(-) create mode 100644 drivers/net/phy/mdio-boardinfo.c create mode 100644 drivers/net/phy/mdio-boardinfo.h diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 356859ac7c18..407b0b601ea8 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,6 +1,7 @@ # Makefile for Linux PHY drivers and MDIO bus drivers -libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o +libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o \ + mdio-boardinfo.o libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c new file mode 100644 index 000000000000..6b988f77da08 --- /dev/null +++ b/drivers/net/phy/mdio-boardinfo.c @@ -0,0 +1,86 @@ +/* + * mdio-boardinfo - Collect pre-declarations for MDIO devices + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include + +#include "mdio-boardinfo.h" + +static LIST_HEAD(mdio_board_list); +static DEFINE_MUTEX(mdio_board_lock); + +/** + * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices + * from pre-collected board specific MDIO information + * @mdiodev: MDIO device pointer + * Context: can sleep + */ +void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus) +{ + struct mdio_board_entry *be; + struct mdio_device *mdiodev; + struct mdio_board_info *bi; + int ret; + + mutex_lock(&mdio_board_lock); + list_for_each_entry(be, &mdio_board_list, list) { + bi = &be->board_info; + + if (strcmp(bus->id, bi->bus_id)) + continue; + + mdiodev = mdio_device_create(bus, bi->mdio_addr); + if (IS_ERR(mdiodev)) + continue; + + strncpy(mdiodev->modalias, bi->modalias, + sizeof(mdiodev->modalias)); + mdiodev->bus_match = mdio_device_bus_match; + mdiodev->dev.platform_data = (void *)bi->platform_data; + + ret = mdio_device_register(mdiodev); + if (ret) { + mdio_device_free(mdiodev); + continue; + } + } + mutex_unlock(&mdio_board_lock); +} + +/** + * mdio_register_board_info - register MDIO devices for a given board + * @info: array of devices descriptors + * @n: number of descriptors provided + * Context: can sleep + * + * The board info passed can be marked with __initdata but be pointers + * such as platform_data etc. 
are copied as-is + */ +int mdiobus_register_board_info(const struct mdio_board_info *info, + unsigned int n) +{ + struct mdio_board_entry *be; + unsigned int i; + + be = kcalloc(n, sizeof(*be), GFP_KERNEL); + if (!be) + return -ENOMEM; + + for (i = 0; i < n; i++, be++, info++) { + memcpy(&be->board_info, info, sizeof(*info)); + mutex_lock(&mdio_board_lock); + list_add_tail(&be->list, &mdio_board_list); + mutex_unlock(&mdio_board_lock); + } + + return 0; +} diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h new file mode 100644 index 000000000000..00f98163e90e --- /dev/null +++ b/drivers/net/phy/mdio-boardinfo.h @@ -0,0 +1,19 @@ +/* + * mdio-boardinfo.h - board info interface internal to the mdio_bus + * component + */ + +#ifndef __MDIO_BOARD_INFO_H +#define __MDIO_BOARD_INFO_H + +#include +#include + +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + +void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus); + +#endif /* __MDIO_BOARD_INFO_H */ diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 653d076eafe5..fa7d51f14869 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -41,6 +41,8 @@ #define CREATE_TRACE_POINTS #include +#include "mdio-boardinfo.h" + int mdiobus_register_device(struct mdio_device *mdiodev) { if (mdiodev->bus->mdio_map[mdiodev->addr]) @@ -343,6 +345,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) } } + mdiobus_setup_mdiodev_from_board_info(bus); + bus->state = MDIOBUS_REGISTERED; pr_info("%s: probed\n", bus->name); return 0; diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index fc3aaaa36b1d..e24f28924af8 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -34,6 +34,17 @@ static void mdio_device_release(struct device *dev) kfree(to_mdio_device(dev)); } +int mdio_device_bus_match(struct device *dev, struct device_driver *drv) +{ + struct mdio_device *mdiodev = to_mdio_device(dev); + struct mdio_driver *mdiodrv = to_mdio_driver(drv); + + if (mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY) + return 0; + + return strcmp(mdiodev->modalias, drv->name) == 0; +} + struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr) { struct mdio_device *mdiodev; diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 55a80d73cfc1..ca08ab16ecdc 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -10,6 +10,7 @@ #define __LINUX_MDIO_H__ #include +#include struct mii_bus; @@ -29,6 +30,7 @@ struct mdio_device { const struct dev_pm_ops *pm_ops; struct mii_bus *bus; + char modalias[MDIO_NAME_SIZE]; int (*bus_match)(struct device *dev, struct device_driver *drv); void (*device_free)(struct mdio_device *mdiodev); @@ -71,6 +73,7 @@ int mdio_device_register(struct mdio_device *mdiodev); void mdio_device_remove(struct mdio_device *mdiodev); int mdio_driver_register(struct mdio_driver *drv); void mdio_driver_unregister(struct mdio_driver *drv); +int mdio_device_bus_match(struct device *dev, struct device_driver *drv); static inline bool mdio_phy_id_is_c45(int phy_id) { diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 8a57f0b1242d..8850fcaf50db 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -501,6 +501,7 @@ struct platform_device_id { kernel_ulong_t driver_data; }; +#define MDIO_NAME_SIZE 32 #define MDIO_MODULE_PREFIX "mdio:" #define MDIO_ID_FMT 
"%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" diff --git a/include/linux/phy.h b/include/linux/phy.h index 28ae9eafec19..d9bdf53e0514 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -886,6 +886,25 @@ void mdio_bus_exit(void); extern struct bus_type mdio_bus_type; +struct mdio_board_info { + const char *bus_id; + char modalias[MDIO_NAME_SIZE]; + int mdio_addr; + const void *platform_data; +}; + +#if IS_ENABLED(CONFIG_PHYLIB) +int mdiobus_register_board_info(const struct mdio_board_info *info, + unsigned int n); +#else +static inline int mdiobus_register_board_info(const struct mdio_board_info *i, + unsigned int n) +{ + return 0; +} +#endif + + /** * module_phy_driver() - Helper macro for registering PHY drivers * @__phy_drivers: array of PHY drivers to register -- cgit v1.2.3 From 575e93f7b5e622edfd031ebda660f5f3064f39a3 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 4 Feb 2017 13:02:45 -0800 Subject: ARM: orion: Register DSA switch as a MDIO device Utilize the ability to pass board specific MDIO bus information towards a particular MDIO device thus allowing us to provide the per-port switch layout to the Marvell 88E6XXX switch driver. Since we would end-up with conflicting registration paths, do not register the "dsa" platform device anymore. Note that the MDIO devices registered by code in net/dsa/dsa2.c does not parse a dsa_platform_data, but directly take a dsa_chip_data (specific to a single switch chip), so we update the different call sites to pass this structure down to orion_ge00_switch_init(). Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- arch/arm/mach-orion5x/common.c | 2 +- arch/arm/mach-orion5x/common.h | 4 ++-- arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c | 7 +------ arch/arm/mach-orion5x/rd88f5181l-ge-setup.c | 7 +------ arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c | 7 +------ arch/arm/mach-orion5x/wnr854t-setup.c | 2 +- arch/arm/mach-orion5x/wrt350n-v2-setup.c | 7 +------ arch/arm/plat-orion/common.c | 25 +++++++++++++++++++------ arch/arm/plat-orion/include/plat/common.h | 4 ++-- 9 files changed, 29 insertions(+), 36 deletions(-) diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 04910764c385..83a7ec4c16d0 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -105,7 +105,7 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data) /***************************************************************************** * Ethernet switch ****************************************************************************/ -void __init orion5x_eth_switch_init(struct dsa_platform_data *d) +void __init orion5x_eth_switch_init(struct dsa_chip_data *d) { orion_ge00_switch_init(d); } diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index 8a4115bd441d..efeffc6b4ebb 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h @@ -3,7 +3,7 @@ #include -struct dsa_platform_data; +struct dsa_chip_data; struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -41,7 +41,7 @@ void orion5x_setup_wins(void); void orion5x_ehci0_init(void); void orion5x_ehci1_init(void); void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data); -void orion5x_eth_switch_init(struct dsa_platform_data *d); +void orion5x_eth_switch_init(struct dsa_chip_data *d); void orion5x_i2c_init(void); void orion5x_sata_init(struct mv_sata_platform_data *sata_data); void orion5x_spi_init(void); diff --git 
a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c index dccadf68ea2b..a3c1336d30c9 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c @@ -101,11 +101,6 @@ static struct dsa_chip_data rd88f5181l_fxo_switch_chip_data = { .port_names[7] = "lan3", }; -static struct dsa_platform_data __initdata rd88f5181l_fxo_switch_plat_data = { - .nr_chips = 1, - .chip = &rd88f5181l_fxo_switch_chip_data, -}; - static void __init rd88f5181l_fxo_init(void) { /* @@ -120,7 +115,7 @@ static void __init rd88f5181l_fxo_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f5181l_fxo_eth_data); - orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data); + orion5x_eth_switch_init(&rd88f5181l_fxo_switch_chip_data); orion5x_uart0_init(); mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET, diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c index affe5ec825de..252efe29bd1a 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c @@ -102,11 +102,6 @@ static struct dsa_chip_data rd88f5181l_ge_switch_chip_data = { .port_names[7] = "lan3", }; -static struct dsa_platform_data __initdata rd88f5181l_ge_switch_plat_data = { - .nr_chips = 1, - .chip = &rd88f5181l_ge_switch_chip_data, -}; - static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = { I2C_BOARD_INFO("ds1338", 0x68), }; @@ -125,7 +120,7 @@ static void __init rd88f5181l_ge_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f5181l_ge_eth_data); - orion5x_eth_switch_init(&rd88f5181l_ge_switch_plat_data); + orion5x_eth_switch_init(&rd88f5181l_ge_switch_chip_data); orion5x_i2c_init(); orion5x_uart0_init(); diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c index 67ee8571b03c..f4f1dbe1d91d 100644 --- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c @@ -40,11 +40,6 @@ static struct dsa_chip_data rd88f6183ap_ge_switch_chip_data = { .port_names[5] = "cpu", }; -static struct dsa_platform_data __initdata rd88f6183ap_ge_switch_plat_data = { - .nr_chips = 1, - .chip = &rd88f6183ap_ge_switch_chip_data, -}; - static struct mtd_partition rd88f6183ap_ge_partitions[] = { { .name = "kernel", @@ -89,7 +84,7 @@ static void __init rd88f6183ap_ge_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f6183ap_ge_eth_data); - orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data); + orion5x_eth_switch_init(&rd88f6183ap_ge_switch_chip_data); spi_register_board_info(rd88f6183ap_ge_spi_slave_info, ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info)); orion5x_spi_init(); diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c index 4dbcdbe1de7c..ac3376fc269f 100644 --- a/arch/arm/mach-orion5x/wnr854t-setup.c +++ b/arch/arm/mach-orion5x/wnr854t-setup.c @@ -124,7 +124,7 @@ static void __init wnr854t_init(void) * Configure peripherals. 
*/ orion5x_eth_init(&wnr854t_eth_data); - orion5x_eth_switch_init(&wnr854t_switch_plat_data); + orion5x_eth_switch_init(&wnr854t_switch_chip_data); orion5x_uart0_init(); mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET, diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c index a6a8c4648d74..9250bb2e429c 100644 --- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c +++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c @@ -191,11 +191,6 @@ static struct dsa_chip_data wrt350n_v2_switch_chip_data = { .port_names[7] = "lan4", }; -static struct dsa_platform_data __initdata wrt350n_v2_switch_plat_data = { - .nr_chips = 1, - .chip = &wrt350n_v2_switch_chip_data, -}; - static void __init wrt350n_v2_init(void) { /* @@ -210,7 +205,7 @@ static void __init wrt350n_v2_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&wrt350n_v2_eth_data); - orion5x_eth_switch_init(&wrt350n_v2_switch_plat_data); + orion5x_eth_switch_init(&wrt350n_v2_switch_chip_data); orion5x_uart0_init(); mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET, diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 272f49b2c68f..9255b6d67ba5 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Create a clkdev entry for a given device/clk */ void __init orion_clkdev_add(const char *con_id, const char *dev_id, @@ -470,15 +471,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, /***************************************************************************** * Ethernet switch ****************************************************************************/ -void __init orion_ge00_switch_init(struct dsa_platform_data *d) +static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii"; +static __initdata struct mdio_board_info + orion_ge00_switch_board_info; + +void __init orion_ge00_switch_init(struct dsa_chip_data *d) { - int i; + struct mdio_board_info *bd; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(d->port_names); i++) + if (!strcmp(d->port_names[i], "cpu")) + break; - d->netdev = &orion_ge00.dev; - for (i = 0; i < d->nr_chips; i++) - d->chip[i].host_dev = &orion_ge_mvmdio.dev; + bd = &orion_ge00_switch_board_info; + bd->bus_id = orion_ge00_mvmdio_bus_name; + bd->mdio_addr = d->sw_addr; + d->netdev[i] = &orion_ge00.dev; + strcpy(bd->modalias, "mv88e6085"); + bd->platform_data = d; - platform_device_register_data(NULL, "dsa", 0, d, sizeof(d)); + mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); } /***************************************************************************** diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h index 9347f3c58a6d..3647d3b33c20 100644 --- a/arch/arm/plat-orion/include/plat/common.h +++ b/arch/arm/plat-orion/include/plat/common.h @@ -12,7 +12,7 @@ #include #include -struct dsa_platform_data; +struct dsa_chip_data; struct mv_sata_platform_data; void __init orion_uart0_init(void __iomem *membase, @@ -57,7 +57,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq); -void __init orion_ge00_switch_init(struct dsa_platform_data *d); +void __init orion_ge00_switch_init(struct dsa_chip_data *d); void __init orion_i2c_init(unsigned long mapbase, unsigned long irq, -- cgit v1.2.3 From c3afa9955382ea5496e8997b307690aaca6e6e2f Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 6 Feb 2017 15:25:52 +0000 Subject: net: 
wan: slic_ds26522: Use module_spi_driver to simplify the code module_spi_driver() makes the code simpler by eliminating boilerplate code. Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/wan/slic_ds26522.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c index 9d9b4e0def2a..2cdcee2be16c 100644 --- a/drivers/net/wan/slic_ds26522.c +++ b/drivers/net/wan/slic_ds26522.c @@ -249,15 +249,4 @@ static struct spi_driver slic_ds26522_driver = { .id_table = slic_ds26522_id, }; -static int __init slic_ds26522_init(void) -{ - return spi_register_driver(&slic_ds26522_driver); -} - -static void __exit slic_ds26522_exit(void) -{ - spi_unregister_driver(&slic_ds26522_driver); -} - -module_init(slic_ds26522_init); -module_exit(slic_ds26522_exit); +module_spi_driver(slic_ds26522_driver); -- cgit v1.2.3 From fee402211ff2d2f0a136a9d77b30d66aeea193b8 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 6 Feb 2017 15:26:17 +0000 Subject: net: wan: slic_ds26522: Remove .owner field for driver Remove .owner field if calls are used which set it automatically. Generated by: scripts/coccinelle/api/platform_no_drv_owner.cocci Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/wan/slic_ds26522.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c index 2cdcee2be16c..1f6bc8791d51 100644 --- a/drivers/net/wan/slic_ds26522.c +++ b/drivers/net/wan/slic_ds26522.c @@ -241,7 +241,6 @@ static struct spi_driver slic_ds26522_driver = { .driver = { .name = "ds26522", .bus = &spi_bus_type, - .owner = THIS_MODULE, .of_match_table = slic_ds26522_match, }, .probe = slic_ds26522_probe, -- cgit v1.2.3 From 89d82452d1fd283bb1e313bc7a1f7c324362cf02 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 6 Feb 2017 16:07:13 +0000 Subject: net/sched: act_mirred: remove duplicated include from act_mirred.c Remove duplicated include. Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- net/sched/act_mirred.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 84682f02b611..af49c7dca860 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -28,8 +28,6 @@ #include #include -#include - #define MIRRED_TAB_MASK 7 static LIST_HEAD(mirred_list); static DEFINE_SPINLOCK(mirred_list_lock); -- cgit v1.2.3 From bb4005bae3dab0dd21b239b1da193053151c07ba Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 6 Feb 2017 16:15:05 +0000 Subject: ipv6: sr: fix non static symbol warnings Fixes the following sparse warnings: net/ipv6/seg6_iptunnel.c:58:5: warning: symbol 'nla_put_srh' was not declared. Should it be static? net/ipv6/seg6_iptunnel.c:238:5: warning: symbol 'seg6_input' was not declared. Should it be static? net/ipv6/seg6_iptunnel.c:254:5: warning: symbol 'seg6_output' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. 
Miller --- net/ipv6/seg6_iptunnel.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 6124e159c882..85582257d3af 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -55,8 +55,8 @@ static const struct nla_policy seg6_iptunnel_policy[SEG6_IPTUNNEL_MAX + 1] = { [SEG6_IPTUNNEL_SRH] = { .type = NLA_BINARY }, }; -int nla_put_srh(struct sk_buff *skb, int attrtype, - struct seg6_iptunnel_encap *tuninfo) +static int nla_put_srh(struct sk_buff *skb, int attrtype, + struct seg6_iptunnel_encap *tuninfo) { struct seg6_iptunnel_encap *data; struct nlattr *nla; @@ -235,7 +235,7 @@ static int seg6_do_srh(struct sk_buff *skb) return 0; } -int seg6_input(struct sk_buff *skb) +static int seg6_input(struct sk_buff *skb) { int err; @@ -251,7 +251,7 @@ int seg6_input(struct sk_buff *skb) return dst_input(skb); } -int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) +static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *orig_dst = skb_dst(skb); struct dst_entry *dst = NULL; -- cgit v1.2.3 From 8d1fb01df8f64af14c833805dcbd05bf4582e028 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 6 Feb 2017 17:26:30 +0100 Subject: mlxsw: add psample dependency for spectrum When PSAMPLE is a loadable module, spectrum must not be built-in: drivers/net/built-in.o: In function `mlxsw_sp_rx_listener_sample_func': spectrum.c:(.text+0xe357e): undefined reference to `psample_sample_packet' This adds a Kconfig dependency to enforce usable configurations. Fixes: 98d0f7b9acda ("mlxsw: spectrum: Add packet sample offloading support") Signed-off-by: Arnd Bergmann Acked-by: Yotam Gigi Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 76a7574c3c7d..ef23eaedc2ff 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -73,6 +73,7 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q + depends on PSAMPLE || PSAMPLE=n select PARMAN default m ---help--- -- cgit v1.2.3 From b08d46b01e995dd7b653b22d35bd1d958d6ee9b4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 6 Feb 2017 13:01:16 -0800 Subject: net: phy: bcm7xxx: Add BCM74371 PHY ID Add the BCM74371 PHY ID to the list of supported chips. This is a 28nm technology Gigabit PHY SoC. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/bcm7xxx.c | 2 ++ include/linux/brcmphy.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index aa01020ab1b9..d1c2614dad3a 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -453,6 +453,7 @@ static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM74371, "Broadcom BCM74371"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), @@ -472,6 +473,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7362, 0xfffffff0, }, { PHY_ID_BCM7425, 0xfffffff0, }, { PHY_ID_BCM7429, 0xfffffff0, }, + { PHY_ID_BCM74371, 0xfffffff0, }, { PHY_ID_BCM7439, 0xfffffff0, }, { PHY_ID_BCM7435, 0xfffffff0, }, { PHY_ID_BCM7445, 0xfffffff0, }, diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 5881d1fdc1fb..55e517130311 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -33,6 +33,7 @@ #define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7435 0x600d8750 +#define PHY_ID_BCM74371 0xae0252e0 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 -- cgit v1.2.3 From 9b8805a325591cf5b6b9df71200de25a2bd721fd Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 6 Feb 2017 23:14:11 +0200 Subject: sock: add sk_dst_pending_confirm flag Add new sock flag to allow sockets to confirm neighbour. When same struct dst_entry can be used for many different neighbours we can not use it for pending confirmations. As not all call paths lock the socket use full word for the flag. Add sk_dst_confirm as replacement for dst_confirm when called for received packets. Signed-off-by: Julian Anastasov Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/sock.h | 12 ++++++++++++ net/core/sock.c | 2 ++ 2 files changed, 14 insertions(+) diff --git a/include/net/sock.h b/include/net/sock.h index 94e65fd70354..85d856b94b4b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -240,6 +240,7 @@ struct sock_common { * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux * @sk_dst_cache: destination cache + * @sk_dst_pending_confirm: need to confirm neighbour * @sk_policy: flow policy * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed @@ -393,6 +394,8 @@ struct sock { struct sk_buff_head sk_write_queue; __s32 sk_peek_off; int sk_write_pending; + __u32 sk_dst_pending_confirm; + /* Note: 32bit hole on 64bit arches */ long sk_sndtimeo; struct timer_list sk_timer; __u32 sk_priority; @@ -1764,6 +1767,7 @@ static inline void dst_negative_advice(struct sock *sk) if (ndst != dst) { rcu_assign_pointer(sk->sk_dst_cache, ndst); sk_tx_queue_clear(sk); + sk->sk_dst_pending_confirm = 0; } } } @@ -1774,6 +1778,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old_dst; sk_tx_queue_clear(sk); + sk->sk_dst_pending_confirm = 0; /* * This can be called while sk is owned by the caller only, * with no state that can be checked in a rcu_dereference_check() cond @@ -1789,6 +1794,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old_dst; sk_tx_queue_clear(sk); + sk->sk_dst_pending_confirm = 0; old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); dst_release(old_dst); } @@ -1809,6 +1815,12 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); +static inline void sk_dst_confirm(struct sock *sk) +{ + if (!sk->sk_dst_pending_confirm) + sk->sk_dst_pending_confirm = 1; +} + bool sk_mc_loop(struct sock *sk); static inline bool sk_can_gso(const struct sock *sk) diff --git a/net/core/sock.c b/net/core/sock.c index 8b35debfe454..b74356535559 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -502,6 +502,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { sk_tx_queue_clear(sk); + sk->sk_dst_pending_confirm = 0; RCU_INIT_POINTER(sk->sk_dst_cache, NULL); dst_release(dst); return NULL; @@ -1519,6 +1520,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) af_family_clock_key_strings[newsk->sk_family]); newsk->sk_dst_cache = NULL; + newsk->sk_dst_pending_confirm = 0; newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; atomic_set(&newsk->sk_drops, 0); -- cgit v1.2.3 From 4ff0620354f2b39b9fe2a91c22c4de9d1fba0c8e Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 6 Feb 2017 23:14:12 +0200 Subject: net: add dst_pending_confirm flag to skbuff Add new skbuff flag to allow protocols to confirm neighbour. When same struct dst_entry can be used for many different neighbours we can not use it for pending confirmations. Add sock_confirm_neigh() helper to confirm the neighbour and use it for IPv4, IPv6 and VRF before dst_neigh_output. Signed-off-by: Julian Anastasov Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/vrf.c | 5 ++++- include/linux/skbuff.h | 12 ++++++++++++ include/net/sock.h | 14 ++++++++++++++ net/ipv4/ip_output.c | 5 ++++- net/ipv6/ip6_output.c | 1 + 5 files changed, 35 insertions(+), 2 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 264fc1585b3c..630eafdb79e8 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -378,6 +378,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { + sock_confirm_neigh(skb, neigh); ret = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return ret; @@ -574,8 +575,10 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s neigh = __ipv4_neigh_lookup_noref(dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); - if (!IS_ERR(neigh)) + if (!IS_ERR(neigh)) { + sock_confirm_neigh(skb, neigh); ret = dst_neigh_output(dst, neigh, skb); + } rcu_read_unlock_bh(); err: diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c6a78e1892b6..f1adddc1c5ac 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -612,6 +612,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS + * @dst_pending_confirm: need to confirm neighbour * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark @@ -741,6 +742,7 @@ struct sk_buff { __u8 csum_level:2; __u8 csum_bad:1; + __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif @@ -3698,6 +3700,16 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) return skb->queue_mapping != 0; } +static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) +{ + skb->dst_pending_confirm = val; +} + +static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) +{ + return skb->dst_pending_confirm != 0; +} + static inline struct sec_path *skb_sec_path(struct sk_buff *skb) { #ifdef CONFIG_XFRM diff --git a/include/net/sock.h b/include/net/sock.h index 85d856b94b4b..6f83e78eaa5a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1821,6 +1821,20 @@ static inline void sk_dst_confirm(struct sock *sk) sk->sk_dst_pending_confirm = 1; } +static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) +{ + if (skb_get_dst_pending_confirm(skb)) { + struct sock *sk = skb->sk; + unsigned long now = jiffies; + + /* avoid dirtying neighbour */ + if (n->confirmed != now) + n->confirmed = now; + if (sk && sk->sk_dst_pending_confirm) + sk->sk_dst_pending_confirm = 0; + } +} + bool sk_mc_loop(struct sock *sk); static inline bool sk_can_gso(const struct sock *sk) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b67719f45953..c9fc32fa3272 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -222,7 +222,10 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); if (!IS_ERR(neigh)) { - int res = dst_neigh_output(dst, neigh, skb); + int res; + + sock_confirm_neigh(skb, neigh); + res = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return res; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index b6a94ff0bbd0..14d99fbf102e 100644 --- 
a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -119,6 +119,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { + sock_confirm_neigh(skb, neigh); ret = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return ret; -- cgit v1.2.3 From c86a773c78025f5b825bacd7b846f4fa60dc0317 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 6 Feb 2017 23:14:13 +0200 Subject: sctp: add dst_pending_confirm flag Add new transport flag to allow sockets to confirm neighbour. When same struct dst_entry can be used for many different neighbours we can not use it for pending confirmations. The flag is propagated from transport to every packet. It is reset when cached dst is reset. Reported-by: YueHaibing Fixes: 5110effee8fd ("net: Do delayed neigh confirmation.") Fixes: f2bb4bedf35d ("ipv4: Cache output routes in fib_info nexthops.") Signed-off-by: Julian Anastasov Acked-by: Eric Dumazet Acked-by: Neil Horman Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 6 ++---- include/net/sctp/structs.h | 4 ++++ net/sctp/associola.c | 3 +-- net/sctp/output.c | 10 +++++++++- net/sctp/outqueue.c | 2 +- net/sctp/sm_make_chunk.c | 6 ++---- net/sctp/sm_sideeffect.c | 2 +- net/sctp/socket.c | 4 ++-- net/sctp/transport.c | 16 +++++++++++++++- 9 files changed, 37 insertions(+), 16 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 3cfd365bcfbc..480b65a24aff 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -593,10 +593,8 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && !dst_check(t->dst, t->dst_cookie)) { - dst_release(t->dst); - t->dst = NULL; - } + if (t->dst && !dst_check(t->dst, t->dst_cookie)) + sctp_transport_dst_release(t); return t->dst; } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 231fa9ac50bd..6a685049f67f 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -804,6 +804,8 @@ struct sctp_transport { __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ + __u32 dst_pending_confirm; /* need to confirm neighbour */ + /* Destination */ struct dst_entry *dst; /* Source address. 
*/ @@ -950,6 +952,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *); void sctp_transport_reset(struct sctp_transport *); void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); void sctp_transport_immediate_rtx(struct sctp_transport *); +void sctp_transport_dst_release(struct sctp_transport *t); +void sctp_transport_dst_confirm(struct sctp_transport *t); /* This is the structure we use to queue packets as they come into diff --git a/net/sctp/associola.c b/net/sctp/associola.c index e50dc6d7543f..2a6835b4562b 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -832,8 +832,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, if (transport->state != SCTP_UNCONFIRMED) transport->state = SCTP_INACTIVE; else { - dst_release(transport->dst); - transport->dst = NULL; + sctp_transport_dst_release(transport); ulp_notify = false; } diff --git a/net/sctp/output.c b/net/sctp/output.c index 07ab5062e541..814eac047467 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -546,6 +546,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) struct sctp_association *asoc = tp->asoc; struct sctp_chunk *chunk, *tmp; int pkt_count, gso = 0; + int confirm; struct dst_entry *dst; struct sk_buff *head; struct sctphdr *sh; @@ -624,7 +625,14 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) asoc->peer.last_sent_to = tp; } head->ignore_df = packet->ipfragok; - tp->af_specific->sctp_xmit(head, tp); + confirm = tp->dst_pending_confirm; + if (confirm) + skb_set_dst_pending_confirm(head, 1); + /* neighbour should be confirmed on successful transmission or + * positive error + */ + if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm) + tp->dst_pending_confirm = 0; out: list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 65abe22d8691..db352e5d61f8 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1654,7 +1654,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, if (forward_progress) { if (transport->dst) - dst_confirm(transport->dst); + sctp_transport_dst_confirm(transport); } } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ad3445b3408e..c7d3249f88ec 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3333,8 +3333,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc, local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { - dst_release(transport->dst); - transport->dst = NULL; + sctp_transport_dst_release(transport); } break; case SCTP_PARAM_DEL_IP: @@ -3348,8 +3347,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc, local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { - dst_release(transport->dst); - transport->dst = NULL; + sctp_transport_dst_release(transport); } break; default: diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index a4552712b882..51abcc90fe75 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -755,7 +755,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, * forward progress. 
*/ if (t->dst) - dst_confirm(t->dst); + sctp_transport_dst_confirm(t); /* The receiver of the HEARTBEAT ACK should also perform an * RTT measurement for that destination transport address diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 5fc7122c76de..a4609a0be76d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -592,7 +592,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk, list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { /* Clear the source and route cache */ - dst_release(trans->dst); + sctp_transport_dst_release(trans); trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); trans->ssthresh = asoc->peer.i.a_rwnd; @@ -843,7 +843,7 @@ skip_mkasconf: */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { - dst_release(transport->dst); + sctp_transport_dst_release(transport); sctp_transport_route(transport, NULL, sctp_sk(asoc->base.sk)); } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index baa1ac00d7b5..5b63ceb3bf37 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -240,7 +240,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) { /* If we don't have a fresh route, look one up */ if (!transport->dst || transport->dst->obsolete) { - dst_release(transport->dst); + sctp_transport_dst_release(transport); transport->af_specific->get_dst(transport, &transport->saddr, &transport->fl, sk); } @@ -672,3 +672,17 @@ void sctp_transport_immediate_rtx(struct sctp_transport *t) sctp_transport_hold(t); } } + +/* Drop dst */ +void sctp_transport_dst_release(struct sctp_transport *t) +{ + dst_release(t->dst); + t->dst = NULL; + t->dst_pending_confirm = 0; +} + +/* Schedule neighbour confirm */ +void sctp_transport_dst_confirm(struct sctp_transport *t) +{ + t->dst_pending_confirm = 1; +} -- cgit v1.2.3 From c3a2e8370534f810cac6050169db0ed3e0f94f0b Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 6 Feb 2017 23:14:14 +0200 Subject: tcp: replace dst_confirm with sk_dst_confirm When same struct dst_entry can be used for many different neighbours we can not use it for pending confirmations. Use the new sk_dst_confirm() helper to propagate the indication from received packets to sock_confirm_neigh(). Reported-by: YueHaibing Fixes: 5110effee8fd ("net: Do delayed neigh confirmation.") Fixes: f2bb4bedf35d ("ipv4: Cache output routes in fib_info nexthops.") Tested-by: YueHaibing Signed-off-by: Julian Anastasov Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 12 +++--------- net/ipv4/tcp_metrics.c | 7 ++----- net/ipv4/tcp_output.c | 2 ++ 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 27c95acbb52f..2c0ff327b6df 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3644,11 +3644,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); - if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { - struct dst_entry *dst = __sk_dst_get(sk); - if (dst) - dst_confirm(dst); - } + if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) + sk_dst_confirm(sk); if (icsk->icsk_pending == ICSK_TIME_RETRANS) tcp_schedule_loss_probe(sk); @@ -5995,7 +5992,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) break; case TCP_FIN_WAIT1: { - struct dst_entry *dst; int tmo; /* If we enter the TCP_FIN_WAIT1 state and we are a @@ -6022,9 +6018,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_set_state(sk, TCP_FIN_WAIT2); sk->sk_shutdown |= SEND_SHUTDOWN; - dst = __sk_dst_get(sk); - if (dst) - dst_confirm(dst); + sk_dst_confirm(sk); if (!sock_flag(sk, SOCK_DEAD)) { /* Wake up lingering close() */ diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index b9ed0d50aead..0f46e5fe31ad 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -375,12 +375,10 @@ void tcp_update_metrics(struct sock *sk) u32 val; int m; + sk_dst_confirm(sk); if (sysctl_tcp_nometrics_save || !dst) return; - if (dst->flags & DST_HOST) - dst_confirm(dst); - rcu_read_lock(); if (icsk->icsk_backoff || !tp->srtt_us) { /* This session failed to estimate rtt. Why? @@ -493,11 +491,10 @@ void tcp_init_metrics(struct sock *sk) struct tcp_metrics_block *tm; u32 val, crtt = 0; /* cached RTT scaled by 8 */ + sk_dst_confirm(sk); if (!dst) goto reset; - dst_confirm(dst); - rcu_read_lock(); tm = tcp_get_metrics(sk, dst, true); if (!tm) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index ccf1ef4dcba4..61f272a99a49 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -980,6 +980,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, skb_set_hash_from_sk(skb, sk); atomic_add(skb->truesize, &sk->sk_wmem_alloc); + skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm); + /* Build TCP header and checksum it. */ th = (struct tcphdr *)skb->data; th->source = inet->inet_sport; -- cgit v1.2.3 From 63fca65d08632fbec9d9b655f671cf08aa1aeeb8 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 6 Feb 2017 23:14:15 +0200 Subject: net: add confirm_neigh method to dst_ops Add confirm_neigh method to dst_ops and use it from IPv4 and IPv6 to lookup and confirm the neighbour. Its usage via the new helper dst_confirm_neigh() should be restricted to MSG_PROBE users for performance reasons. For XFRM prefer the last tunnel address, if present. With help from Steffen Klassert. Signed-off-by: Julian Anastasov Acked-by: Steffen Klassert Signed-off-by: David S. 
Miller --- include/net/arp.h | 16 ++++++++++++++++ include/net/dst.h | 7 +++++++ include/net/dst_ops.h | 2 ++ include/net/ndisc.h | 17 +++++++++++++++++ net/ipv4/route.c | 19 +++++++++++++++++++ net/ipv6/route.c | 16 ++++++++++++++++ net/xfrm/xfrm_policy.c | 19 +++++++++++++++++++ 7 files changed, 96 insertions(+) diff --git a/include/net/arp.h b/include/net/arp.h index 5e0f891d476c..65619a2de6f4 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -35,6 +35,22 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 return n; } +static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key) +{ + struct neighbour *n; + + rcu_read_lock_bh(); + n = __ipv4_neigh_lookup_noref(dev, key); + if (n) { + unsigned long now = jiffies; + + /* avoid dirtying neighbour */ + if (n->confirmed != now) + n->confirmed = now; + } + rcu_read_unlock_bh(); +} + void arp_init(void); int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); void arp_send(int type, int ptype, __be32 dest_ip, diff --git a/include/net/dst.h b/include/net/dst.h index 6835d224d47b..3a3b34b83b00 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -477,6 +477,13 @@ static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst return IS_ERR(n) ? NULL : n; } +static inline void dst_confirm_neigh(const struct dst_entry *dst, + const void *daddr) +{ + if (dst->ops->confirm_neigh) + dst->ops->confirm_neigh(dst, daddr); +} + static inline void dst_link_failure(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 8a2b66d8d78d..c84b3287e38b 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -33,6 +33,8 @@ struct dst_ops { struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); + void (*confirm_neigh)(const struct dst_entry *dst, + const void *daddr); struct kmem_cache *kmem_cachep; diff --git a/include/net/ndisc.h b/include/net/ndisc.h index d562a2fe4860..8a0214654b6b 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -391,6 +391,23 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons return n; } +static inline void __ipv6_confirm_neigh(struct net_device *dev, + const void *pkey) +{ + struct neighbour *n; + + rcu_read_lock_bh(); + n = __ipv6_neigh_lookup_noref(dev, pkey); + if (n) { + unsigned long now = jiffies; + + /* avoid dirtying neighbour */ + if (n->confirmed != now) + n->confirmed = now; + } + rcu_read_unlock_bh(); +} + int ndisc_init(void); int ndisc_late_init(void); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 4b7c231c1aef..cb494a5050f7 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -154,6 +154,7 @@ static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); +static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr); static struct dst_ops ipv4_dst_ops = { .family = AF_INET, @@ -168,6 +169,7 @@ static struct dst_ops ipv4_dst_ops = { .redirect = ip_do_redirect, .local_out = __ip_local_out, .neigh_lookup = ipv4_neigh_lookup, + .confirm_neigh = ipv4_confirm_neigh, }; #define ECN_OR_COST(class) TC_PRIO_##class @@ -461,6 +463,23 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, return neigh_create(&arp_tbl, pkey, dev); } +static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr) +{ + 
struct net_device *dev = dst->dev; + const __be32 *pkey = daddr; + const struct rtable *rt; + + rt = (const struct rtable *)dst; + if (rt->rt_gateway) + pkey = (const __be32 *)&rt->rt_gateway; + else if (!daddr || + (rt->rt_flags & + (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) + return; + + __ipv4_confirm_neigh(dev, *(__force u32 *)pkey); +} + #define IP_IDENTS_SZ 2048u static atomic_t *ip_idents __read_mostly; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8ffa24cc8899..98b183f1bc8b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -223,6 +223,21 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, return neigh_create(&nd_tbl, daddr, dst->dev); } +static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) +{ + struct net_device *dev = dst->dev; + struct rt6_info *rt = (struct rt6_info *)dst; + + daddr = choose_neigh_daddr(rt, NULL, daddr); + if (!daddr) + return; + if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) + return; + if (ipv6_addr_is_multicast((const struct in6_addr *)daddr)) + return; + __ipv6_confirm_neigh(dev, daddr); +} + static struct dst_ops ip6_dst_ops_template = { .family = AF_INET6, .gc = ip6_dst_gc, @@ -239,6 +254,7 @@ static struct dst_ops ip6_dst_ops_template = { .redirect = rt6_do_redirect, .local_out = __ip6_local_out, .neigh_lookup = ip6_neigh_lookup, + .confirm_neigh = ip6_confirm_neigh, }; static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 99ad1af2927f..0a0f63d3cc96 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2856,6 +2856,23 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, return dst->path->ops->neigh_lookup(dst, skb, daddr); } +static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) +{ + const struct dst_entry *path = dst->path; + + for (; dst != path; dst = dst->child) { + const struct xfrm_state *xfrm = dst->xfrm; + + if (xfrm->props.mode == XFRM_MODE_TRANSPORT) + continue; + if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) + daddr = xfrm->coaddr; + else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) + daddr = &xfrm->id.daddr; + } + path->ops->confirm_neigh(path, daddr); +} + int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) { int err = 0; @@ -2882,6 +2899,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->link_failure = xfrm_link_failure; if (likely(dst_ops->neigh_lookup == NULL)) dst_ops->neigh_lookup = xfrm_neigh_lookup; + if (likely(!dst_ops->confirm_neigh)) + dst_ops->confirm_neigh = xfrm_confirm_neigh; if (likely(afinfo->garbage_collect == NULL)) afinfo->garbage_collect = xfrm_garbage_collect_deferred; rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); -- cgit v1.2.3 From 0dec879f636f11b0ffda1cb5fd96a1754c59ead3 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 6 Feb 2017 23:14:16 +0200 Subject: net: use dst_confirm_neigh for UDP, RAW, ICMP, L2TP When same struct dst_entry can be used for many different neighbours we can not use it for pending confirmations. The datagram protocols can use MSG_CONFIRM to confirm the neighbour. When used with MSG_PROBE we do not reach the code where neighbour is confirmed, so we have to do the same slow lookup by using the dst_confirm_neigh() helper. When MSG_PROBE is not used, ip_append_data/ip6_append_data will set the skb flag dst_pending_confirm. 
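As a rough illustration of the control flow described above, a minimal standalone model in plain C is sketched below. It is not kernel code: fake_skb, confirm_neigh_now() and mark_skb() are stand-ins for the real sk_buff, the dst_confirm_neigh() helper and the skb_set_dst_pending_confirm() call made from ip_append_data()/ip6_append_data().

#include <stdbool.h>
#include <stdio.h>

#define MSG_CONFIRM 0x0800	/* same numeric values as the Linux uapi flags */
#define MSG_PROBE   0x0010

struct fake_skb {
	bool dst_pending_confirm;
};

/* stand-in for dst_confirm_neigh(): slow neighbour lookup + confirm */
static void confirm_neigh_now(const char *daddr)
{
	printf("confirm neighbour for %s now (MSG_PROBE path)\n", daddr);
}

/* stand-in for skb_set_dst_pending_confirm() in ip*_append_data() */
static void mark_skb(struct fake_skb *skb)
{
	skb->dst_pending_confirm = true;	/* confirmed later, at neigh output */
}

static void sendmsg_model(int flags, const char *daddr, struct fake_skb *skb)
{
	if (!(flags & MSG_CONFIRM))
		return;
	if (flags & MSG_PROBE)
		confirm_neigh_now(daddr);	/* no packet is built, confirm directly */
	else
		mark_skb(skb);			/* normal transmit path defers it */
}

int main(void)
{
	struct fake_skb skb = { false };

	sendmsg_model(MSG_CONFIRM | MSG_PROBE, "192.0.2.1", &skb);
	sendmsg_model(MSG_CONFIRM, "192.0.2.1", &skb);
	printf("skb dst_pending_confirm=%d\n", skb.dst_pending_confirm);
	return 0;
}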
Reported-by: YueHaibing Fixes: 5110effee8fd ("net: Do delayed neigh confirmation.") Fixes: f2bb4bedf35d ("ipv4: Cache output routes in fib_info nexthops.") Signed-off-by: Julian Anastasov Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/ip_output.c | 6 ++++++ net/ipv4/ping.c | 3 ++- net/ipv4/raw.c | 6 +++++- net/ipv4/udp.c | 3 ++- net/ipv6/ip6_output.c | 6 ++++++ net/ipv6/raw.c | 6 +++++- net/ipv6/route.c | 27 ++++++++++++++------------- net/ipv6/udp.c | 3 ++- net/l2tp/l2tp_ip6.c | 3 ++- 9 files changed, 44 insertions(+), 19 deletions(-) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index c9fc32fa3272..7a719f1ae556 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -889,6 +889,9 @@ static inline int ip_ufo_append_data(struct sock *sk, skb->csum = 0; + if (flags & MSG_CONFIRM) + skb_set_dst_pending_confirm(skb, 1); + __skb_queue_tail(queue, skb); } else if (skb_is_gso(skb)) { goto append; @@ -1089,6 +1092,9 @@ alloc_new_skb: exthdrlen = 0; csummode = CHECKSUM_NONE; + if ((flags & MSG_CONFIRM) && !skb_prev) + skb_set_dst_pending_confirm(skb, 1); + /* * Put the packet on the pending queue. */ diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 592db6a3a0e9..6ee792d83d5b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -848,7 +848,8 @@ out: return err; do_confirm: - dst_confirm(&rt->dst); + if (msg->msg_flags & MSG_PROBE) + dst_confirm_neigh(&rt->dst, &fl4.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 4e49e5cb001c..8119e1f66e03 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -383,6 +383,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); + if (flags & MSG_CONFIRM) + skb_set_dst_pending_confirm(skb, 1); + skb->transport_header = skb->network_header; err = -EFAULT; if (memcpy_from_msg(iph, msg, length)) @@ -666,7 +669,8 @@ out: return len; do_confirm: - dst_confirm(&rt->dst); + if (msg->msg_flags & MSG_PROBE) + dst_confirm_neigh(&rt->dst, &fl4.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cf6ba3387401..4a1ba04565d1 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1101,7 +1101,8 @@ out: return err; do_confirm: - dst_confirm(&rt->dst); + if (msg->msg_flags & MSG_PROBE) + dst_confirm_neigh(&rt->dst, &fl4->daddr); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 14d99fbf102e..d299040613a0 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1145,6 +1145,9 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb->protocol = htons(ETH_P_IPV6); skb->csum = 0; + if (flags & MSG_CONFIRM) + skb_set_dst_pending_confirm(skb, 1); + __skb_queue_tail(queue, skb); } else if (skb_is_gso(skb)) { goto append; @@ -1517,6 +1520,9 @@ alloc_new_skb: exthdrlen = 0; dst_exthdrlen = 0; + if ((flags & MSG_CONFIRM) && !skb_prev) + skb_set_dst_pending_confirm(skb, 1); + /* * Put the packet on the pending queue */ diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ea89073c8247..f174e76e6505 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -654,6 +654,9 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, skb->ip_summed = CHECKSUM_NONE; + if (flags & MSG_CONFIRM) + skb_set_dst_pending_confirm(skb, 1); + skb->transport_header = skb->network_header; err = memcpy_from_msg(iph, msg, 
length); if (err) @@ -934,7 +937,8 @@ out: txopt_put(opt_to_free); return err < 0 ? err : len; do_confirm: - dst_confirm(dst); + if (msg->msg_flags & MSG_PROBE) + dst_confirm_neigh(dst, &fl6.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 98b183f1bc8b..f54f4265b37f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1381,6 +1381,7 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, const struct ipv6hdr *iph, u32 mtu) { + const struct in6_addr *daddr, *saddr; struct rt6_info *rt6 = (struct rt6_info *)dst; if (rt6->rt6i_flags & RTF_LOCAL) @@ -1389,26 +1390,26 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, if (dst_metric_locked(dst, RTAX_MTU)) return; - dst_confirm(dst); + if (iph) { + daddr = &iph->daddr; + saddr = &iph->saddr; + } else if (sk) { + daddr = &sk->sk_v6_daddr; + saddr = &inet6_sk(sk)->saddr; + } else { + daddr = NULL; + saddr = NULL; + } + dst_confirm_neigh(dst, daddr); mtu = max_t(u32, mtu, IPV6_MIN_MTU); if (mtu >= dst_mtu(dst)) return; if (!rt6_cache_allowed_for_pmtu(rt6)) { rt6_do_update_pmtu(rt6, mtu); - } else { - const struct in6_addr *daddr, *saddr; + } else if (daddr) { struct rt6_info *nrt6; - if (iph) { - daddr = &iph->daddr; - saddr = &iph->saddr; - } else if (sk) { - daddr = &sk->sk_v6_daddr; - saddr = &inet6_sk(sk)->saddr; - } else { - return; - } nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr); if (nrt6) { rt6_do_update_pmtu(nrt6, mtu); @@ -2332,7 +2333,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu * Look, redirects are sent only in response to data packets, * so that this nexthop apparently is reachable. --ANK */ - dst_confirm(&rt->dst); + dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr); neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1); if (!neigh) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b4c6516a3a0c..51346fa70298 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1308,7 +1308,8 @@ out: return err; do_confirm: - dst_confirm(dst); + if (msg->msg_flags & MSG_PROBE) + dst_confirm_neigh(dst, &fl6.daddr); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 4b06eb415f68..734798a00ca0 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -658,7 +658,8 @@ out: return err < 0 ? err : len; do_confirm: - dst_confirm(dst); + if (msg->msg_flags & MSG_PROBE) + dst_confirm_neigh(dst, &fl6.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; -- cgit v1.2.3 From 51ce8bd4d17a761e1a90a34a1b5c9b762cce7553 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 6 Feb 2017 23:14:17 +0200 Subject: net: pending_confirm is not used anymore When same struct dst_entry can be used for many different neighbours we can not use it for pending confirmations. As last step, we can remove the pending_confirm flag. Reported-by: YueHaibing Fixes: 5110effee8fd ("net: Do delayed neigh confirmation.") Fixes: f2bb4bedf35d ("ipv4: Cache output routes in fib_info nexthops.") Signed-off-by: Julian Anastasov Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/dst.h | 14 ++------------ net/core/dst.c | 1 - 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/include/net/dst.h b/include/net/dst.h index 3a3b34b83b00..84a1043dd6a1 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -59,8 +59,6 @@ struct dst_entry { #define DST_XFRM_QUEUE 0x0100 #define DST_METADATA 0x0200 - unsigned short pending_confirm; - short error; /* A non-zero value of dst->obsolete forces by-hand validation @@ -78,6 +76,8 @@ struct dst_entry { #define DST_OBSOLETE_KILL -2 unsigned short header_len; /* more space at head required */ unsigned short trailer_len; /* space to reserve at tail */ + unsigned short __pad3; + #ifdef CONFIG_IP_ROUTE_CLASSID __u32 tclassid; #else @@ -440,7 +440,6 @@ static inline void dst_rcu_free(struct rcu_head *head) static inline void dst_confirm(struct dst_entry *dst) { - dst->pending_confirm = 1; } static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, @@ -448,15 +447,6 @@ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, { const struct hh_cache *hh; - if (dst->pending_confirm) { - unsigned long now = jiffies; - - dst->pending_confirm = 0; - /* avoid dirtying neighbour */ - if (n->confirmed != now) - n->confirmed = now; - } - hh = &n->hh; if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) return neigh_hh_output(hh, skb); diff --git a/net/core/dst.c b/net/core/dst.c index b5cbbe07f786..960e503b5a52 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -190,7 +190,6 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, dst->__use = 0; dst->lastuse = jiffies; dst->flags = flags; - dst->pending_confirm = 0; dst->next = NULL; if (!(flags & DST_NOCOUNT)) dst_entries_add(ops, 1); -- cgit v1.2.3 From b44700e975848a9a569a509244672ff886ec99b3 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Mon, 6 Feb 2017 15:34:52 -0600 Subject: net: qcom/emac: add ethtool support for setting pause parameters To support setting the pause parameters, the driver can no longer just mirror the PHY. The set_pauseparam feature allows the driver to force the setting in the MAC, regardless of how the PHY is configured. This means that we now need to maintain an internal state for pause frame support, and so get_pauseparam also needs to be updated. If the interface is already running when the setting is changed, then the interface is reset. Note that if the MAC is configured to enable RX pause frame support (i.e. it transmits pause frames to throttle the other end), but the PHY is configured to block those frames, then the feature will not work. Also move some buffer size initialization code into emac_init_adapter(), so that it lives with similar code, including the initialization of pause frame support. Signed-off-by: Timur Tabi Signed-off-by: David S.
Miller --- drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 30 ++++++++++++++++------- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 18 ++++++++++---- drivers/net/ethernet/qualcomm/emac/emac.c | 11 ++++++--- drivers/net/ethernet/qualcomm/emac/emac.h | 7 ++++++ 4 files changed, 48 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index cfc57d2c64f9..c418a6e9a591 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -148,16 +148,26 @@ static void emac_get_ringparam(struct net_device *netdev, static void emac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { - struct phy_device *phydev = netdev->phydev; + struct emac_adapter *adpt = netdev_priv(netdev); - if (phydev) { - if (phydev->autoneg) - pause->autoneg = 1; - if (phydev->pause) - pause->rx_pause = 1; - if (phydev->pause != phydev->asym_pause) - pause->tx_pause = 1; - } + pause->autoneg = adpt->automatic ? AUTONEG_ENABLE : AUTONEG_DISABLE; + pause->rx_pause = adpt->rx_flow_control ? 1 : 0; + pause->tx_pause = adpt->tx_flow_control ? 1 : 0;; +} + +static int emac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->automatic = pause->autoneg == AUTONEG_ENABLE; + adpt->rx_flow_control = pause->rx_pause != 0; + adpt->tx_flow_control = pause->tx_pause != 0; + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; } static const struct ethtool_ops emac_ethtool_ops = { @@ -172,7 +182,9 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_ethtool_stats = emac_get_ethtool_stats, .get_ringparam = emac_get_ringparam, + .get_pauseparam = emac_get_pauseparam, + .set_pauseparam = emac_set_pauseparam, .nway_reset = emac_nway_reset, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index b991219862b1..4b3e0144ad5e 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -565,11 +565,19 @@ static void emac_mac_start(struct emac_adapter *adpt) mac |= TXEN | RXEN; /* enable RX/TX */ - /* Configure MAC flow control to match the PHY's settings. */ - if (phydev->pause) - mac |= RXFC; - if (phydev->pause != phydev->asym_pause) - mac |= TXFC; + /* Configure MAC flow control. If set to automatic, then match + * whatever the PHY does. Otherwise, enable or disable it, depending + * on what the user configured via ethtool. + */ + mac &= ~(RXFC | TXFC); + + if (adpt->automatic) { + /* If it's set to automatic, then update our local values */ + adpt->rx_flow_control = phydev->pause; + adpt->tx_flow_control = phydev->pause != phydev->asym_pause; + } + mac |= adpt->rx_flow_control ? RXFC : 0; + mac |= adpt->tx_flow_control ? 
TXFC : 0; /* setup link speed */ mac &= ~SPEED_MASK; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 3387c0a88746..28a8cdc36485 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -436,6 +436,10 @@ static void emac_init_adapter(struct emac_adapter *adpt) { u32 reg; + adpt->rrd_size = EMAC_RRD_SIZE; + adpt->tpd_size = EMAC_TPD_SIZE; + adpt->rfd_size = EMAC_RFD_SIZE; + /* descriptors */ adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS; adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS; @@ -456,6 +460,9 @@ static void emac_init_adapter(struct emac_adapter *adpt) /* others */ adpt->preamble = EMAC_PREAMBLE_DEF; + + /* default to automatic flow control */ + adpt->automatic = true; } /* Get the clock */ @@ -675,10 +682,6 @@ static int emac_probe(struct platform_device *pdev) netdev->watchdog_timeo = EMAC_WATCHDOG_TIME; netdev->irq = adpt->irq.irq; - adpt->rrd_size = EMAC_RRD_SIZE; - adpt->tpd_size = EMAC_TPD_SIZE; - adpt->rfd_size = EMAC_RFD_SIZE; - netdev->netdev_ops = &emac_netdev_ops; emac_init_adapter(adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index ef91dcc7f646..e77fb6966cbb 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -306,6 +306,13 @@ struct emac_adapter { unsigned int rxbuf_size; + /* Flow control / pause frames support. If automatic=True, do whatever + * the PHY does. Otherwise, use tx_flow_control and rx_flow_control. + */ + bool automatic; + bool tx_flow_control; + bool rx_flow_control; + /* Ring parameter */ u8 tpd_burst; u8 rfd_burst; -- cgit v1.2.3 From 6bb19474391d17954fee9a9997ecca25b35dfd46 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:32 -0500 Subject: bnxt_en: Refactor rx SKB function. Minor refactoring of bnxt_rx_skb() so that it can easily be replaced by a new function that handles packets in a single page. Also, use a function pointer bp->rx_skb_func() to switch to a new function when we add the new mode in the next patch. Add a new field data_ptr that points to the packet data in the bnxt_sw_rx_bd structure. The original data field is changed to void pointer so that it can either hold the kmalloc'ed data or a page pointer. The last parameter of bnxt_rx_skb() which was the length parameter is changed to include the payload offset of the packet in the upper 16 bit. The offset is needed to support the rx page mode and is not used in this existing function. v3: Added a new data_ptr parameter to bp->rx_skb_func(). The caller has the option to modify the starting address of the packet. This will be needed when XDP with headroom support is added. v2: Changed the name of the last parameter to offset_and_len to make the code more clear. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 49 ++++++++++++++++++++++--------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 11 +++++-- 2 files changed, 44 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aff3dc114a5b..94f3d7805036 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -607,6 +607,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt *bp, return -ENOMEM; rx_buf->data = data; + rx_buf->data_ptr = data + BNXT_RX_OFFSET; dma_unmap_addr_set(rx_buf, mapping, mapping); rxbd->rx_bd_haddr = cpu_to_le64(mapping); @@ -615,7 +616,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt *bp, } static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, - u8 *data) + void *data) { u16 prod = rxr->rx_prod; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; @@ -625,6 +626,7 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf->data = data; + prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; dma_unmap_addr_set(prod_rx_buf, mapping, dma_unmap_addr(cons_rx_buf, mapping)); @@ -755,11 +757,13 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - u16 prod, u8 *data, dma_addr_t dma_addr, - unsigned int len) + void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) { - int err; + u16 prod = rxr->rx_prod; struct sk_buff *skb; + int err; err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); if (unlikely(err)) { @@ -776,7 +780,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, } skb_reserve(skb, BNXT_RX_OFFSET); - skb_put(skb, len); + skb_put(skb, offset_and_len & 0xffff); return skb; } @@ -878,7 +882,8 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, PCI_DMA_FROMDEVICE); - memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET); + memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, + len + NET_IP_ALIGN); dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, @@ -951,6 +956,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, } prod_rx_buf->data = tpa_info->data; + prod_rx_buf->data_ptr = tpa_info->data_ptr; mapping = tpa_info->mapping; dma_unmap_addr_set(prod_rx_buf, mapping, mapping); @@ -960,6 +966,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, prod_bd->rx_bd_haddr = cpu_to_le64(mapping); tpa_info->data = cons_rx_buf->data; + tpa_info->data_ptr = cons_rx_buf->data_ptr; cons_rx_buf->data = NULL; tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping); @@ -1192,12 +1199,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u8 agg_id = TPA_END_AGG_ID(tpa_end); - u8 *data, agg_bufs; + u8 *data_ptr, agg_bufs; u16 cp_cons = RING_CMP(*raw_cons); unsigned int len; struct bnxt_tpa_info *tpa_info; dma_addr_t mapping; struct sk_buff *skb; + void *data; if (unlikely(bnapi->in_reset)) { int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); @@ -1209,7 +1217,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info = &rxr->rx_tpa[agg_id]; data = tpa_info->data; - prefetch(data); + data_ptr = tpa_info->data_ptr; + prefetch(data_ptr); len = 
tpa_info->len; mapping = tpa_info->mapping; @@ -1232,7 +1241,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (len <= bp->rx_copy_thresh) { - skb = bnxt_copy_skb(bnapi, data, len, mapping); + skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); return NULL; @@ -1248,6 +1257,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } tpa_info->data = new_data; + tpa_info->data_ptr = new_data + BNXT_RX_OFFSET; tpa_info->mapping = new_mapping; skb = build_skb(data, 0); @@ -1316,9 +1326,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); struct bnxt_sw_rx_bd *rx_buf; unsigned int len; - u8 *data, agg_bufs, cmp_type; + u8 *data_ptr, agg_bufs, cmp_type; dma_addr_t dma_addr; struct sk_buff *skb; + void *data; int rc = 0; rxcmp = (struct rx_cmp *) @@ -1363,13 +1374,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, cons = rxcmp->rx_cmp_opaque; rx_buf = &rxr->rx_buf_ring[cons]; data = rx_buf->data; + data_ptr = rx_buf->data_ptr; if (unlikely(cons != rxr->rx_next_cons)) { int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); bnxt_sched_reset(bp, rxr); return rc1; } - prefetch(data); + prefetch(data_ptr); agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; @@ -1396,14 +1408,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, dma_addr = dma_unmap_addr(rx_buf, mapping); if (len <= bp->rx_copy_thresh) { - skb = bnxt_copy_skb(bnapi, data, len, dma_addr); + skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { rc = -ENOMEM; goto next_rx; } } else { - skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len); + skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, + len); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -1880,7 +1893,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) for (j = 0; j < max_idx; j++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; - u8 *data = rx_buf->data; + void *data = rx_buf->data; if (!data) continue; @@ -2326,6 +2339,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) return -ENOMEM; rxr->rx_tpa[i].data = data; + rxr->rx_tpa[i].data_ptr = data + BNXT_RX_OFFSET; rxr->rx_tpa[i].mapping = mapping; } } else { @@ -2550,6 +2564,12 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->cp_ring_mask = bp->cp_bit - 1; } +static int bnxt_set_rx_skb_mode(struct bnxt *bp) +{ + bp->rx_skb_func = bnxt_rx_skb; + return 0; +} + static void bnxt_free_vnic_attributes(struct bnxt *bp) { int i; @@ -7299,6 +7319,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_port_led_qcaps(bp); + bnxt_set_rx_skb_mode(bp); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); bnxt_set_max_func_irqs(bp, max_irqs); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 52a1cc061ba3..11b5a4aff1e6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -516,7 +516,8 @@ struct bnxt_sw_tx_bd { }; struct bnxt_sw_rx_bd { - u8 *data; + void *data; + u8 *data_ptr; DEFINE_DMA_UNMAP_ADDR(mapping); }; @@ -576,7 +577,8 @@ struct bnxt_tx_ring_info { }; struct bnxt_tpa_info { - u8 *data; + void *data; + u8 *data_ptr; dma_addr_t mapping; u16 len; unsigned short gso_type; @@ -988,6 +990,11 @@ struct bnxt { struct 
sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int, struct sk_buff *); + struct sk_buff * (*rx_skb_func)(struct bnxt *, + struct bnxt_rx_ring_info *, + u16, void *, u8 *, dma_addr_t, + unsigned int); + u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u32 rx_ring_size; -- cgit v1.2.3 From 11cd119d31a71b37c2362fc621f225e2aa12aea1 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:33 -0500 Subject: bnxt_en: Don't use DEFINE_DMA_UNMAP_ADDR to store DMA address in RX path. To support XDP_TX, we need the RX buffer's DMA address to transmit the packet. Convert the DMA address field to a permanent field in bnxt_sw_rx_bd. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 21 +++++++++------------ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 94f3d7805036..43eb52ced2ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -608,7 +608,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt *bp, rx_buf->data = data; rx_buf->data_ptr = data + BNXT_RX_OFFSET; - dma_unmap_addr_set(rx_buf, mapping, mapping); + rx_buf->mapping = mapping; rxbd->rx_bd_haddr = cpu_to_le64(mapping); @@ -628,8 +628,7 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, prod_rx_buf->data = data; prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; - dma_unmap_addr_set(prod_rx_buf, mapping, - dma_unmap_addr(cons_rx_buf, mapping)); + prod_rx_buf->mapping = cons_rx_buf->mapping; prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; @@ -816,7 +815,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, * a sw_prod index that equals the cons index, so we * need to clear the cons entry now. 
*/ - mapping = dma_unmap_addr(cons_rx_buf, mapping); + mapping = cons_rx_buf->mapping; page = cons_rx_buf->page; cons_rx_buf->page = NULL; @@ -959,7 +958,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, prod_rx_buf->data_ptr = tpa_info->data_ptr; mapping = tpa_info->mapping; - dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + prod_rx_buf->mapping = mapping; prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; @@ -968,7 +967,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info->data = cons_rx_buf->data; tpa_info->data_ptr = cons_rx_buf->data_ptr; cons_rx_buf->data = NULL; - tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping); + tpa_info->mapping = cons_rx_buf->mapping; tpa_info->len = le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> @@ -1405,7 +1404,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; - dma_addr = dma_unmap_addr(rx_buf, mapping); + dma_addr = rx_buf->mapping; if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); @@ -1881,7 +1880,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) dma_unmap_single( &pdev->dev, - dma_unmap_addr(tpa_info, mapping), + tpa_info->mapping, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); @@ -1898,8 +1897,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) if (!data) continue; - dma_unmap_single(&pdev->dev, - dma_unmap_addr(rx_buf, mapping), + dma_unmap_single(&pdev->dev, rx_buf->mapping, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); @@ -1916,8 +1914,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) if (!page) continue; - dma_unmap_page(&pdev->dev, - dma_unmap_addr(rx_agg_buf, mapping), + dma_unmap_page(&pdev->dev, rx_agg_buf->mapping, BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); rx_agg_buf->page = NULL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 11b5a4aff1e6..b4a04a3945e9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -518,7 +518,7 @@ struct bnxt_sw_tx_bd { struct bnxt_sw_rx_bd { void *data; u8 *data_ptr; - DEFINE_DMA_UNMAP_ADDR(mapping); + dma_addr_t mapping; }; struct bnxt_sw_rx_agg_bd { -- cgit v1.2.3 From 745fc05c9db1f17da076861c7f57507e13f28a3a Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:34 -0500 Subject: bnxt_en: Add bp->rx_dir field for rx buffer DMA direction. When driver is running in XDP mode, rx buffers are DMA mapped as DMA_BIDIRECTIONAL. Add a field so the code will map/unmap rx buffers according to this field. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27 ++++++++++++--------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 43eb52ced2ca..ee2cada76f0f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -584,7 +584,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, return NULL; *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET, - bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + bp->rx_buf_use_size, bp->rx_dir); if (dma_mapping_error(&pdev->dev, *mapping)) { kfree(data); @@ -772,7 +772,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, skb = build_skb(data, 0); dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + bp->rx_dir); if (!skb) { kfree(data); return NULL; @@ -878,15 +878,14 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, if (!skb) return NULL; - dma_sync_single_for_cpu(&pdev->dev, mapping, - bp->rx_copy_thresh, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, + bp->rx_dir); memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, len + NET_IP_ALIGN); - dma_sync_single_for_device(&pdev->dev, mapping, - bp->rx_copy_thresh, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, + bp->rx_dir); skb_put(skb, len); return skb; @@ -1261,7 +1260,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = build_skb(data, 0); dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + bp->rx_dir); if (!skb) { kfree(data); @@ -1878,11 +1877,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) if (!data) continue; - dma_unmap_single( - &pdev->dev, - tpa_info->mapping, - bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, tpa_info->mapping, + bp->rx_buf_use_size, + bp->rx_dir); tpa_info->data = NULL; @@ -1898,8 +1895,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) continue; dma_unmap_single(&pdev->dev, rx_buf->mapping, - bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + bp->rx_buf_use_size, bp->rx_dir); rx_buf->data = NULL; @@ -2563,6 +2559,7 @@ void bnxt_set_ring_params(struct bnxt *bp) static int bnxt_set_rx_skb_mode(struct bnxt *bp) { + bp->rx_dir = DMA_FROM_DEVICE; bp->rx_skb_func = bnxt_rx_skb; return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index b4a04a3945e9..353b6d179ed2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -997,6 +997,7 @@ struct bnxt { u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ + enum dma_data_direction rx_dir; u32 rx_ring_size; u32 rx_agg_ring_size; u32 rx_copy_thresh; -- cgit v1.2.3 From b3dba77cf0acb6e44b368979026df975658332bc Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:35 -0500 Subject: bnxt_en: Parameterize RX buffer offsets. Convert the global constants BNXT_RX_OFFSET and BNXT_RX_DMA_OFFSET to device parameters. This will make it easier to support XDP with headroom support which requires different RX buffer offsets. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 15 +++++++++------ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ee2cada76f0f..c0f2167bbbb9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -583,7 +583,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, if (!data) return NULL; - *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET, + *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset, bp->rx_buf_use_size, bp->rx_dir); if (dma_mapping_error(&pdev->dev, *mapping)) { @@ -607,7 +607,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt *bp, return -ENOMEM; rx_buf->data = data; - rx_buf->data_ptr = data + BNXT_RX_OFFSET; + rx_buf->data_ptr = data + bp->rx_offset; rx_buf->mapping = mapping; rxbd->rx_bd_haddr = cpu_to_le64(mapping); @@ -778,7 +778,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return NULL; } - skb_reserve(skb, BNXT_RX_OFFSET); + skb_reserve(skb, bp->rx_offset); skb_put(skb, offset_and_len & 0xffff); return skb; } @@ -1255,7 +1255,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } tpa_info->data = new_data; - tpa_info->data_ptr = new_data + BNXT_RX_OFFSET; + tpa_info->data_ptr = new_data + bp->rx_offset; tpa_info->mapping = new_mapping; skb = build_skb(data, 0); @@ -1267,7 +1267,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); return NULL; } - skb_reserve(skb, BNXT_RX_OFFSET); + skb_reserve(skb, bp->rx_offset); skb_put(skb, len); } @@ -2332,7 +2332,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) return -ENOMEM; rxr->rx_tpa[i].data = data; - rxr->rx_tpa[i].data_ptr = data + BNXT_RX_OFFSET; + rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; rxr->rx_tpa[i].mapping = mapping; } } else { @@ -2348,6 +2348,9 @@ static int bnxt_init_rx_rings(struct bnxt *bp) { int i, rc = 0; + bp->rx_offset = BNXT_RX_OFFSET; + bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; + for (i = 0; i < bp->rx_nr_rings; i++) { rc = bnxt_init_one_rx_ring(bp, i); if (rc) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 353b6d179ed2..d8fc87110e5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -997,6 +997,8 @@ struct bnxt { u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ + u16 rx_offset; + u16 rx_dma_offset; enum dma_data_direction rx_dir; u32 rx_ring_size; u32 rx_agg_ring_size; -- cgit v1.2.3 From c61fb99cae51958a9096d8540c8c05e74cfa7e59 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:36 -0500 Subject: bnxt_en: Add RX page mode support. This mode is to support XDP. In this mode, each rx ring is configured with page sized buffers for linear placement of each packet. MTU will be restricted to what the page sized buffers can support. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 135 ++++++++++++++++++++++++++---- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7 ++ 2 files changed, 124 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c0f2167bbbb9..0bcd46576415 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -573,6 +573,25 @@ next_tx_int: } } +static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, + gfp_t gfp) +{ + struct device *dev = &bp->pdev->dev; + struct page *page; + + page = alloc_page(gfp); + if (!page) + return NULL; + + *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir); + if (dma_mapping_error(dev, *mapping)) { + __free_page(page); + return NULL; + } + *mapping += bp->rx_dma_offset; + return page; +} + static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, gfp_t gfp) { @@ -599,19 +618,28 @@ static inline int bnxt_alloc_rx_data(struct bnxt *bp, { struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; - u8 *data; dma_addr_t mapping; - data = __bnxt_alloc_rx_data(bp, &mapping, gfp); - if (!data) - return -ENOMEM; + if (BNXT_RX_PAGE_MODE(bp)) { + struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp); - rx_buf->data = data; - rx_buf->data_ptr = data + bp->rx_offset; + if (!page) + return -ENOMEM; + + rx_buf->data = page; + rx_buf->data_ptr = page_address(page) + bp->rx_offset; + } else { + u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); + + if (!data) + return -ENOMEM; + + rx_buf->data = data; + rx_buf->data_ptr = data + bp->rx_offset; + } rx_buf->mapping = mapping; rxbd->rx_bd_haddr = cpu_to_le64(mapping); - return 0; } @@ -754,6 +782,51 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, rxr->rx_sw_agg_prod = sw_prod; } +static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 cons, void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + unsigned int payload = offset_and_len >> 16; + unsigned int len = offset_and_len & 0xffff; + struct skb_frag_struct *frag; + struct page *page = data; + u16 prod = rxr->rx_prod; + struct sk_buff *skb; + int off, err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + dma_addr -= bp->rx_dma_offset; + dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir); + + if (unlikely(!payload)) + payload = eth_get_headlen(data_ptr, len); + + skb = napi_alloc_skb(&rxr->bnapi->napi, payload); + if (!skb) { + __free_page(page); + return NULL; + } + + off = (void *)data_ptr - page_address(page); + skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); + memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, + payload + NET_IP_ALIGN); + + frag = &skb_shinfo(skb)->frags[0]; + skb_frag_size_sub(frag, payload); + frag->page_offset += payload; + skb->data_len -= payload; + skb->tail += payload; + + return skb; +} + static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, void *data, u8 *data_ptr, @@ -1329,6 +1402,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, struct sk_buff *skb; void *data; int rc = 0; + u32 misc; rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; @@ -1381,8 +1455,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, 
struct bnxt_napi *bnapi, u32 *raw_cons, } prefetch(data_ptr); - agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >> - RX_CMP_AGG_BUFS_SHIFT; + misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); + agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; if (agg_bufs) { if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) @@ -1413,8 +1487,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, goto next_rx; } } else { + u32 payload; + + payload = misc & RX_CMP_PAYLOAD_OFFSET; skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, - len); + payload | len); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -1899,7 +1976,10 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) rx_buf->data = NULL; - kfree(data); + if (BNXT_RX_PAGE_MODE(bp)) + __free_page(data); + else + kfree(data); } for (j = 0; j < max_agg_idx; j++) { @@ -2348,8 +2428,13 @@ static int bnxt_init_rx_rings(struct bnxt *bp) { int i, rc = 0; - bp->rx_offset = BNXT_RX_OFFSET; - bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; + if (BNXT_RX_PAGE_MODE(bp)) { + bp->rx_offset = NET_IP_ALIGN; + bp->rx_dma_offset = 0; + } else { + bp->rx_offset = BNXT_RX_OFFSET; + bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; + } for (i = 0; i < bp->rx_nr_rings; i++) { rc = bnxt_init_one_rx_ring(bp, i); @@ -2560,10 +2645,24 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->cp_ring_mask = bp->cp_bit - 1; } -static int bnxt_set_rx_skb_mode(struct bnxt *bp) +int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { - bp->rx_dir = DMA_FROM_DEVICE; - bp->rx_skb_func = bnxt_rx_skb; + if (page_mode) { + if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) + return -EOPNOTSUPP; + bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU; + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + bp->rx_dir = DMA_BIDIRECTIONAL; + bp->rx_skb_func = bnxt_rx_page_skb; + } else { + bp->dev->max_mtu = BNXT_MAX_MTU; + bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; + bp->rx_dir = DMA_FROM_DEVICE; + bp->rx_skb_func = bnxt_rx_skb; + } return 0; } @@ -7275,7 +7374,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* MTU range: 60 - 9500 */ dev->min_mtu = ETH_ZLEN; - dev->max_mtu = 9500; + dev->max_mtu = BNXT_MAX_MTU; bnxt_dcb_init(bp); @@ -7316,7 +7415,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_port_led_qcaps(bp); - bnxt_set_rx_skb_mode(bp); + bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); bnxt_set_max_func_irqs(bp, max_irqs); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d8fc87110e5e..2144a0e024a2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -416,6 +416,10 @@ struct rx_tpa_end_cmp_ext { #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) +#define BNXT_MAX_MTU 9500 +#define BNXT_MAX_PAGE_MODE_MTU \ + ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN) + #define BNXT_MIN_PKT_SIZE 52 #define BNXT_NUM_TESTS(bp) 0 @@ -967,6 +971,7 @@ struct bnxt { #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \ BNXT_FLAG_ROCEV2_CAP) #define BNXT_FLAG_NO_AGG_RINGS 0x20000 + #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -978,6 +983,7 @@ struct bnxt { #define BNXT_NPAR(bp) ((bp)->port_partition_type) #define 
BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp)) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) +#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) struct bnxt_en_dev *edev; struct bnxt_en_dev * (*ulp_probe)(struct net_device *); @@ -1170,6 +1176,7 @@ struct bnxt { #define BNXT_MAX_PHY_I2C_RESP_SIZE 64 void bnxt_set_ring_params(struct bnxt *); +int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message(struct bnxt *, void *, u32, int); -- cgit v1.2.3 From 4e5dbbda4c40a239e2ed4bbc98f2aa320e4dcca2 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:37 -0500 Subject: bnxt_en: Use event bit map in RX path. In the current code, we have separate rx_event and agg_event parameters to keep track of rx and aggregation events. Combine these events into an u8 event mask with different bits defined for different events. This way, it is easier to expand the logic to include XDP tx events. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 30 +++++++++++++++--------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 +++ 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0bcd46576415..32c808fa4fef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1265,7 +1265,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, u32 *raw_cons, struct rx_tpa_end_cmp *tpa_end, struct rx_tpa_end_cmp_ext *tpa_end1, - bool *agg_event) + u8 *event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; @@ -1300,7 +1300,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) return ERR_PTR(-EBUSY); - *agg_event = true; + *event |= BNXT_AGG_EVENT; cp_cons = NEXT_CMP(cp_cons); } @@ -1386,7 +1386,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, * -EIO - packet aborted due to hw error indicated in BD */ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, - bool *agg_event) + u8 *event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; @@ -1423,13 +1423,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, (struct rx_tpa_start_cmp_ext *)rxcmp1); + *event |= BNXT_RX_EVENT; goto next_rx_no_prod; } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, (struct rx_tpa_end_cmp *)rxcmp, - (struct rx_tpa_end_cmp_ext *)rxcmp1, - agg_event); + (struct rx_tpa_end_cmp_ext *)rxcmp1, event); if (unlikely(IS_ERR(skb))) return -EBUSY; @@ -1440,6 +1440,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, napi_gro_receive(&bnapi->napi, skb); rc = 1; } + *event |= BNXT_RX_EVENT; goto next_rx_no_prod; } @@ -1463,8 +1464,9 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, return -EBUSY; cp_cons = NEXT_CMP(cp_cons); - *agg_event = true; + *event |= BNXT_AGG_EVENT; } + *event |= BNXT_RX_EVENT; rx_buf->data = NULL; if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { @@ -1715,8 +1717,7 @@ static int bnxt_poll_work(struct 
bnxt *bp, struct bnxt_napi *bnapi, int budget) u32 cons; int tx_pkts = 0; int rx_pkts = 0; - bool rx_event = false; - bool agg_event = false; + u8 event = 0; struct tx_cmp *txcmp; while (1) { @@ -1738,12 +1739,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) if (unlikely(tx_pkts > bp->tx_wake_thresh)) rx_pkts = budget; } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); if (likely(rc >= 0)) rx_pkts += rc; else if (rc == -EBUSY) /* partial completion */ break; - rx_event = true; } else if (unlikely((TX_CMP_TYPE(txcmp) == CMPL_BASE_TYPE_HWRM_DONE) || (TX_CMP_TYPE(txcmp) == @@ -1768,12 +1768,12 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) if (tx_pkts) bnxt_tx_int(bp, bnapi, tx_pkts); - if (rx_event) { + if (event & BNXT_RX_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); - if (agg_event) { + if (event & BNXT_AGG_EVENT) { writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); writel(DB_KEY_RX | rxr->rx_agg_prod, @@ -1794,7 +1794,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) u32 cp_cons, tmp_raw_cons; u32 raw_cons = cpr->cp_raw_cons; u32 rx_pkts = 0; - bool agg_event = false; + u8 event = 0; while (1) { int rc; @@ -1818,7 +1818,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) rxcmp1->rx_cmp_cfa_code_errors_v2 |= cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); if (likely(rc == -EIO)) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ @@ -1841,7 +1841,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); - if (agg_event) { + if (event & BNXT_AGG_EVENT) { writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2144a0e024a2..7f59ce9894c3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -511,6 +511,9 @@ struct rx_tpa_end_cmp_ext { #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) +#define BNXT_RX_EVENT 1 +#define BNXT_AGG_EVENT 2 + struct bnxt_sw_tx_bd { struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(mapping); -- cgit v1.2.3 From d1e7925e6d80ce5f9ef6deb8f3cec7526f5c443c Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:38 -0500 Subject: bnxt_en: Centralize logic to reserve rings. Currently, bnxt_setup_tc() and bnxt_set_channels() have similar and duplicated code to check and reserve rx and tx rings. Add a new function bnxt_reserve_rings() to centralize the logic. This will make it easier to add XDP_TX support which requires allocating a new set of TX rings. Also, the tx ring checking logic in bnxt_setup_msix() can be removed. The rings have been reserved before hand. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 68 ++++++++++++++--------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 39 +++---------- 3 files changed, 52 insertions(+), 57 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 32c808fa4fef..1b051f930598 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4212,7 +4212,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) return rc; } -int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) +static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) { struct hwrm_func_cfg_input req = {0}; int rc; @@ -5005,19 +5005,12 @@ static void bnxt_setup_msix(struct bnxt *bp) tcs = netdev_get_num_tc(dev); if (tcs > 1) { - bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; - if (bp->tx_nr_rings_per_tc == 0) { - netdev_reset_tc(dev); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - } else { - int i, off, count; + int i, off, count; - bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; - for (i = 0; i < tcs; i++) { - count = bp->tx_nr_rings_per_tc; - off = i * count; - netdev_set_tc_queue(dev, i, count, off); - } + for (i = 0; i < tcs; i++) { + count = bp->tx_nr_rings_per_tc; + off = i * count; + netdev_set_tc_queue(dev, i, count, off); } } @@ -6579,6 +6572,37 @@ static void bnxt_sp_task(struct work_struct *work) clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } +/* Under rtnl_lock */ +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs) +{ + int max_rx, max_tx, tx_sets = 1; + int tx_rings_needed; + bool sh = true; + int rc; + + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + sh = false; + + if (tcs) + tx_sets = tcs; + + rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); + if (rc) + return rc; + + if (max_rx < rx) + return -ENOMEM; + + tx_rings_needed = tx * tx_sets; + if (max_tx < tx_rings_needed) + return -ENOMEM; + + if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) || + tx_rings_needed < (tx * tx_sets)) + return -ENOMEM; + return 0; +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -6741,6 +6765,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) { struct bnxt *bp = netdev_priv(dev); bool sh = false; + int rc; if (tc > bp->max_tc) { netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", @@ -6754,19 +6779,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - if (tc) { - int max_rx_rings, max_tx_rings, req_tx_rings, rsv_tx_rings, rc; - - req_tx_rings = bp->tx_nr_rings_per_tc * tc; - rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); - if (rc || req_tx_rings > max_tx_rings) - return -ENOMEM; - - rsv_tx_rings = req_tx_rings; - if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings) || - rsv_tx_rings < req_tx_rings) - return -ENOMEM; - } + rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, + bp->rx_nr_rings, tc); + if (rc) + return rc; /* Needs to close the device and do hw resource re-allocations */ if (netif_running(bp->dev)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7f59ce9894c3..3a079b834a9c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1188,7 +1188,6 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, int bmap_size); int bnxt_hwrm_vnic_cfg(struct bnxt 
*bp, u16 vnic_id); int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); -int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings); int bnxt_hwrm_set_coal(struct bnxt *); unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); @@ -1202,6 +1201,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool); +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 24818e1e59f3..6f2568d2354e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -387,10 +387,9 @@ static int bnxt_set_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); - int max_rx_rings, max_tx_rings, tcs; - int req_tx_rings, rsv_tx_rings; - u32 rc = 0; + int req_tx_rings, req_rx_rings, tcs; bool sh = false; + int rc = 0; if (channel->other_count) return -EINVAL; @@ -410,32 +409,14 @@ static int bnxt_set_channels(struct net_device *dev, if (channel->combined_count) sh = true; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); - tcs = netdev_get_num_tc(dev); - if (tcs > 1) - max_tx_rings /= tcs; - - if (sh && - channel->combined_count > max_t(int, max_rx_rings, max_tx_rings)) - return -ENOMEM; - - if (!sh && (channel->rx_count > max_rx_rings || - channel->tx_count > max_tx_rings)) - return -ENOMEM; req_tx_rings = sh ? channel->combined_count : channel->tx_count; - req_tx_rings = min_t(int, req_tx_rings, max_tx_rings); - if (tcs > 1) - req_tx_rings *= tcs; - - rsv_tx_rings = req_tx_rings; - if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings)) - return -ENOMEM; - - if (rsv_tx_rings < req_tx_rings) { - netdev_warn(dev, "Unable to allocate the requested tx rings\n"); - return -ENOMEM; + req_rx_rings = sh ? channel->combined_count : channel->rx_count; + rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs); + if (rc) { + netdev_warn(dev, "Unable to allocate the requested rings\n"); + return rc; } if (netif_running(dev)) { @@ -454,10 +435,8 @@ static int bnxt_set_channels(struct net_device *dev, if (sh) { bp->flags |= BNXT_FLAG_SHARED_RINGS; - bp->rx_nr_rings = min_t(int, channel->combined_count, - max_rx_rings); - bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count, - max_tx_rings); + bp->rx_nr_rings = channel->combined_count; + bp->tx_nr_rings_per_tc = channel->combined_count; } else { bp->flags &= ~BNXT_FLAG_SHARED_RINGS; bp->rx_nr_rings = channel->rx_count; -- cgit v1.2.3 From a960dec98861b009b4227d2ae3b94a142c83eb96 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:39 -0500 Subject: bnxt_en: Add tx ring mapping logic. To support XDP_TX, we need to add a set of dedicated TX rings, each associated with the NAPI of an RX ring. To assign XDP rings and regular rings in a flexible way, we add a bp->tx_ring_map[] array to do the remapping. The netdev txq index is stored in the new field txq_index so that we can retrieve the netdev txq when handling TX completions. In this patch, before we introduce XDP_TX, the mapping is 1:1. v2: Fixed a bug in bnxt_tx_int(). 
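As a standalone sketch of the remapping described above (illustrative only, not code from the patch): netdev txq i is served by hardware ring tx_nr_rings_xdp + i, which degenerates to the 1:1 map installed by this patch when there are no XDP rings. The helper build_tx_ring_map(), MAX_RINGS and the parameter names are hypothetical; only tx_ring_map and tx_nr_rings_xdp mirror the new driver fields.

#include <stdio.h>

#define MAX_RINGS	16

static unsigned short tx_ring_map[MAX_RINGS];	/* netdev txq -> hw tx ring */

static void build_tx_ring_map(int nr_netdev_txqs, int tx_nr_rings_xdp)
{
	int i;

	/* XDP rings (added by the later patches in this series) occupy the
	 * low hardware indices, so regular rings are shifted up by
	 * tx_nr_rings_xdp; with tx_nr_rings_xdp == 0 this is the identity.
	 */
	for (i = 0; i < nr_netdev_txqs; i++)
		tx_ring_map[i] = tx_nr_rings_xdp + i;
}

int main(void)
{
	int i;

	build_tx_ring_map(4, 0);		/* this patch: 1:1 mapping */
	for (i = 0; i < 4; i++)
		printf("txq %d -> hw ring %hu\n", i, tx_ring_map[i]);

	build_tx_ring_map(4, 4);		/* after XDP_TX rings are added */
	for (i = 0; i < 4; i++)
		printf("txq %d -> hw ring %hu\n", i, tx_ring_map[i]);

	return 0;
}

The same indirection is what lets the TX completion path recover the netdev queue from the new txr->txq_index field instead of the ring's position in the array.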
Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 15 ++++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1b051f930598..811bc825bf90 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -262,8 +262,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(dev, i); + txr = &bp->tx_ring[bp->tx_ring_map[i]]; prod = txr->tx_prod; free_size = bnxt_tx_avail(bp, txr); @@ -509,8 +509,7 @@ tx_dma_error: static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; - int index = txr - &bp->tx_ring[0]; - struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); u16 cons = txr->tx_cons; struct pci_dev *pdev = bp->pdev; int i; @@ -2975,6 +2974,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); + kfree(bp->tx_ring_map); + bp->tx_ring_map = NULL; kfree(bp->tx_ring); bp->tx_ring = NULL; kfree(bp->rx_ring); @@ -3027,6 +3028,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) if (!bp->tx_ring) return -ENOMEM; + bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), + GFP_KERNEL); + + if (!bp->tx_ring_map) + return -ENOMEM; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) j = 0; else @@ -3035,6 +3042,8 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) for (i = 0; i < bp->tx_nr_rings; i++, j++) { bp->tx_ring[i].bnapi = bp->bnapi[j]; bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; + bp->tx_ring_map[i] = i; + bp->tx_ring[i].txq_index = i; } rc = bnxt_alloc_stats(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3a079b834a9c..97ccce184694 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -566,6 +566,7 @@ struct bnxt_tx_ring_info { struct bnxt_napi *bnapi; u16 tx_prod; u16 tx_cons; + u16 txq_index; void __iomem *tx_doorbell; struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; @@ -995,6 +996,7 @@ struct bnxt { struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; + u16 *tx_ring_map; struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int, struct sk_buff *); -- cgit v1.2.3 From 5f4492493e75dafc5cbb96eabe0f146c2ffb1e3d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:40 -0500 Subject: bnxt_en: Add a set of TX rings to support XDP. Add logic for an extra set of TX rings for XDP. If enabled, this set of TX rings equals the number of RX rings and shares the same IRQ as the RX ring set. A new field bp->tx_nr_rings_xdp is added to keep track of these TX XDP rings. Adjust all other relevant functions to handle bp->tx_nr_rings_xdp. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 21 +++++++++++++-------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 ++- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 16 ++++++++++++---- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 811bc825bf90..412a8de8c2c8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2218,6 +2218,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); } ring->queue_id = bp->q_info[j].queue_id; + if (i < bp->tx_nr_rings_xdp) + continue; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) j++; } @@ -3042,8 +3044,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) for (i = 0; i < bp->tx_nr_rings; i++, j++) { bp->tx_ring[i].bnapi = bp->bnapi[j]; bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; - bp->tx_ring_map[i] = i; - bp->tx_ring[i].txq_index = i; + bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; + if (i >= bp->tx_nr_rings_xdp) + bp->tx_ring[i].txq_index = i - + bp->tx_nr_rings_xdp; } rc = bnxt_alloc_stats(bp); @@ -4966,7 +4970,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp) int rc; struct net_device *dev = bp->dev; - rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); + rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - + bp->tx_nr_rings_xdp); if (rc) return rc; @@ -6582,7 +6587,7 @@ static void bnxt_sp_task(struct work_struct *work) } /* Under rtnl_lock */ -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs) +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp) { int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; @@ -6602,12 +6607,12 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs) if (max_rx < rx) return -ENOMEM; - tx_rings_needed = tx * tx_sets; + tx_rings_needed = tx * tx_sets + tx_xdp; if (max_tx < tx_rings_needed) return -ENOMEM; if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) || - tx_rings_needed < (tx * tx_sets)) + tx_rings_needed < (tx * tx_sets + tx_xdp)) return -ENOMEM; return 0; } @@ -6788,8 +6793,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, - bp->rx_nr_rings, tc); + rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + tc, bp->tx_nr_rings_xdp); if (rc) return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 97ccce184694..4c3289a7fea8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1026,6 +1026,7 @@ struct bnxt { int tx_nr_pages; int tx_nr_rings; int tx_nr_rings_per_tc; + int tx_nr_rings_xdp; int tx_wake_thresh; int tx_push_thresh; @@ -1203,7 +1204,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool); -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs); +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 
6f2568d2354e..7aa248db10c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -389,6 +389,7 @@ static int bnxt_set_channels(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int req_tx_rings, req_rx_rings, tcs; bool sh = false; + int tx_xdp = 0; int rc = 0; if (channel->other_count) @@ -413,7 +414,14 @@ static int bnxt_set_channels(struct net_device *dev, req_tx_rings = sh ? channel->combined_count : channel->tx_count; req_rx_rings = sh ? channel->combined_count : channel->rx_count; - rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs); + if (bp->tx_nr_rings_xdp) { + if (!sh) { + netdev_err(dev, "Only combined mode supported when XDP is enabled.\n"); + return -EINVAL; + } + tx_xdp = req_rx_rings; + } + rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp); if (rc) { netdev_warn(dev, "Unable to allocate the requested rings\n"); return rc; @@ -442,10 +450,10 @@ static int bnxt_set_channels(struct net_device *dev, bp->rx_nr_rings = channel->rx_count; bp->tx_nr_rings_per_tc = channel->tx_count; } - - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings_xdp = tx_xdp; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp; if (tcs > 1) - bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp; bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; -- cgit v1.2.3 From fa3e93e86cc3d1809fba67cb138883ed4bb74a5f Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:41 -0500 Subject: bnxt_en: Refactor tx completion path. XDP_TX requires a different function to handle completion. Add a function pointer to handle tx completion logic. Regular TX rings will be assigned the current bnxt_tx_int() for the ->tx_int() function pointer. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 ++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 +++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 412a8de8c2c8..64dc94d62a95 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1765,7 +1765,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); if (tx_pkts) - bnxt_tx_int(bp, bnapi, tx_pkts); + bnapi->tx_int(bp, bnapi, tx_pkts); if (event & BNXT_RX_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; @@ -3048,6 +3048,9 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) if (i >= bp->tx_nr_rings_xdp) bp->tx_ring[i].txq_index = i - bp->tx_nr_rings_xdp; + else + bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; + bp->bnapi[j]->tx_int = bnxt_tx_int; } rc = bnxt_alloc_stats(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 4c3289a7fea8..6dc43f5a7509 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -664,6 +664,11 @@ struct bnxt_napi { struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; + void (*tx_int)(struct bnxt *, struct bnxt_napi *, + int); + u32 flags; +#define BNXT_NAPI_FLAG_XDP 0x1 + bool in_reset; }; -- cgit v1.2.3 From c6d30e8391b85e00eb544e6cf047ee0160ee9938 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:42 -0500 Subject: bnxt_en: Add basic XDP support. Add basic ndo_xdp support to setup and query program, configure the NIC to run in rx page mode, and support XDP_PASS, XDP_DROP, XDP_ABORTED actions only. v3: Pass modified offset and length to stack for XDP_PASS. Remove Kconfig option. v2: Added trace_xdp_exception() Added dma_syncs. Added XDP headroom support. Signed-off-by: Michael Chan Tested-by: Andy Gospodarek Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/Makefile | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36 +++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9 +- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 157 ++++++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 18 +++ 5 files changed, 214 insertions(+), 8 deletions(-) create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 6082ed1b5ea0..a7ca45b251cb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 64dc94d62a95..665fe4fbf5d0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -53,6 +54,7 @@ #include "bnxt_sriov.h" #include "bnxt_ethtool.h" #include "bnxt_dcb.h" +#include "bnxt_xdp.h" #define BNXT_TX_TIMEOUT (5 * HZ) @@ -642,8 +644,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt *bp, return 0; } -static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, - void *data) +void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) { u16 prod = rxr->rx_prod; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; @@ -1480,6 +1481,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; dma_addr = rx_buf->mapping; + if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { + rc = 1; + goto next_rx; + } + if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); @@ -1490,7 +1496,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } else { u32 payload; - payload = misc & RX_CMP_PAYLOAD_OFFSET; + if (rx_buf->data_ptr == data_ptr) + payload = misc & RX_CMP_PAYLOAD_OFFSET; + else + payload = 0; skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, payload | len); if (!skb) { @@ -2080,6 +2089,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; + if (rxr->xdp_prog) + bpf_prog_put(rxr->xdp_prog); + kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -2367,6 +2379,15 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) ring = &rxr->rx_ring_struct; bnxt_init_rxbd_pages(ring, type); + if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { + rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); + if (IS_ERR(rxr->xdp_prog)) { + int rc = PTR_ERR(rxr->xdp_prog); + + rxr->xdp_prog = NULL; + return rc; + } + } prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { @@ -2430,8 +2451,8 @@ static int bnxt_init_rx_rings(struct bnxt *bp) int i, rc = 0; if (BNXT_RX_PAGE_MODE(bp)) { - bp->rx_offset = NET_IP_ALIGN; - bp->rx_dma_offset = 0; + bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; + bp->rx_dma_offset = XDP_PACKET_HEADROOM; } else { bp->rx_offset = BNXT_RX_OFFSET; bp->rx_dma_offset = 
BNXT_RX_DMA_OFFSET; @@ -2560,7 +2581,7 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) return pages; } -static void bnxt_set_tpa_flags(struct bnxt *bp) +void bnxt_set_tpa_flags(struct bnxt *bp) { bp->flags &= ~BNXT_FLAG_TPA; if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) @@ -7107,6 +7128,7 @@ static const struct net_device_ops bnxt_netdev_ops = { #endif .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, + .ndo_xdp = bnxt_xdp, }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -7131,6 +7153,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) pci_iounmap(pdev, bp->bar0); kfree(bp->edev); bp->edev = NULL; + if (bp->xdp_prog) + bpf_prog_put(bp->xdp_prog); free_netdev(dev); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 6dc43f5a7509..db4a41069b71 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -418,7 +418,8 @@ struct rx_tpa_end_cmp_ext { #define BNXT_MAX_MTU 9500 #define BNXT_MAX_PAGE_MODE_MTU \ - ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN) + ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ + XDP_PACKET_HEADROOM) #define BNXT_MIN_PKT_SIZE 52 @@ -618,6 +619,8 @@ struct bnxt_rx_ring_info { void __iomem *rx_doorbell; void __iomem *rx_agg_doorbell; + struct bpf_prog *xdp_prog; + struct rx_bd *rx_desc_ring[MAX_RX_PAGES]; struct bnxt_sw_rx_bd *rx_buf_ring; @@ -1167,6 +1170,8 @@ struct bnxt { u8 num_leds; struct bnxt_led_info leds[BNXT_MAX_LED]; + + struct bpf_prog *xdp_prog; }; #define BNXT_RX_STATS_OFFSET(counter) \ @@ -1186,6 +1191,8 @@ struct bnxt { #define SFF_MODULE_ID_QSFP28 0x11 #define BNXT_MAX_PHY_I2C_RESP_SIZE 64 +void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data); +void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c new file mode 100644 index 000000000000..b822e461cbde --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -0,0 +1,157 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_xdp.h" + +/* returns the following: + * true - packet consumed by XDP and new buffer is allocated. + * false - packet should be passed to the stack. 
+ */ +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct page *page, u8 **data_ptr, unsigned int *len, u8 *event) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); + struct bnxt_sw_rx_bd *rx_buf; + struct pci_dev *pdev; + struct xdp_buff xdp; + dma_addr_t mapping; + void *orig_data; + u32 offset; + u32 act; + + if (!xdp_prog) + return false; + + pdev = bp->pdev; + rx_buf = &rxr->rx_buf_ring[cons]; + offset = bp->rx_offset; + + xdp.data_hard_start = *data_ptr - offset; + xdp.data = *data_ptr; + xdp.data_end = *data_ptr + *len; + orig_data = xdp.data; + mapping = rx_buf->mapping - bp->rx_dma_offset; + + dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); + + rcu_read_lock(); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + rcu_read_unlock(); + + if (orig_data != xdp.data) { + offset = xdp.data - xdp.data_hard_start; + *data_ptr = xdp.data_hard_start + offset; + *len = xdp.data_end - xdp.data; + } + switch (act) { + case XDP_PASS: + return false; + + default: + bpf_warn_invalid_xdp_action(act); + /* Fall thru */ + case XDP_ABORTED: + trace_xdp_exception(bp->dev, xdp_prog, act); + /* Fall thru */ + case XDP_DROP: + bnxt_reuse_rx_data(rxr, cons, page); + break; + } + return true; +} + +/* Under rtnl_lock */ +static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) +{ + struct net_device *dev = bp->dev; + int tx_xdp = 0, rc, tc; + struct bpf_prog *old; + + if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n", + bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); + return -EOPNOTSUPP; + } + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { + netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n"); + return -EOPNOTSUPP; + } + if (prog) + tx_xdp = bp->rx_nr_rings; + + tc = netdev_get_num_tc(dev); + if (!tc) + tc = 1; + rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + tc, tx_xdp); + if (rc) { + netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n"); + return rc; + } + if (netif_running(dev)) + bnxt_close_nic(bp, true, false); + + old = xchg(&bp->xdp_prog, prog); + if (old) + bpf_prog_put(old); + + if (prog) { + bnxt_set_rx_skb_mode(bp, true); + } else { + int rx, tx; + + bnxt_set_rx_skb_mode(bp, false); + bnxt_get_max_rings(bp, &rx, &tx, true); + if (rx > 1) { + bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features |= NETIF_F_LRO; + } + } + bp->tx_nr_rings_xdp = tx_xdp; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; + bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + bnxt_set_tpa_flags(bp); + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, true, false); + + return 0; +} + +int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + switch (xdp->command) { + case XDP_SETUP_PROG: + rc = bnxt_xdp_set(bp, xdp->prog); + break; + case XDP_QUERY_PROG: + xdp->prog_attached = !!bp->xdp_prog; + rc = 0; + break; + default: + rc = -EINVAL; + break; + } + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h new file mode 100644 index 000000000000..0bb7b7d97ec3 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -0,0 +1,18 @@ +/* Broadcom NetXtreme-C/E network driver. 
+ * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_XDP_H +#define BNXT_XDP_H + +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct page *page, u8 **data_ptr, unsigned int *len, + u8 *event); +int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp); + +#endif -- cgit v1.2.3 From 38413406277fd060f46855ad527f6f8d4cf2652d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 6 Feb 2017 16:55:43 -0500 Subject: bnxt_en: Add support for XDP_TX action. Add dedicated transmit function and transmit completion handler for XDP. The XDP transmit logic and completion logic are different than regular TX ring. The TX buffer is recycled back to the RX ring when it completes. v3: Improved the buffer recyling scheme for XDP_TX. v2: Add trace_xdp_exception(). Add dma_sync. Signed-off-by: Michael Chan Tested-by: Andy Gospodarek Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36 ++++++------ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 19 +++++- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 83 +++++++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 1 + 4 files changed, 122 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 665fe4fbf5d0..cda1c787e8e1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -212,16 +212,7 @@ static bool bnxt_vf_pciid(enum board_idx idx) #define BNXT_CP_DB_IRQ_DIS(db) \ writel(DB_CP_IRQ_DIS_FLAGS, db) -static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) -{ - /* Tell compiler to fetch tx indices from memory. 
*/ - barrier(); - - return bp->tx_ring_size - - ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); -} - -static const u16 bnxt_lhint_arr[] = { +const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_512_AND_SMALLER, TX_BD_FLAGS_LHINT_512_TO_1023, TX_BD_FLAGS_LHINT_1024_TO_2047, @@ -613,9 +604,8 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, return data; } -static inline int bnxt_alloc_rx_data(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr, - u16 prod, gfp_t gfp) +int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) { struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; @@ -1766,6 +1756,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) break; } + if (event & BNXT_TX_EVENT) { + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + void __iomem *db = txr->tx_doorbell; + u16 prod = txr->tx_prod; + + /* Sync BD data before updating doorbell */ + wmb(); + + writel(DB_KEY_TX | prod, db); + writel(DB_KEY_TX | prod, db); + } + cpr->cp_raw_cons = raw_cons; /* ACK completion ring before freeing tx ring and producing new * buffers in rx/agg rings to prevent overflowing the completion @@ -3066,12 +3068,14 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) bp->tx_ring[i].bnapi = bp->bnapi[j]; bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; - if (i >= bp->tx_nr_rings_xdp) + if (i >= bp->tx_nr_rings_xdp) { bp->tx_ring[i].txq_index = i - bp->tx_nr_rings_xdp; - else + bp->bnapi[j]->tx_int = bnxt_tx_int; + } else { bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; - bp->bnapi[j]->tx_int = bnxt_tx_int; + bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; + } } rc = bnxt_alloc_stats(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index db4a41069b71..9f07b9cf8965 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -514,13 +514,17 @@ struct rx_tpa_end_cmp_ext { #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 +#define BNXT_TX_EVENT 4 struct bnxt_sw_tx_bd { struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(mapping); u8 is_gso; u8 is_push; - unsigned short nr_frags; + union { + unsigned short nr_frags; + u16 rx_prod; + }; }; struct bnxt_sw_rx_bd { @@ -1191,6 +1195,19 @@ struct bnxt { #define SFF_MODULE_ID_QSFP28 0x11 #define BNXT_MAX_PHY_I2C_RESP_SIZE 64 +static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + /* Tell compiler to fetch tx indices from memory. 
*/ + barrier(); + + return bp->tx_ring_size - + ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); +} + +extern const u16 bnxt_lhint_arr[]; + +int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp); void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index b822e461cbde..899c30fb5188 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -19,6 +19,65 @@ #include "bnxt.h" #include "bnxt_xdp.h" +static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + dma_addr_t mapping, u32 len, u16 rx_prod) +{ + struct bnxt_sw_tx_bd *tx_buf; + struct tx_bd_ext *txbd1; + struct tx_bd *txbd; + u32 flags; + u16 prod; + + prod = txr->tx_prod; + tx_buf = &txr->tx_buf_ring[prod]; + tx_buf->rx_prod = rx_prod; + + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | + (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW | + TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9]; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd->tx_bd_opaque = prod; + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + prod = NEXT_TX(prod); + txbd1 = (struct tx_bd_ext *) + &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + txbd1->tx_bd_hsize_lflags = cpu_to_le32(0); + txbd1->tx_bd_mss = cpu_to_le32(0); + txbd1->tx_bd_cfa_action = cpu_to_le32(0); + txbd1->tx_bd_cfa_meta = cpu_to_le32(0); + + prod = NEXT_TX(prod); + txr->tx_prod = prod; +} + +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +{ + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct bnxt_sw_tx_bd *tx_buf; + u16 tx_cons = txr->tx_cons; + u16 last_tx_cons = tx_cons; + u16 rx_prod; + int i; + + for (i = 0; i < nr_pkts; i++) { + last_tx_cons = tx_cons; + tx_cons = NEXT_TX(tx_cons); + tx_cons = NEXT_TX(tx_cons); + } + txr->tx_cons = tx_cons; + if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) { + rx_prod = rxr->rx_prod; + } else { + tx_buf = &txr->tx_buf_ring[last_tx_cons]; + rx_prod = tx_buf->rx_prod; + } + writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell); +} + /* returns the following: * true - packet consumed by XDP and new buffer is allocated. * false - packet should be passed to the stack. @@ -27,11 +86,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event) { struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); + struct bnxt_tx_ring_info *txr; struct bnxt_sw_rx_bd *rx_buf; struct pci_dev *pdev; struct xdp_buff xdp; dma_addr_t mapping; void *orig_data; + u32 tx_avail; u32 offset; u32 act; @@ -39,6 +100,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, return false; pdev = bp->pdev; + txr = rxr->bnapi->tx_ring; rx_buf = &rxr->rx_buf_ring[cons]; offset = bp->rx_offset; @@ -54,6 +116,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, act = bpf_prog_run_xdp(xdp_prog, &xdp); rcu_read_unlock(); + tx_avail = bnxt_tx_avail(bp, txr); + /* If the tx ring is not full, we must not update the rx producer yet + * because we may still be transmitting on some BDs. 
+ */ + if (tx_avail != bp->tx_ring_size) + *event &= ~BNXT_RX_EVENT; + if (orig_data != xdp.data) { offset = xdp.data - xdp.data_hard_start; *data_ptr = xdp.data_hard_start + offset; @@ -63,6 +132,20 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, case XDP_PASS: return false; + case XDP_TX: + if (tx_avail < 2) { + trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_reuse_rx_data(rxr, cons, page); + return true; + } + + *event = BNXT_TX_EVENT; + dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, + bp->rx_dir); + bnxt_xmit_xdp(bp, txr, mapping + offset, *len, + NEXT_RX(rxr->rx_prod)); + bnxt_reuse_rx_data(rxr, cons, page); + return true; default: bpf_warn_invalid_xdp_action(act); /* Fall thru */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 0bb7b7d97ec3..b529f2c5355b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -10,6 +10,7 @@ #ifndef BNXT_XDP_H #define BNXT_XDP_H +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event); -- cgit v1.2.3 From 219189e764be35372c298bee6492fc4c870b6ffd Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 7 Feb 2017 00:53:45 +0200 Subject: net: ethernet: ti: cpsw: remove netif_trans_update No need to update jiffies in txq->trans_start twice, it's supposed to be done in netdev_start_xmit() and anyway is re-written. Also, no reason to update trans time in case of an error. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 35a95dcc755b..4d1c0c3042c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1601,8 +1601,6 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct cpdma_chan *txch; int ret, q_idx; - netif_trans_update(ndev); - if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); ndev->stats.tx_dropped++; -- cgit v1.2.3 From 1105a2d3b36d1603043662d7f46c962b30ebca9e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 6 Feb 2017 15:55:20 -0800 Subject: net: mv643xx_eth: Do not clobber PHY link outside of state machine Calling phy_read_status() means that we may call into genphy_read_status() which in turn will use genphy_update_link() which can make changes to phydev->link outside of the state machine's state transitions. This is an invalid behavior that is now caught as of 811a919135b9 ("phy state machine: failsafe leave invalid RUNNING state") Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 20cb7f0de601..25642dee49d3 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1504,9 +1504,7 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, int err; u32 supported, advertising; - err = phy_read_status(dev->phydev); - if (err == 0) - err = phy_ethtool_ksettings_get(dev->phydev, cmd); + err = phy_ethtool_ksettings_get(dev->phydev, cmd); /* * The MAC does not support 1000baseT_Half. -- cgit v1.2.3 From 482ff9fdc84dd71099d54edb322a2b32d2f331e4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 6 Feb 2017 15:55:21 -0800 Subject: net: pxa168_eth: Do not clobber PHY link outside of state machine Calling phy_read_status() means that we may call into genphy_read_status() which in turn will use genphy_update_link() which can make changes to phydev->link outside of the state machine's state transitions. This is an invalid behavior that is now caught as of 811a919135b9 ("phy state machine: failsafe leave invalid RUNNING state") Since we don't have anything special, switch to the generic phy_ethtool_get_link_ksettings() function now. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/pxa168_eth.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3376a19f1e19..28cb36d9e50a 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -274,8 +274,6 @@ enum hash_table_entry { HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 }; -static int pxa168_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd); static int pxa168_init_hw(struct pxa168_eth_private *pep); static int pxa168_init_phy(struct net_device *dev); static void eth_port_reset(struct net_device *dev); @@ -987,10 +985,6 @@ static int pxa168_init_phy(struct net_device *dev) if (err) return err; - err = pxa168_get_link_ksettings(dev, &cmd); - if (err) - return err; - cmd.base.phy_address = pep->phy_addr; cmd.base.speed = pep->phy_speed; cmd.base.duplex = pep->phy_duplex; @@ -1370,18 +1364,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, return -EOPNOTSUPP; } -static int pxa168_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - int err; - - err = phy_read_status(dev->phydev); - if (err == 0) - err = phy_ethtool_ksettings_get(dev->phydev, cmd); - - return err; -} - static void pxa168_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1396,7 +1378,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, - .get_link_ksettings = pxa168_get_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; -- cgit v1.2.3 From 6136c8fe53a8a28906e090ddd8914f9846eb8df8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 6 Feb 2017 15:55:22 -0800 Subject: net: netcp: Do not clobber PHY link outside of state machine Calling phy_read_status() means that we may call into genphy_read_status() which in turn will use genphy_update_link() which can make changes to phydev->link 
outside of the state machine's state transitions. This is an invalid behavior that is now caught as off 811a919135b9 ("phy state machine: failsafe leave invalid RUNNING state") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index f7bb241b17ab..eece3e2eec14 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2313,7 +2313,6 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) dev_dbg(priv->dev, "phy found: id is: 0x%s\n", phydev_name(slave->phy)); phy_start(slave->phy); - phy_read_status(slave->phy); } return 0; } @@ -3119,7 +3118,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, dev_dbg(dev, "phy found: id is: 0x%s\n", phydev_name(slave->phy)); phy_start(slave->phy); - phy_read_status(slave->phy); } } } -- cgit v1.2.3 From e69e46261063a25c3907bed16a2e9d18b115d1fd Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 6 Feb 2017 15:55:23 -0800 Subject: net: dsa: Do not clobber PHY link outside of state machine Calling phy_read_status() means that we may call into genphy_read_status() which in turn will use genphy_update_link() which can make changes to phydev->link outside of the state machine's state transitions. This is an invalid behavior that is now caught as of 811a919135b9 ("phy state machine: failsafe leave invalid RUNNING state") Reported-by: Zefir Kurtisi Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- net/dsa/slave.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 061a49c29cef..c34872e1febc 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -684,14 +684,10 @@ dsa_slave_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct dsa_slave_priv *p = netdev_priv(dev); - int err; + int err = -EOPNOTSUPP; - err = -EOPNOTSUPP; - if (p->phy != NULL) { - err = phy_read_status(p->phy); - if (err == 0) - err = phy_ethtool_ksettings_get(p->phy, cmd); - } + if (p->phy != NULL) + err = phy_ethtool_ksettings_get(p->phy, cmd); return err; } -- cgit v1.2.3 From 252ae5330daa121586e9713b704068851e8565d9 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Tue, 7 Feb 2017 06:21:34 +0100 Subject: Documentation: devicetree: Add PHY no lane swap binding Add the documentation to avoid PHY lane swapping. This is a boolean entry to notify the phy device drivers that the TX/RX lanes NO need to be swapped. The use case for this binding mostly happens after wrong HW configuration of PHY IC during bootstrap. Signed-off-by: Lukasz Majewski Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/phy.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index fb5056b22685..b55857696fc3 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -39,6 +39,10 @@ Optional Properties: - enet-phy-lane-swap: If set, indicates the PHY will swap the TX/RX lanes to compensate for the board being designed with the lanes swapped. +- enet-phy-lane-no-swap: If set, indicates that PHY will disable swap of the + TX/RX lanes. This property allows the PHY to work correcly after e.g. 
wrong + bootstrap configuration caused by issues in PCB layout design. + - eee-broken-100tx: - eee-broken-1000t: - eee-broken-10gt: -- cgit v1.2.3 From fc6d39c39581f3c12c95f166ce95ef8beb2047e8 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Tue, 7 Feb 2017 06:20:23 +0100 Subject: net: phy: dp83867: Add lane swapping support in the DP83867 TI's PHY driver This patch adds support for enabling or disabling the lane swapping (called "port mirroring" in PHY's CFG4 register) feature of the DP83867 TI's PHY device. One use case is when bootstrap configuration enables this feature (because of e.g. LED_0 wrong wiring) so then one needs to disable it in software (at u-boot/Linux). Signed-off-by: Lukasz Majewski Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/dp83867.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index ca1b462bf7b2..be6fa24baf42 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -32,6 +32,7 @@ #define DP83867_CFG3 0x1e /* Extended Registers */ +#define DP83867_CFG4 0x0031 #define DP83867_RGMIICTL 0x0032 #define DP83867_RGMIIDCTL 0x0086 #define DP83867_IO_MUX_CFG 0x0170 @@ -70,11 +71,21 @@ #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0 #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f +/* CFG4 bits */ +#define DP83867_CFG4_PORT_MIRROR_EN BIT(0) + +enum { + DP83867_PORT_MIRROING_KEEP, + DP83867_PORT_MIRROING_EN, + DP83867_PORT_MIRROING_DIS, +}; + struct dp83867_private { int rx_id_delay; int tx_id_delay; int fifo_depth; int io_impedance; + int port_mirroring; }; static int dp83867_ack_interrupt(struct phy_device *phydev) @@ -111,6 +122,24 @@ static int dp83867_config_intr(struct phy_device *phydev) return phy_write(phydev, MII_DP83867_MICR, micr_status); } +static int dp83867_config_port_mirroring(struct phy_device *phydev) +{ + struct dp83867_private *dp83867 = + (struct dp83867_private *)phydev->priv; + u16 val; + + val = phy_read_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR); + + if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN) + val |= DP83867_CFG4_PORT_MIRROR_EN; + else + val &= ~DP83867_CFG4_PORT_MIRROR_EN; + + phy_write_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR, val); + + return 0; +} + #ifdef CONFIG_OF_MDIO static int dp83867_of_init(struct phy_device *phydev) { @@ -144,6 +173,12 @@ static int dp83867_of_init(struct phy_device *phydev) phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) return ret; + if (of_property_read_bool(of_node, "enet-phy-lane-swap")) + dp83867->port_mirroring = DP83867_PORT_MIRROING_EN; + + if (of_property_read_bool(of_node, "enet-phy-lane-no-swap")) + dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS; + return of_property_read_u32(of_node, "ti,fifo-depth", &dp83867->fifo_depth); } @@ -228,6 +263,9 @@ static int dp83867_config_init(struct phy_device *phydev) phy_write(phydev, DP83867_CFG3, val); } + if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP) + dp83867_config_port_mirroring(phydev); + return 0; } -- cgit v1.2.3 From ac6e058b75be71208e98a5808453aae9a17be480 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Tue, 7 Feb 2017 06:20:24 +0100 Subject: net: phy: dp83867: Recover from "port mirroring" N/A MODE4 The DP83867 when not properly bootstrapped - especially with LED_0 pin - can enter N/A MODE4 for "port mirroring" feature. 
To provide normal operation of the PHY, one needs not only to explicitly disable the port mirroring feature, but as well stop some IC internal testing (which disables RGMII communication). To do that the STRAP_STS1 (0x006E) register must be read and RESERVED bit 11 examined. When it is set, the another RESERVED bit (11) at PHYCR (0x0010) register must be clear to disable testing mode and enable RGMII communication. Thorough explanation of the problem can be found at following e2e thread: "DP83867IR: Problem with RESERVED bits in PHY Control Register (PHYCR) - Linux driver" https://e2e.ti.com/support/interface/ethernet/f/903/p/571313/2096954#2096954 Signed-off-by: Lukasz Majewski Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/dp83867.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index be6fa24baf42..19865530e0b1 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -34,6 +34,7 @@ /* Extended Registers */ #define DP83867_CFG4 0x0031 #define DP83867_RGMIICTL 0x0032 +#define DP83867_STRAP_STS1 0x006E #define DP83867_RGMIIDCTL 0x0086 #define DP83867_IO_MUX_CFG 0x0170 @@ -58,9 +59,13 @@ #define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1) #define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0) +/* STRAP_STS1 bits */ +#define DP83867_STRAP_STS1_RESERVED BIT(11) + /* PHY CTRL bits */ #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 #define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14) +#define DP83867_PHYCR_RESERVED_MASK BIT(11) /* RGMIIDCTL bits */ #define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 @@ -192,7 +197,7 @@ static int dp83867_of_init(struct phy_device *phydev) static int dp83867_config_init(struct phy_device *phydev) { struct dp83867_private *dp83867; - int ret, val; + int ret, val, bs; u16 delay; if (!phydev->priv) { @@ -215,6 +220,22 @@ static int dp83867_config_init(struct phy_device *phydev) return val; val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK; val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT); + + /* The code below checks if "port mirroring" N/A MODE4 has been + * enabled during power on bootstrap. + * + * Such N/A mode enabled by mistake can put PHY IC in some + * internal testing mode and disable RGMII transmission. + * + * In this particular case one needs to check STRAP_STS1 + * register's bit 11 (marked as RESERVED). + */ + + bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1, + DP83867_DEVADDR); + if (bs & DP83867_STRAP_STS1_RESERVED) + val &= ~DP83867_PHYCR_RESERVED_MASK; + ret = phy_write(phydev, MII_DP83867_PHYCTRL, val); if (ret) return ret; -- cgit v1.2.3 From ee467fbaaf8eed86efc335753bd95971aa444f7a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 7 Feb 2017 10:44:31 +0300 Subject: sfc: fix an off by one bug This bug is harmless because it's just a sanity check and we always pass valid values for "encap_type" but the test is off by one. Fixes: 9b4108012517 ("sfc: insert catch-all filters for encapsulated traffic") Signed-off-by: Dan Carpenter Acked-by: Bert Kenward Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 0475f1831b92..dec0c8083ff3 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5134,7 +5134,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, /* quick bounds check (BCAST result impossible) */ BUILD_BUG_ON(EFX_EF10_BCAST != 0); - if (encap_type > ARRAY_SIZE(map) || map[encap_type] == 0) { + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { WARN_ON(1); return -EINVAL; } -- cgit v1.2.3 From 1f02b5f42f53af516c4f5f747390e66d7a8f0bfe Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 7 Feb 2017 10:56:38 +0000 Subject: net: bridge: remove redundant check to see if err is set The error check on err is redundant as it is being checked previously each time it has been updated. Remove this redundant check. Detected with CoverityScan, CID#140030("Logically dead code") Signed-off-by: Colin Ian King Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_netlink.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 1cbdc5b96aa7..a8f6acd23e30 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -612,9 +612,6 @@ static int br_afspec(struct net_bridge *br, return err; break; } - - if (err) - return err; } return err; -- cgit v1.2.3 From bb580ad698aeb4e5455d701c228c50355f84c056 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 7 Feb 2017 12:46:46 +0100 Subject: bridge: tunnel: fix attribute checks in br_parse_vlan_tunnel_info These checks should go after the attributes have been parsed otherwise we're using tb uninitialized. Fixes: efa5356b0d97 ("bridge: per vlan dst_metadata netlink support") Reported-by: Colin Ian King Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_netlink_tunnel.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c index 99c68012c9d4..62eaf750bd9e 100644 --- a/net/bridge/br_netlink_tunnel.c +++ b/net/bridge/br_netlink_tunnel.c @@ -229,15 +229,15 @@ int br_parse_vlan_tunnel_info(struct nlattr *attr, memset(tinfo, 0, sizeof(*tinfo)); - if (!tb[IFLA_BRIDGE_VLAN_TUNNEL_ID] || - !tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]) - return -EINVAL; - err = nla_parse_nested(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX, attr, vlan_tunnel_policy); if (err < 0) return err; + if (!tb[IFLA_BRIDGE_VLAN_TUNNEL_ID] || + !tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]) + return -EINVAL; + tun_id = nla_get_u32(tb[IFLA_BRIDGE_VLAN_TUNNEL_ID]); vid = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]); if (vid >= VLAN_VID_MASK) -- cgit v1.2.3 From 85c727b5948344c8d559e2fda8925e9ddd41c29a Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Tue, 7 Feb 2017 11:37:56 -0200 Subject: sctp: drop __packed from almost all SCTP structures __packed is considered harmful as it potentially generates code that doesn't perform well and its usage should be avoided as much as possible. This patch drops __packed from all SCTP structures except one, which is sctp_signed_cookie. In there it's required, as per changelog on commit 9834a2bb4970 ("[SCTP]: Fix sctp_cookie alignment in the packet."). After this patch, no alignment changes neither in x86 or x86_64 and no exceptions were noticed during testing on both archs. Code size for SCTP module also didn't change with this patch. 
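A minimal standalone illustration of the reasoning above (a sketch, not code from the patch; the struct names are made up and only mirror the sctphdr field layout with userspace integer types): when every member is already naturally aligned, dropping the packed attribute leaves size and layout untouched and only raises the assumed alignment, which is what allows the compiler to emit ordinary aligned loads instead of byte-wise accesses on strict-alignment architectures.

#include <stdint.h>
#include <stdio.h>

struct hdr_packed {		/* two 16-bit ports, two 32-bit words */
	uint16_t source;
	uint16_t dest;
	uint32_t vtag;
	uint32_t checksum;
} __attribute__((packed));

struct hdr_natural {		/* same members, no packing */
	uint16_t source;
	uint16_t dest;
	uint32_t vtag;
	uint32_t checksum;
};

int main(void)
{
	/* Both structs print size 12; only the alignment differs (1 vs 4). */
	printf("packed : size=%zu align=%zu\n",
	       sizeof(struct hdr_packed), _Alignof(struct hdr_packed));
	printf("natural: size=%zu align=%zu\n",
	       sizeof(struct hdr_natural), _Alignof(struct hdr_natural));
	return 0;
}

The one structure the patch leaves packed, sctp_signed_cookie, is the case where the attribute does affect the on-wire layout and therefore has to stay.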
Cc: David Miller Cc: David Laight Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/linux/sctp.h | 80 +++++++++++++++++++++++----------------------- include/net/sctp/structs.h | 2 +- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index a9e790685af3..2408c6877ca0 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -62,7 +62,7 @@ typedef struct sctphdr { __be16 dest; __be32 vtag; __le32 checksum; -} __packed sctp_sctphdr_t; +} sctp_sctphdr_t; static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) { @@ -74,7 +74,7 @@ typedef struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; -} __packed sctp_chunkhdr_t; +} sctp_chunkhdr_t; /* Section 3.2. Chunk Type Values. @@ -165,7 +165,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 }; typedef struct sctp_paramhdr { __be16 type; __be16 length; -} __packed sctp_paramhdr_t; +} sctp_paramhdr_t; typedef enum { @@ -233,12 +233,12 @@ typedef struct sctp_datahdr { __be16 ssn; __be32 ppid; __u8 payload[0]; -} __packed sctp_datahdr_t; +} sctp_datahdr_t; typedef struct sctp_data_chunk { sctp_chunkhdr_t chunk_hdr; sctp_datahdr_t data_hdr; -} __packed sctp_data_chunk_t; +} sctp_data_chunk_t; /* DATA Chuck Specific Flags */ enum { @@ -264,78 +264,78 @@ typedef struct sctp_inithdr { __be16 num_inbound_streams; __be32 initial_tsn; __u8 params[0]; -} __packed sctp_inithdr_t; +} sctp_inithdr_t; typedef struct sctp_init_chunk { sctp_chunkhdr_t chunk_hdr; sctp_inithdr_t init_hdr; -} __packed sctp_init_chunk_t; +} sctp_init_chunk_t; /* Section 3.3.2.1. IPv4 Address Parameter (5) */ typedef struct sctp_ipv4addr_param { sctp_paramhdr_t param_hdr; struct in_addr addr; -} __packed sctp_ipv4addr_param_t; +} sctp_ipv4addr_param_t; /* Section 3.3.2.1. IPv6 Address Parameter (6) */ typedef struct sctp_ipv6addr_param { sctp_paramhdr_t param_hdr; struct in6_addr addr; -} __packed sctp_ipv6addr_param_t; +} sctp_ipv6addr_param_t; /* Section 3.3.2.1 Cookie Preservative (9) */ typedef struct sctp_cookie_preserve_param { sctp_paramhdr_t param_hdr; __be32 lifespan_increment; -} __packed sctp_cookie_preserve_param_t; +} sctp_cookie_preserve_param_t; /* Section 3.3.2.1 Host Name Address (11) */ typedef struct sctp_hostname_param { sctp_paramhdr_t param_hdr; uint8_t hostname[0]; -} __packed sctp_hostname_param_t; +} sctp_hostname_param_t; /* Section 3.3.2.1 Supported Address Types (12) */ typedef struct sctp_supported_addrs_param { sctp_paramhdr_t param_hdr; __be16 types[0]; -} __packed sctp_supported_addrs_param_t; +} sctp_supported_addrs_param_t; /* Appendix A. 
ECN Capable (32768) */ typedef struct sctp_ecn_capable_param { sctp_paramhdr_t param_hdr; -} __packed sctp_ecn_capable_param_t; +} sctp_ecn_capable_param_t; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ typedef struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; -} __packed sctp_adaptation_ind_param_t; +} sctp_adaptation_ind_param_t; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ typedef struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; -} __packed sctp_supported_ext_param_t; +} sctp_supported_ext_param_t; /* AUTH Section 3.1 Random */ typedef struct sctp_random_param { sctp_paramhdr_t param_hdr; __u8 random_val[0]; -} __packed sctp_random_param_t; +} sctp_random_param_t; /* AUTH Section 3.2 Chunk List */ typedef struct sctp_chunks_param { sctp_paramhdr_t param_hdr; __u8 chunks[0]; -} __packed sctp_chunks_param_t; +} sctp_chunks_param_t; /* AUTH Section 3.3 HMAC Algorithm */ typedef struct sctp_hmac_algo_param { sctp_paramhdr_t param_hdr; __be16 hmac_ids[0]; -} __packed sctp_hmac_algo_param_t; +} sctp_hmac_algo_param_t; /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP @@ -347,13 +347,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t; typedef struct sctp_cookie_param { sctp_paramhdr_t p; __u8 body[0]; -} __packed sctp_cookie_param_t; +} sctp_cookie_param_t; /* Section 3.3.3.1 Unrecognized Parameters (8) */ typedef struct sctp_unrecognized_param { sctp_paramhdr_t param_hdr; sctp_paramhdr_t unrecognized; -} __packed sctp_unrecognized_param_t; +} sctp_unrecognized_param_t; @@ -368,7 +368,7 @@ typedef struct sctp_unrecognized_param { typedef struct sctp_gap_ack_block { __be16 start; __be16 end; -} __packed sctp_gap_ack_block_t; +} sctp_gap_ack_block_t; typedef __be32 sctp_dup_tsn_t; @@ -383,12 +383,12 @@ typedef struct sctp_sackhdr { __be16 num_gap_ack_blocks; __be16 num_dup_tsns; sctp_sack_variable_t variable[0]; -} __packed sctp_sackhdr_t; +} sctp_sackhdr_t; typedef struct sctp_sack_chunk { sctp_chunkhdr_t chunk_hdr; sctp_sackhdr_t sack_hdr; -} __packed sctp_sack_chunk_t; +} sctp_sack_chunk_t; /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): @@ -400,12 +400,12 @@ typedef struct sctp_sack_chunk { typedef struct sctp_heartbeathdr { sctp_paramhdr_t info; -} __packed sctp_heartbeathdr_t; +} sctp_heartbeathdr_t; typedef struct sctp_heartbeat_chunk { sctp_chunkhdr_t chunk_hdr; sctp_heartbeathdr_t hb_hdr; -} __packed sctp_heartbeat_chunk_t; +} sctp_heartbeat_chunk_t; /* For the abort and shutdown ACK we must carry the init tag in the @@ -414,7 +414,7 @@ typedef struct sctp_heartbeat_chunk { */ typedef struct sctp_abort_chunk { sctp_chunkhdr_t uh; -} __packed sctp_abort_chunk_t; +} sctp_abort_chunk_t; /* For the graceful shutdown we must carry the tag (in common header) @@ -422,12 +422,12 @@ typedef struct sctp_abort_chunk { */ typedef struct sctp_shutdownhdr { __be32 cum_tsn_ack; -} __packed sctp_shutdownhdr_t; +} sctp_shutdownhdr_t; struct sctp_shutdown_chunk_t { sctp_chunkhdr_t chunk_hdr; sctp_shutdownhdr_t shutdown_hdr; -} __packed; +}; /* RFC 2960. 
Section 3.3.10 Operation Error (ERROR) (9) */ @@ -435,12 +435,12 @@ typedef struct sctp_errhdr { __be16 cause; __be16 length; __u8 variable[0]; -} __packed sctp_errhdr_t; +} sctp_errhdr_t; typedef struct sctp_operr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_errhdr_t err_hdr; -} __packed sctp_operr_chunk_t; +} sctp_operr_chunk_t; /* RFC 2960 3.3.10 - Operation Error * @@ -530,7 +530,7 @@ typedef struct sctp_ecnehdr { typedef struct sctp_ecne_chunk { sctp_chunkhdr_t chunk_hdr; sctp_ecnehdr_t ence_hdr; -} __packed sctp_ecne_chunk_t; +} sctp_ecne_chunk_t; /* RFC 2960. Appendix A. Explicit Congestion Notification. * Congestion Window Reduced (CWR) (13) @@ -542,7 +542,7 @@ typedef struct sctp_cwrhdr { typedef struct sctp_cwr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_cwrhdr_t cwr_hdr; -} __packed sctp_cwr_chunk_t; +} sctp_cwr_chunk_t; /* PR-SCTP * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) @@ -593,17 +593,17 @@ typedef struct sctp_cwr_chunk { struct sctp_fwdtsn_skip { __be16 stream; __be16 ssn; -} __packed; +}; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; struct sctp_fwdtsn_skip skip[0]; -} __packed; +}; struct sctp_fwdtsn_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_fwdtsn_hdr fwdtsn_hdr; -} __packed; +}; /* ADDIP @@ -641,17 +641,17 @@ struct sctp_fwdtsn_chunk { typedef struct sctp_addip_param { sctp_paramhdr_t param_hdr; __be32 crr_id; -} __packed sctp_addip_param_t; +} sctp_addip_param_t; typedef struct sctp_addiphdr { __be32 serial; __u8 params[0]; -} __packed sctp_addiphdr_t; +} sctp_addiphdr_t; typedef struct sctp_addip_chunk { sctp_chunkhdr_t chunk_hdr; sctp_addiphdr_t addip_hdr; -} __packed sctp_addip_chunk_t; +} sctp_addip_chunk_t; /* AUTH * Section 4.1 Authentication Chunk (AUTH) @@ -706,12 +706,12 @@ typedef struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; __u8 hmac[0]; -} __packed sctp_authhdr_t; +} sctp_authhdr_t; typedef struct sctp_auth_chunk { sctp_chunkhdr_t chunk_hdr; sctp_authhdr_t auth_hdr; -} __packed sctp_auth_chunk_t; +} sctp_auth_chunk_t; struct sctp_infox { struct sctp_info *sctpinfo; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6a685049f67f..387c802bf248 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -374,7 +374,7 @@ typedef struct sctp_sender_hb_info { union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; -} __packed sctp_sender_hb_info_t; +} sctp_sender_hb_info_t; struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); void sctp_stream_free(struct sctp_stream *stream); -- cgit v1.2.3 From a8cab863a75f2d353588d4c3c37dc37c77b96c83 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 7 Feb 2017 06:43:23 -0800 Subject: bridge: remove unnecessary check for vtbegin in br_fill_vlan_tinfo_range vtbegin should not be NULL in this function, Its already checked by the caller. this should silence the below smatch complaint: net/bridge/br_netlink_tunnel.c:144 br_fill_vlan_tinfo_range() error: we previously assumed 'vtbegin' could be null (see line 130) net/bridge/br_netlink_tunnel.c 129 130 if (vtbegin && vtend && (vtend->vid - vtbegin->vid) > 0) { ^^^^^^^ Check for NULL. Fixes: efa5356b0d97 ("bridge: per vlan dst_metadata netlink support") Reported-By: Dan Carpenter Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
Miller --- net/bridge/br_netlink_tunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c index 62eaf750bd9e..f38473509647 100644 --- a/net/bridge/br_netlink_tunnel.c +++ b/net/bridge/br_netlink_tunnel.c @@ -127,7 +127,7 @@ static int br_fill_vlan_tinfo_range(struct sk_buff *skb, { int err; - if (vtbegin && vtend && (vtend->vid - vtbegin->vid) > 0) { + if (vtend && (vtend->vid - vtbegin->vid) > 0) { /* add range to skb */ err = br_fill_vlan_tinfo(skb, vtbegin->vid, vtbegin->tinfo.tunnel_id, -- cgit v1.2.3 From 9bcdef3288f2f4d8b05d522128198fdcb3f07885 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 7 Feb 2017 17:27:47 +0100 Subject: spectrum: acl_tcam: Fix catchall prio value This fixes an issue reported by smatch: mlxsw_sp_acl_tcam_chunk_create() warn: impossible condition '(priority == (-1)) => (0-u32max == u64max)' Reported-by: Or Gerlitz Reported-by: Ido Schimmel Reported-by: Dan Carpenter Fixes: 22a677661f56 ("mlxsw: spectrum: Introduce ACL core with simple TCAM implementation") Signed-off-by: Jiri Pirko Acked-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index a0a968e47ae6..7382832215fa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -561,7 +561,7 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); } -#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (-1UL) +#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) static int mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, -- cgit v1.2.3 From ca6d4480f87db9d9470d3d7bbe445953fa105e57 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 7 Feb 2017 08:46:46 -0800 Subject: bridge: avoid unnecessary read of jiffies Jiffies is volatile so read it once. Signed-off-by: Stephen Hemminger Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 6 ++++-- net/bridge/br_input.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 5028691fa68a..5693168e88b6 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -592,13 +592,15 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n", source->dev->name, addr, vid); } else { + unsigned long now = jiffies; + /* fastpath: update of existing entry */ if (unlikely(source != fdb->dst)) { fdb->dst = source; fdb_modified = true; } - if (jiffies != fdb->updated) - fdb->updated = jiffies; + if (now != fdb->updated) + fdb->updated = now; if (unlikely(added_by_user)) fdb->added_by_user = 1; if (unlikely(fdb_modified)) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 220943f920d2..4615a9b3e26c 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -195,11 +195,13 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb } if (dst) { + unsigned long now = jiffies; + if (dst->is_local) return br_pass_frame_up(skb); - if (jiffies != dst->used) - dst->used = jiffies; + if (now != dst->used) + dst->used = now; br_forward(dst->dst, skb, local_rcv, false); } else { if (!mcast_hit) -- cgit v1.2.3 From 76e0e70e6452b971a69cc9794ff4a6715c11f7f2 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Tue, 7 Feb 2017 12:10:58 -0800 Subject: liquidio: do not dereference pointer if it's NULL Fix smatch errors by not dereferencing iq pointer if it's NULL. See http://marc.info/?l=kernel-janitors&m=148637299004834&w=2 Reported-by: Dan Carpenter Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index c12cfa4113cc..a3c7b999e526 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2447,7 +2447,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) /* force enable interrupt if reg cnts are high to avoid wraparound */ if ((work_done < budget && tx_done) || - (iq->pkt_in_done >= MAX_REG_CNT) || + (iq && iq->pkt_in_done >= MAX_REG_CNT) || (droq->pkt_count >= MAX_REG_CNT)) { tx_done = 1; napi_complete_done(napi, work_done); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 631f1c0f9e4d..0536cb9f6182 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1632,7 +1632,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) /* force enable interrupt if reg cnts are high to avoid wraparound */ if ((work_done < budget && tx_done) || - (iq->pkt_in_done >= MAX_REG_CNT) || + (iq && iq->pkt_in_done >= MAX_REG_CNT) || (droq->pkt_count >= MAX_REG_CNT)) { tx_done = 1; napi_complete_done(napi, work_done); -- cgit v1.2.3 From 7ca00409b5bd6e79a03d1cf3982dc429f371c02b Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Mon, 12 Dec 2016 13:57:02 +0200 Subject: iwlwifi: pcie: move msix conf functions above other functions msix configuration functions should be called by other functions. For example by pcie_d3_resume, move it above to enable it. 
Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 206 ++++++++++++------------ 1 file changed, 103 insertions(+), 103 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index b28d99f61a35..f795ebea4c4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1076,6 +1076,109 @@ static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) return hw_rfkill; } +struct iwl_causes_list { + u32 cause_num; + u32 mask_reg; + u8 addr; +}; + +static struct iwl_causes_list causes_list[] = { + {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, + {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, + {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, + {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, + {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, + {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, + {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, + {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, + {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, + {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, + {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, + {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, + {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, + {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, +}; + +static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; + int i; + + /* + * Access all non RX causes and map them to the default irq. + * In case we are missing at least one interrupt vector, + * the first interrupt vector will serve non-RX and FBQ causes. + */ + for (i = 0; i < ARRAY_SIZE(causes_list); i++) { + iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); + iwl_clear_bit(trans, causes_list[i].mask_reg, + causes_list[i].cause_num); + } +} + +static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 offset = + trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; + u32 val, idx; + + /* + * The first RX queue - fallback queue, which is designated for + * management frame, command responses etc, is always mapped to the + * first interrupt vector. The other RX queues are mapped to + * the other (N - 2) interrupt vectors. 
+ */ + val = BIT(MSIX_FH_INT_CAUSES_Q(0)); + for (idx = 1; idx < trans->num_rx_queues; idx++) { + iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), + MSIX_FH_INT_CAUSES_Q(idx - offset)); + val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); + } + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); + + val = MSIX_FH_INT_CAUSES_Q(0); + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) + val |= MSIX_NON_AUTO_CLEAR_CAUSE; + iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); + + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) + iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); +} + +static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) +{ + struct iwl_trans *trans = trans_pcie->trans; + + if (!trans_pcie->msix_enabled) { + if (trans->cfg->mq_rx_supported) + iwl_write_prph(trans, UREG_CHICK, + UREG_CHICK_MSI_ENABLE); + return; + } + + iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); + + /* + * Each cause from the causes list above and the RX causes is + * represented as a byte in the IVAR table. The first nibble + * represents the bound interrupt vector of the cause, the second + * represents no auto clear for this cause. This will be set if its + * interrupt vector is bound to serve other causes. + */ + iwl_pcie_map_rx_causes(trans); + + iwl_pcie_map_non_rx_causes(trans); + + trans_pcie->fh_init_mask = + ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); + trans_pcie->fh_mask = trans_pcie->fh_init_mask; + trans_pcie->hw_init_mask = + ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); + trans_pcie->hw_mask = trans_pcie->hw_init_mask; +} + static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1405,109 +1508,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } -struct iwl_causes_list { - u32 cause_num; - u32 mask_reg; - u8 addr; -}; - -static struct iwl_causes_list causes_list[] = { - {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, - {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, - {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, - {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, - {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, - {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, - {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, - {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, - {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, - {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, - {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, - {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, - {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, - {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, -}; - -static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; - int i; - - /* - * Access all non RX causes and map them to the default irq. - * In case we are missing at least one interrupt vector, - * the first interrupt vector will serve non-RX and FBQ causes. 
- */ - for (i = 0; i < ARRAY_SIZE(causes_list); i++) { - iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); - iwl_clear_bit(trans, causes_list[i].mask_reg, - causes_list[i].cause_num); - } -} - -static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 offset = - trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; - u32 val, idx; - - /* - * The first RX queue - fallback queue, which is designated for - * management frame, command responses etc, is always mapped to the - * first interrupt vector. The other RX queues are mapped to - * the other (N - 2) interrupt vectors. - */ - val = BIT(MSIX_FH_INT_CAUSES_Q(0)); - for (idx = 1; idx < trans->num_rx_queues; idx++) { - iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), - MSIX_FH_INT_CAUSES_Q(idx - offset)); - val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); - } - iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); - - val = MSIX_FH_INT_CAUSES_Q(0); - if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) - val |= MSIX_NON_AUTO_CLEAR_CAUSE; - iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); - - if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) - iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); -} - -static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) -{ - struct iwl_trans *trans = trans_pcie->trans; - - if (!trans_pcie->msix_enabled) { - if (trans->cfg->mq_rx_supported) - iwl_write_prph(trans, UREG_CHICK, - UREG_CHICK_MSI_ENABLE); - return; - } - - iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); - - /* - * Each cause from the causes list above and the RX causes is - * represented as a byte in the IVAR table. The first nibble - * represents the bound interrupt vector of the cause, the second - * represents no auto clear for this cause. This will be set if its - * interrupt vector is bound to serve other causes. - */ - iwl_pcie_map_rx_causes(trans); - - iwl_pcie_map_non_rx_causes(trans); - - trans_pcie->fh_init_mask = - ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); - trans_pcie->fh_mask = trans_pcie->fh_init_mask; - trans_pcie->hw_init_mask = - ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); - trans_pcie->hw_mask = trans_pcie->hw_init_mask; -} - static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans *trans) { -- cgit v1.2.3 From b699b71d82e77203be43bda5ff1b72516f129581 Mon Sep 17 00:00:00 2001 From: Pichugin Dmitry Date: Sat, 28 Jan 2017 17:06:53 +0300 Subject: cfg80211 debugfs: Cleanup some checkpatch issues This fixes the checkpatch.pl warnings: * Macros should not use a trailing semicolon. * Spaces required around that '='. * Symbolic permissions 'S_IRUGO' are not preferred. 
Signed-off-by: Dmitriy Pichugin Signed-off-by: Johannes Berg --- net/wireless/debugfs.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 5d453916a417..30fc6eb352bc 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -17,7 +17,7 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ - struct wiphy *wiphy= file->private_data; \ + struct wiphy *wiphy = file->private_data; \ char buf[buflen]; \ int res; \ \ @@ -29,14 +29,14 @@ static const struct file_operations name## _ops = { \ .read = name## _read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ -}; +} DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", - wiphy->rts_threshold) + wiphy->rts_threshold); DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", wiphy->frag_threshold); DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", - wiphy->retry_short) + wiphy->retry_short); DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", wiphy->retry_long); @@ -103,7 +103,7 @@ static const struct file_operations ht40allow_map_ops = { }; #define DEBUGFS_ADD(name) \ - debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops); + debugfs_create_file(#name, 0444, phyd, &rdev->wiphy, &name## _ops) void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) { -- cgit v1.2.3 From fe8de3da13bdbcbe8b583a3bbadf677da0f04f83 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 6 Feb 2017 10:49:27 +0000 Subject: mac80211: fils_aead: Use crypto api CMAC shash rather than bare cipher Switch the FILS AEAD code to use a cmac(aes) shash instantiated by the crypto API rather than reusing the open coded implementation in aes_cmac_vector(). This makes the code more understandable, and allows platforms to implement cmac(aes) in a more secure (*) and efficient way than is typically possible when using the AES cipher directly. So replace the crypto_cipher by a crypto_shash, and update the aes_s2v() routine to call the shash interface directly. * In particular, the generic table based AES implementation is sensitive to known-plaintext timing attacks on the key, to which AES based MAC algorithms are especially vulnerable, given that their plaintext is not usually secret. Time invariant alternatives are available (e.g., based on SIMD algorithms), but may incur a setup cost that is prohibitive when operating on a single block at a time, which is why they don't usually expose the cipher API. 
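For reference, the pattern the FILS code now relies on is plain cmac(aes) shash usage. A minimal sketch of a one-shot digest (a hypothetical standalone helper named cmac_aes_digest, shown only to illustrate the API calls used by this patch; error handling trimmed):

	#include <linux/err.h>
	#include <crypto/hash.h>

	/* Compute a 16-byte AES-CMAC of data[0..len) under the given key. */
	static int cmac_aes_digest(const u8 *key, unsigned int key_len,
				   const u8 *data, unsigned int len, u8 *mac)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_shash_setkey(tfm, key, key_len);
		if (!err) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			/* init + update + final in a single call */
			err = crypto_shash_digest(desc, data, len, mac);
		}

		crypto_free_shash(tfm);
		return err;
	}

The S2V computation in fils_aead.c is built from exactly these calls, with the RFC 5297 dbl()/xor steps (gf_mulx() and crypto_xor()) applied between digests.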
Signed-off-by: Ard Biesheuvel Signed-off-by: Johannes Berg --- net/mac80211/Kconfig | 1 + net/mac80211/aes_cmac.h | 4 --- net/mac80211/fils_aead.c | 74 +++++++++++++++++++++--------------------------- 3 files changed, 34 insertions(+), 45 deletions(-) diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 3891cbd2adea..76e30f4797fb 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -6,6 +6,7 @@ config MAC80211 select CRYPTO_AES select CRYPTO_CCM select CRYPTO_GCM + select CRYPTO_CMAC select CRC32 ---help--- This option enables the hardware independent IEEE 802.11 diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h index c827e1d5de8b..3702041f44fd 100644 --- a/net/mac80211/aes_cmac.h +++ b/net/mac80211/aes_cmac.h @@ -11,10 +11,6 @@ #include -void gf_mulx(u8 *pad); -void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, - const u8 *addr[], const size_t *len, u8 *mac, - size_t mac_len); struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[], size_t key_len); void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad, diff --git a/net/mac80211/fils_aead.c b/net/mac80211/fils_aead.c index ecfdd97758a3..061a9211ecf5 100644 --- a/net/mac80211/fils_aead.c +++ b/net/mac80211/fils_aead.c @@ -9,66 +9,58 @@ #include #include +#include #include #include "ieee80211_i.h" #include "aes_cmac.h" #include "fils_aead.h" -static int aes_s2v(struct crypto_cipher *tfm, +static void gf_mulx(u8 *pad) +{ + u64 a = get_unaligned_be64(pad); + u64 b = get_unaligned_be64(pad + 8); + + put_unaligned_be64((a << 1) | (b >> 63), pad); + put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8); +} + +static int aes_s2v(struct crypto_shash *tfm, size_t num_elem, const u8 *addr[], size_t len[], u8 *v) { - u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE]; + u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {}; + SHASH_DESC_ON_STACK(desc, tfm); size_t i; - const u8 *data[2]; - size_t data_len[2], data_elems; + + desc->tfm = tfm; /* D = AES-CMAC(K, ) */ - memset(tmp, 0, AES_BLOCK_SIZE); - data[0] = tmp; - data_len[0] = AES_BLOCK_SIZE; - aes_cmac_vector(tfm, 1, data, data_len, d, AES_BLOCK_SIZE); + crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d); for (i = 0; i < num_elem - 1; i++) { /* D = dbl(D) xor AES_CMAC(K, Si) */ gf_mulx(d); /* dbl */ - aes_cmac_vector(tfm, 1, &addr[i], &len[i], tmp, - AES_BLOCK_SIZE); + crypto_shash_digest(desc, addr[i], len[i], tmp); crypto_xor(d, tmp, AES_BLOCK_SIZE); } + crypto_shash_init(desc); + if (len[i] >= AES_BLOCK_SIZE) { /* len(Sn) >= 128 */ - size_t j; - const u8 *pos; - /* T = Sn xorend D */ - - /* Use a temporary buffer to perform xorend on Sn (addr[i]) to - * avoid modifying the const input argument. 
- */ - data[0] = addr[i]; - data_len[0] = len[i] - AES_BLOCK_SIZE; - pos = addr[i] + data_len[0]; - for (j = 0; j < AES_BLOCK_SIZE; j++) - tmp[j] = pos[j] ^ d[j]; - data[1] = tmp; - data_len[1] = AES_BLOCK_SIZE; - data_elems = 2; + crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE); + crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); } else { /* len(Sn) < 128 */ /* T = dbl(D) xor pad(Sn) */ gf_mulx(d); /* dbl */ - memset(tmp, 0, AES_BLOCK_SIZE); - memcpy(tmp, addr[i], len[i]); - tmp[len[i]] = 0x80; - crypto_xor(d, tmp, AES_BLOCK_SIZE); - data[0] = d; - data_len[0] = sizeof(d); - data_elems = 1; + crypto_xor(d, addr[i], len[i]); + d[len[i]] ^= 0x80; } /* V = AES-CMAC(K, T) */ - aes_cmac_vector(tfm, data_elems, data, data_len, v, AES_BLOCK_SIZE); + crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v); return 0; } @@ -80,7 +72,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len, size_t len[], u8 *out) { u8 v[AES_BLOCK_SIZE]; - struct crypto_cipher *tfm; + struct crypto_shash *tfm; struct crypto_skcipher *tfm2; struct skcipher_request *req; int res; @@ -95,14 +87,14 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len, /* S2V */ - tfm = crypto_alloc_cipher("aes", 0, 0); + tfm = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); /* K1 for S2V */ - res = crypto_cipher_setkey(tfm, key, key_len); + res = crypto_shash_setkey(tfm, key, key_len); if (!res) res = aes_s2v(tfm, num_elem, addr, len, v); - crypto_free_cipher(tfm); + crypto_free_shash(tfm); if (res) return res; @@ -157,7 +149,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len, size_t num_elem, const u8 *addr[], size_t len[], u8 *out) { - struct crypto_cipher *tfm; + struct crypto_shash *tfm; struct crypto_skcipher *tfm2; struct skcipher_request *req; struct scatterlist src[1], dst[1]; @@ -210,14 +202,14 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len, /* S2V */ - tfm = crypto_alloc_cipher("aes", 0, 0); + tfm = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); /* K1 for S2V */ - res = crypto_cipher_setkey(tfm, key, key_len); + res = crypto_shash_setkey(tfm, key, key_len); if (!res) res = aes_s2v(tfm, num_elem, addr, len, check); - crypto_free_cipher(tfm); + crypto_free_shash(tfm); if (res) return res; if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0) -- cgit v1.2.3 From 26717828b75dd5c46e97f7f4a9b937d038bb2852 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 6 Feb 2017 10:49:28 +0000 Subject: mac80211: aes-cmac: switch to shash CMAC driver Instead of open coding the CMAC algorithm in the mac80211 driver using byte wide xors and calls into the crypto layer for each block of data, instantiate a cmac(aes) synchronous hash and pass all the data into it directly. This does not only simplify the code, it also allows the use of more efficient and more secure implementations, especially on platforms where SIMD ciphers have a considerable setup cost. 
Signed-off-by: Ard Biesheuvel Signed-off-by: Johannes Berg --- net/mac80211/aes_cmac.c | 126 ++++++++++-------------------------------------- net/mac80211/aes_cmac.h | 11 +++-- net/mac80211/key.h | 2 +- 3 files changed, 32 insertions(+), 107 deletions(-) diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c index d0bd5fff5f0a..2fb65588490c 100644 --- a/net/mac80211/aes_cmac.c +++ b/net/mac80211/aes_cmac.c @@ -22,126 +22,50 @@ #define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */ #define AAD_LEN 20 +static const u8 zero[CMAC_TLEN_256]; -void gf_mulx(u8 *pad) -{ - int i, carry; - - carry = pad[0] & 0x80; - for (i = 0; i < AES_BLOCK_SIZE - 1; i++) - pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7); - pad[AES_BLOCK_SIZE - 1] <<= 1; - if (carry) - pad[AES_BLOCK_SIZE - 1] ^= 0x87; -} - -void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, - const u8 *addr[], const size_t *len, u8 *mac, - size_t mac_len) -{ - u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE]; - const u8 *pos, *end; - size_t i, e, left, total_len; - - memset(cbc, 0, AES_BLOCK_SIZE); - - total_len = 0; - for (e = 0; e < num_elem; e++) - total_len += len[e]; - left = total_len; - - e = 0; - pos = addr[0]; - end = pos + len[0]; - - while (left >= AES_BLOCK_SIZE) { - for (i = 0; i < AES_BLOCK_SIZE; i++) { - cbc[i] ^= *pos++; - if (pos >= end) { - e++; - pos = addr[e]; - end = pos + len[e]; - } - } - if (left > AES_BLOCK_SIZE) - crypto_cipher_encrypt_one(tfm, cbc, cbc); - left -= AES_BLOCK_SIZE; - } - - memset(pad, 0, AES_BLOCK_SIZE); - crypto_cipher_encrypt_one(tfm, pad, pad); - gf_mulx(pad); - - if (left || total_len == 0) { - for (i = 0; i < left; i++) { - cbc[i] ^= *pos++; - if (pos >= end) { - e++; - pos = addr[e]; - end = pos + len[e]; - } - } - cbc[left] ^= 0x80; - gf_mulx(pad); - } - - for (i = 0; i < AES_BLOCK_SIZE; i++) - pad[i] ^= cbc[i]; - crypto_cipher_encrypt_one(tfm, pad, pad); - memcpy(mac, pad, mac_len); -} - - -void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad, +void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic) { - const u8 *addr[3]; - size_t len[3]; - u8 zero[CMAC_TLEN]; + SHASH_DESC_ON_STACK(desc, tfm); + u8 out[AES_BLOCK_SIZE]; - memset(zero, 0, CMAC_TLEN); - addr[0] = aad; - len[0] = AAD_LEN; - addr[1] = data; - len[1] = data_len - CMAC_TLEN; - addr[2] = zero; - len[2] = CMAC_TLEN; + desc->tfm = tfm; - aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN); + crypto_shash_init(desc); + crypto_shash_update(desc, aad, AAD_LEN); + crypto_shash_update(desc, data, data_len - CMAC_TLEN); + crypto_shash_finup(desc, zero, CMAC_TLEN, out); + + memcpy(mic, out, CMAC_TLEN); } -void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad, +void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic) { - const u8 *addr[3]; - size_t len[3]; - u8 zero[CMAC_TLEN_256]; + SHASH_DESC_ON_STACK(desc, tfm); - memset(zero, 0, CMAC_TLEN_256); - addr[0] = aad; - len[0] = AAD_LEN; - addr[1] = data; - len[1] = data_len - CMAC_TLEN_256; - addr[2] = zero; - len[2] = CMAC_TLEN_256; + desc->tfm = tfm; - aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN_256); + crypto_shash_init(desc); + crypto_shash_update(desc, aad, AAD_LEN); + crypto_shash_update(desc, data, data_len - CMAC_TLEN_256); + crypto_shash_finup(desc, zero, CMAC_TLEN_256, mic); } -struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[], - size_t key_len) +struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[], + size_t 
key_len) { - struct crypto_cipher *tfm; + struct crypto_shash *tfm; - tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); + tfm = crypto_alloc_shash("cmac(aes)", 0, 0); if (!IS_ERR(tfm)) - crypto_cipher_setkey(tfm, key, key_len); + crypto_shash_setkey(tfm, key, key_len); return tfm; } - -void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm) +void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm) { - crypto_free_cipher(tfm); + crypto_free_shash(tfm); } diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h index 3702041f44fd..fef531f42003 100644 --- a/net/mac80211/aes_cmac.h +++ b/net/mac80211/aes_cmac.h @@ -10,13 +10,14 @@ #define AES_CMAC_H #include +#include -struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[], - size_t key_len); -void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad, +struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[], + size_t key_len); +void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic); -void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad, +void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic); -void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm); +void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm); #endif /* AES_CMAC_H */ diff --git a/net/mac80211/key.h b/net/mac80211/key.h index 4aa20cef0859..ebdb80b85dc3 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -93,7 +93,7 @@ struct ieee80211_key { } ccmp; struct { u8 rx_pn[IEEE80211_CMAC_PN_LEN]; - struct crypto_cipher *tfm; + struct crypto_shash *tfm; u32 replays; /* dot11RSNAStatsCMACReplays */ u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ } aes_cmac; -- cgit v1.2.3 From b2347a322d1f6f93f1a39fe17ed08628fc959351 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 7 Feb 2017 16:20:53 +0300 Subject: mac80211: check for allocation failure in debugfs code kmalloc() can fail. Also let's move the allocation out of the declaration block so it's easier to read. Fixes: 4a5eccaa9350 ("mac80211: Show pending txqlen in debugfs.") Signed-off-by: Dan Carpenter Signed-off-by: Johannes Berg --- net/mac80211/debugfs.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 47bea0babe78..5fae001f286c 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -249,12 +249,19 @@ static ssize_t misc_read(struct file *file, char __user *user_buf, struct ieee80211_local *local = file->private_data; /* Max len of each line is 16 characters, plus 9 for 'pending:\n' */ size_t bufsz = IEEE80211_MAX_QUEUES * 16 + 9; - char *buf = kzalloc(bufsz, GFP_KERNEL); - char *pos = buf, *end = buf + bufsz - 1; + char *buf; + char *pos, *end; ssize_t rv; int i; int ln; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos = buf; + end = buf + bufsz - 1; + pos += scnprintf(pos, end - pos, "pending:\n"); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { -- cgit v1.2.3 From a4956dca0764569640374ae1afb8be54a23201b8 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 7 Feb 2017 22:13:56 +0200 Subject: cfg80211: make rdev assignment clearer in nl80211_testmode_dump() Avoid assigning rdev to NULL when we already have it and getting it again from the wiphy index, by moving this code to relevant if block. 
Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 63dfa60a29ef..a7b4318f735d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -8585,6 +8585,12 @@ static int nl80211_testmode_dump(struct sk_buff *skb, * so we need to offset by 1. */ phy_idx = cb->args[0] - 1; + + rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); + if (!rdev) { + err = -ENOENT; + goto out_err; + } } else { struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); @@ -8599,7 +8605,6 @@ static int nl80211_testmode_dump(struct sk_buff *skb, goto out_err; } phy_idx = rdev->wiphy_idx; - rdev = NULL; if (attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = (long)attrbuf[NL80211_ATTR_TESTDATA]; @@ -8610,12 +8615,6 @@ static int nl80211_testmode_dump(struct sk_buff *skb, data_len = nla_len((void *)cb->args[1]); } - rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); - if (!rdev) { - err = -ENOENT; - goto out_err; - } - if (!rdev->ops->testmode_dump) { err = -EOPNOTSUPP; goto out_err; -- cgit v1.2.3 From 66cd794e3c30b8af3b6befe42a378557efb3114a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 7 Feb 2017 22:40:44 +0200 Subject: nl80211: add HT/VHT capabilities to AP parameters For the benefit of drivers that rebuild IEs in firmware, parse the IEs for HT/VHT capabilities and the respective membership selector in the (extended) supported rates. This avoids duplicating the same code into all drivers that need this information. Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 3 ++- include/net/cfg80211.h | 8 ++++++++ net/wireless/nl80211.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 02768de209d6..0dd9498c694f 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1043,8 +1043,9 @@ struct ieee80211_mgmt { } u; } __packed __aligned(2); -/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ +/* Supported rates membership selectors */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 +#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 870549480e9b..5cfd2806a078 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -748,6 +748,10 @@ struct cfg80211_bitrate_mask { * @pbss: If set, start as a PCP instead of AP. Relevant for DMG * networks. 
* @beacon_rate: bitrate to be used for beacons + * @ht_cap: HT capabilities (or %NULL if HT isn't enabled) + * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled) + * @ht_required: stations must support HT + * @vht_required: stations must support VHT */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -768,6 +772,10 @@ struct cfg80211_ap_settings { const struct cfg80211_acl_data *acl; bool pbss; struct cfg80211_bitrate_mask beacon_rate; + + const struct ieee80211_ht_cap *ht_cap; + const struct ieee80211_vht_cap *vht_cap; + bool ht_required, vht_required; }; /** diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a7b4318f735d..c853746f47bc 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3,7 +3,7 @@ * * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2015-2016 Intel Deutschland GmbH + * Copyright 2015-2017 Intel Deutschland GmbH */ #include @@ -3743,6 +3743,49 @@ static int nl80211_parse_beacon(struct nlattr *attrs[], return 0; } +static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, + const u8 *rates) +{ + int i; + + if (!rates) + return; + + for (i = 0; i < rates[1]; i++) { + if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_HT_PHY) + params->ht_required = true; + if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_VHT_PHY) + params->vht_required = true; + } +} + +/* + * Since the nl80211 API didn't include, from the beginning, attributes about + * HT/VHT requirements/capabilities, we parse them out of the IEs for the + * benefit of drivers that rebuild IEs in the firmware. + */ +static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params) +{ + const struct cfg80211_beacon_data *bcn = ¶ms->beacon; + size_t ies_len = bcn->beacon_ies_len; + const u8 *ies = bcn->beacon_ies; + const u8 *rates; + const u8 *cap; + + rates = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies, ies_len); + nl80211_check_ap_rate_selectors(params, rates); + + rates = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies, ies_len); + nl80211_check_ap_rate_selectors(params, rates); + + cap = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len); + if (cap && cap[1] >= sizeof(*params->ht_cap)) + params->ht_cap = (void *)(cap + 2); + cap = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len); + if (cap && cap[1] >= sizeof(*params->vht_cap)) + params->vht_cap = (void *)(cap + 2); +} + static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, struct cfg80211_ap_settings *params) { @@ -3971,6 +4014,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(params.acl); } + nl80211_calculate_ap_params(¶ms); + wdev_lock(wdev); err = rdev_start_ap(rdev, dev, ¶ms); if (!err) { -- cgit v1.2.3 From aad1e812eee31a0e075709c247577b0328a6deab Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 27 Jan 2017 12:27:44 +0000 Subject: nl80211: fix validation of scheduled scan info for wowlan netdetect For wowlan netdetect a separate limit is defined for the number of matchsets. Currently, this limit is ignored and the regular limit for scheduled scan matchsets, ie. struct wiphy::max_match_sets, is used for the net-detect case as well. 
Cc: Johannes Berg Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c853746f47bc..b455898df63c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6912,7 +6912,7 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans, static struct cfg80211_sched_scan_request * nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, - struct nlattr **attrs) + struct nlattr **attrs, int max_match_sets) { struct cfg80211_sched_scan_request *request; struct nlattr *attr; @@ -6977,7 +6977,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF) n_match_sets = 1; - if (n_match_sets > wiphy->max_match_sets) + if (n_match_sets > max_match_sets) return ERR_PTR(-EINVAL); if (attrs[NL80211_ATTR_IE]) @@ -7277,7 +7277,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, return -EINPROGRESS; sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev, - info->attrs); + info->attrs, + rdev->wiphy.max_match_sets); err = PTR_ERR_OR_ZERO(sched_scan_req); if (err) @@ -10089,7 +10090,8 @@ static int nl80211_parse_wowlan_nd(struct cfg80211_registered_device *rdev, if (err) goto out; - trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb); + trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb, + wowlan->max_nd_match_sets); err = PTR_ERR_OR_ZERO(trig->nd_config); if (err) trig->nd_config = NULL; -- cgit v1.2.3 From 769f07d8f0fb6a68a0eda6308bbe890bff894fd7 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Wed, 25 Jan 2017 12:43:40 +0100 Subject: mac80211: Pass new RSSI level in CQM RSSI notification Extend ieee80211_cqm_rssi_notify with a rssi_level parameter so that this information can be passed to netlink clients in the next patch, if available. Most drivers will have this value at hand. wl1251 receives events from the firmware that only tell it whether latest measurement is above or below threshold so we don't pass any value at this time (parameter is 0). 
Signed-off-by: Andrew Zaborowski Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 2 ++ drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2 +- drivers/net/wireless/st/cw1200/sta.c | 2 +- drivers/net/wireless/ti/wl1251/event.c | 4 ++-- drivers/net/wireless/ti/wlcore/event.c | 3 ++- include/net/mac80211.h | 2 ++ net/mac80211/mlme.c | 7 ++++--- net/mac80211/trace.h | 11 +++++++---- 8 files changed, 21 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 0e60e38b2acf..e06a2e323cc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -571,6 +571,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + sig, GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { @@ -580,6 +581,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + sig, GFP_KERNEL); } } diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index dadaa73ab49d..e3216473aecb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -877,7 +877,7 @@ static void rsi_perform_cqm(struct rsi_common *common, common->cqm_info.last_cqm_event_rssi = rssi; rsi_dbg(INFO_ZONE, "CQM: Notifying event: %d\n", event); - ieee80211_cqm_rssi_notify(adapter->vifs[0], event, GFP_KERNEL); + ieee80211_cqm_rssi_notify(adapter->vifs[0], event, rssi, GFP_KERNEL); return; } diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index daf06a4f842e..a52224836a2b 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -1019,7 +1019,7 @@ void cw1200_event_handler(struct work_struct *work) NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW : NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi); - ieee80211_cqm_rssi_notify(priv->vif, cqm_evt, + ieee80211_cqm_rssi_notify(priv->vif, cqm_evt, rcpi_rssi, GFP_KERNEL); break; } diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index d0593bc1f1a9..f5acd24d0e2b 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -150,7 +150,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox) "ROAMING_TRIGGER_LOW_RSSI_EVENT"); ieee80211_cqm_rssi_notify(wl->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, - GFP_KERNEL); + 0, GFP_KERNEL); } if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) { @@ -158,7 +158,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox) "ROAMING_TRIGGER_REGAINED_RSSI_EVENT"); ieee80211_cqm_rssi_notify(wl->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, - GFP_KERNEL); + 0, GFP_KERNEL); } } diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index 4b59f67724de..f2e90d223d94 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -129,7 +129,8 @@ void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr) vif = wl12xx_wlvif_to_vif(wlvif); if (event != wlvif->last_rssi_event) - ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL); + ieee80211_cqm_rssi_notify(vif, event, metric, + GFP_KERNEL); wlvif->last_rssi_event = event; } } diff --git 
a/include/net/mac80211.h b/include/net/mac80211.h index 33624ffbd5a5..b9a08cd1d97d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5278,6 +5278,7 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif); * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @rssi_event: the RSSI trigger event type + * @rssi_level: new RSSI level value or 0 if not available * @gfp: context flags * * When the %IEEE80211_VIF_SUPPORTS_CQM_RSSI is set, and a connection quality @@ -5286,6 +5287,7 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif); */ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level, gfp_t gfp); /** diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 8a6344518674..ee423688c92e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3419,14 +3419,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, - GFP_KERNEL); + sig, GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { ifmgd->last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, - GFP_KERNEL); + sig, GFP_KERNEL); } } @@ -5041,11 +5041,12 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); - trace_api_cqm_rssi_notify(sdata, rssi_event); + trace_api_cqm_rssi_notify(sdata, rssi_event, rssi_level); cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); } diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 92a47afaa989..f78d9f4f8711 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -1996,23 +1996,26 @@ TRACE_EVENT(api_connection_loss, TRACE_EVENT(api_cqm_rssi_notify, TP_PROTO(struct ieee80211_sub_if_data *sdata, - enum nl80211_cqm_rssi_threshold_event rssi_event), + enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level), - TP_ARGS(sdata, rssi_event), + TP_ARGS(sdata, rssi_event, rssi_level), TP_STRUCT__entry( VIF_ENTRY __field(u32, rssi_event) + __field(s32, rssi_level) ), TP_fast_assign( VIF_ASSIGN; __entry->rssi_event = rssi_event; + __entry->rssi_level = rssi_level; ), TP_printk( - VIF_PR_FMT " event:%d", - VIF_PR_ARG, __entry->rssi_event + VIF_PR_FMT " event:%d rssi:%d", + VIF_PR_ARG, __entry->rssi_event, __entry->rssi_level ) ); -- cgit v1.2.3 From bee427b86217b78a0a5fc85575cc155e4c32bbf9 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Wed, 25 Jan 2017 12:43:41 +0100 Subject: cfg80211: Pass new RSSI level in CQM RSSI notification Update the drivers to pass the RSSI level as a cfg80211_cqm_rssi_notify parameter and pass this value to userspace in a new nl80211 attribute. This helps both userspace and also helps in the implementation of the multiple RSSI thresholds CQM mechanism. Note for marvell/mwifiex I pass 0 for the RSSI value because the new RSSI value is not available to the driver at the time of the cfg80211_cqm_rssi_notify call, but the driver queries the new value immediately after that, so it is actually available just a moment later if we wanted to defer caling cfg80211_cqm_rssi_notify until that moment. Without this, the new cfg80211 code (patch 3) will call .get_station which will send a duplicate HostCmd_CMD_RSSI_INFO command to the hardware. 
Signed-off-by: Andrew Zaborowski Signed-off-by: Johannes Berg --- drivers/net/wireless/marvell/mwifiex/sta_event.c | 4 ++-- drivers/net/wireless/rndis_wlan.c | 2 +- include/net/cfg80211.h | 3 ++- include/uapi/linux/nl80211.h | 3 +++ net/mac80211/mlme.c | 2 +- net/wireless/nl80211.c | 9 +++++++-- net/wireless/trace.h | 11 +++++++---- 7 files changed, 23 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 9df0c4dc06ed..5cc3aa7c31cd 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -824,7 +824,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_RSSI_LOW: cfg80211_cqm_rssi_notify(priv->netdev, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, - GFP_KERNEL); + 0, GFP_KERNEL); mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, false); priv->subsc_evt_rssi_state = RSSI_LOW_RECVD; @@ -839,7 +839,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_RSSI_HIGH: cfg80211_cqm_rssi_notify(priv->netdev, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, - GFP_KERNEL); + 0, GFP_KERNEL); mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, false); priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 603c90470225..785334f7a538 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -3187,7 +3187,7 @@ static void rndis_do_cqm(struct usbnet *usbdev, s32 rssi) return; priv->last_cqm_event_rssi = rssi; - cfg80211_cqm_rssi_notify(usbdev->net, event, GFP_KERNEL); + cfg80211_cqm_rssi_notify(usbdev->net, event, rssi, GFP_KERNEL); } #define DEVICE_POLLER_JIFFIES (HZ) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5cfd2806a078..a2c18b53e053 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5390,6 +5390,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event * @dev: network device * @rssi_event: the triggered RSSI event + * @rssi_level: new RSSI level value or 0 if not available * @gfp: context flags * * This function is called when a configured connection quality monitoring @@ -5397,7 +5398,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, */ void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, - gfp_t gfp); + s32 rssi_level, gfp_t gfp); /** * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d6c62ee9bd1d..cd547b864595 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3952,6 +3952,8 @@ enum nl80211_ps_state { * %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting. * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon * loss event + * @NL80211_ATTR_CQM_RSSI_LEVEL: the RSSI value in dBm that triggered the + * RSSI threshold event. 
* @__NL80211_ATTR_CQM_AFTER_LAST: internal * @NL80211_ATTR_CQM_MAX: highest key attribute */ @@ -3965,6 +3967,7 @@ enum nl80211_attr_cqm { NL80211_ATTR_CQM_TXE_PKTS, NL80211_ATTR_CQM_TXE_INTVL, NL80211_ATTR_CQM_BEACON_LOSS_EVENT, + NL80211_ATTR_CQM_RSSI_LEVEL, /* keep last */ __NL80211_ATTR_CQM_AFTER_LAST, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ee423688c92e..6e90301154d5 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5048,7 +5048,7 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, trace_api_cqm_rssi_notify(sdata, rssi_event, rssi_level); - cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); + cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, rssi_level, gfp); } EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b455898df63c..9d738f75bd4e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9474,6 +9474,7 @@ nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = { [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_PKTS] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_INTVL] = { .type = NLA_U32 }, + [NL80211_ATTR_CQM_RSSI_LEVEL] = { .type = NLA_S32 }, }; static int nl80211_set_cqm_txe(struct genl_info *info, @@ -13959,11 +13960,11 @@ static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp) void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, - gfp_t gfp) + s32 rssi_level, gfp_t gfp) { struct sk_buff *msg; - trace_cfg80211_cqm_rssi_notify(dev, rssi_event); + trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level); if (WARN_ON(rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW && rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH)) @@ -13977,6 +13978,10 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, rssi_event)) goto nla_put_failure; + if (rssi_level && nla_put_s32(msg, NL80211_ATTR_CQM_RSSI_LEVEL, + rssi_level)) + goto nla_put_failure; + cfg80211_send_cqm(msg, gfp); return; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index ea1b47e04fa4..2419c390f150 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2490,18 +2490,21 @@ TRACE_EVENT(cfg80211_mgmt_tx_status, TRACE_EVENT(cfg80211_cqm_rssi_notify, TP_PROTO(struct net_device *netdev, - enum nl80211_cqm_rssi_threshold_event rssi_event), - TP_ARGS(netdev, rssi_event), + enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level), + TP_ARGS(netdev, rssi_event, rssi_level), TP_STRUCT__entry( NETDEV_ENTRY __field(enum nl80211_cqm_rssi_threshold_event, rssi_event) + __field(s32, rssi_level) ), TP_fast_assign( NETDEV_ASSIGN; __entry->rssi_event = rssi_event; + __entry->rssi_level = rssi_level; ), - TP_printk(NETDEV_PR_FMT ", rssi event: %d", - NETDEV_PR_ARG, __entry->rssi_event) + TP_printk(NETDEV_PR_FMT ", rssi event: %d, level: %d", + NETDEV_PR_ARG, __entry->rssi_event, __entry->rssi_level) ); TRACE_EVENT(cfg80211_reg_can_beacon, -- cgit v1.2.3 From 8373005805043efa038ca00401b9bf6959031854 Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Tue, 13 Dec 2016 12:40:34 +0200 Subject: iwlwifi: pcie: separate between SW and HW MSIX configuration The MSIX configuration flow includes two different stages: configuring the HW by writing to the IVAR table and configuring the SW to reflect the HW configuration. The HW configuration is needed on each HW reset, whereas the SW configuration is only needed during the init flow. 
Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f795ebea4c4a..e7a26f594386 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1147,7 +1147,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); } -static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) +static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; @@ -1170,12 +1170,20 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) iwl_pcie_map_rx_causes(trans); iwl_pcie_map_non_rx_causes(trans); +} + +static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) +{ + struct iwl_trans *trans = trans_pcie->trans; + + iwl_pcie_conf_msix_hw(trans_pcie); - trans_pcie->fh_init_mask = - ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); + if (!trans_pcie->msix_enabled) + return; + + trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); trans_pcie->fh_mask = trans_pcie->fh_init_mask; - trans_pcie->hw_init_mask = - ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); + trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); trans_pcie->hw_mask = trans_pcie->hw_init_mask; } @@ -1675,6 +1683,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) iwl_pcie_apm_init(trans); iwl_pcie_init_msix(trans_pcie); + /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); -- cgit v1.2.3 From d7270d619aaf4ce0e3d079fbdcae274476883e47 Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Mon, 12 Dec 2016 14:09:49 +0200 Subject: iwlwifi: pcie: re-configure IVAR table after suspend-resume During the suspend/resume flow some HW blocks are reset. This causes the IVAR table to be completely erased. This table is where interrupt causes are bound to specific IRQs. When the table is empty the interrupt handlers are not called correctly. Fix this by reconfiguring the IVAR table after resume. Fixes: 2e5d4a8f61dc ("iwlwifi: pcie: Add new configuration to enable MSIX") Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index e7a26f594386..bb2a9a151957 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1152,13 +1152,19 @@ static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) struct iwl_trans *trans = trans_pcie->trans; if (!trans_pcie->msix_enabled) { - if (trans->cfg->mq_rx_supported) + if (trans->cfg->mq_rx_supported && + test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSI_ENABLE); return; } - - iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); + /* + * The IVAR table needs to be configured again after reset, + * but if the device is disabled, we can't write to + * prph. 
+ */ + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); /* * Each cause from the causes list above and the RX causes is @@ -1457,6 +1463,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; int ret; @@ -1469,11 +1476,15 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_enable_rx_wake(trans, true); /* - * Also enables interrupts - none will happen as the device doesn't - * know we're waking it up, only when the opmode actually tells it - * after this call. + * Reconfigure IVAR table in case of MSIX or reset ict table in + * MSI mode since HW reset erased it. + * Also enables interrupts - none will happen as + * the device doesn't know we're waking it up, only when + * the opmode actually tells it after this call. */ - iwl_pcie_reset_ict(trans); + iwl_pcie_conf_msix_hw(trans_pcie); + if (!trans_pcie->msix_enabled) + iwl_pcie_reset_ict(trans); iwl_enable_interrupts(trans); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); -- cgit v1.2.3 From f4a1f04a3f66aebbe8665a28d9cf0b5e3d1c6f9d Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Thu, 15 Dec 2016 10:22:36 +0200 Subject: iwlwifi: pcie: Re-configure IVAR table after stop device When getting RF_KILL and disabling radio, the device gets stopped and reset. This erases the IVAR table that matches the interrupt to its cause, and is essential for MSIX proper functionality. Till now, the table wasn't re-configured after the reset, and therefore the interrupt that enabled radio didn't fire on the right irq, and the driver didn't handle it correctly. To fix this, configure the IVAR table again after resetting the device. Signed-off-by: Golan Ben-Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index bb2a9a151957..7f05fc56587a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1245,6 +1245,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); usleep_range(1000, 2000); + /* + * Upon stop, the IVAR table gets erased, so msi-x won't + * work. This causes a bug in RF-KILL flows, since the interrupt + * that enables radio won't fire on the correct irq, and the + * driver won't be able to handle the interrupt. + * Configure the IVAR table again after reset. + */ + iwl_pcie_conf_msix_hw(trans_pcie); + /* * Upon stop, the APM issues an interrupt if HW RF kill is set. * This is a bug in certain verions of the hardware. -- cgit v1.2.3 From 3374c3ab7f61cf820854cfe36405e4dcfc22a211 Mon Sep 17 00:00:00 2001 From: Gregory Greenman Date: Wed, 21 Dec 2016 16:18:44 +0200 Subject: iwlwifi: mvm: fix a print of NSS for HT rate Handling of the number of space time streams was missing for HT rate in rate printing function. Fix it. 
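For reference, the NSS extraction added here is just a shifted bit-field plus one. A tiny stand-alone sketch follows; the mask and shift values are placeholders for illustration, not the real RATE_HT_MCS_* definitions from the firmware API headers:

#include <stdint.h>
#include <stdio.h>

/* Placeholder mask/shift values; the real constants live in the
 * iwlwifi rate/firmware API headers. */
#define TOY_HT_MCS_INDEX_MSK 0x07
#define TOY_HT_MCS_NSS_POS   3
#define TOY_HT_MCS_NSS_MSK   (3 << TOY_HT_MCS_NSS_POS)

int main(void)
{
	uint32_t rate = 0x0b;	/* pretend HT rate word: MCS 3, NSS field 1 */
	unsigned int mcs = rate & TOY_HT_MCS_INDEX_MSK;
	unsigned int nss = ((rate & TOY_HT_MCS_NSS_MSK) >> TOY_HT_MCS_NSS_POS) + 1;

	printf("HT MCS %u, NSS %u\n", mcs, nss);	/* prints MCS 3, NSS 2 */
	return 0;
}
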
Signed-off-by: Gregory Greenman Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 80f99c365b6a..13be9a5b83ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3616,6 +3616,8 @@ int rs_pretty_print_rate(char *buf, const u32 rate) } else if (rate & RATE_MCS_HT_MSK) { type = "HT"; mcs = rate & RATE_HT_MCS_INDEX_MSK; + nss = ((rate & RATE_HT_MCS_NSS_MSK) + >> RATE_HT_MCS_NSS_POS) + 1; } else { type = "Unknown"; /* shouldn't happen */ } -- cgit v1.2.3 From c56108b58ab870892277940a1def0d6b153f3e26 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 1 Jan 2017 18:42:23 +0200 Subject: iwlwifi: mvm: fix references to first_agg_queue in DQA mode In DQA mode, first_agg_queue is initialized to IWL_MVM_DQA_MIN_DATA_QUEUE. This causes two bugs in the tx response flow: 1. When TX fails, we set IEEE80211_TX_STAT_AMPDU_NO_BACK regardless if we actually have aggregation open on the queue. This causes mac80211 to send a BAR frame even though there is no aggregation open. Fix that by simply checking the AMPDU flag that is set on by mac80211 for AMPDU packets. 2. When reclaiming frames in aggregation mode, we reclaim based on scheduler ssn and not the SN. The reason is that scheduler ssn may be ahead of SN due to a hole in the BA window that was filled. However, if we have aggregations open on IWL_MVM_DQA_BSS_CLIENT_QUEUE the reclaim flow will still go to the code of non-aggregation instead of the aggregation code since IWL_MVM_DQA_BSS_CLIENT_QUEUE is smaller than IWL_MVM_DQA_MIN_DATA_QUEUE, although it is a valid aggregation queue. Fix that by always using the aggregation reclaim code by default in DQA mode (currently it is implicitly used by default for all queues except the reserved BSS queue). 
Fixes: cf961e16620f ("iwlwifi: mvm: support dqa-mode agg on non-shared queue") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 4ba639eda7a3..1d147599cca9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1274,8 +1274,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, memset(&info->status, 0, sizeof(info->status)); - info->flags &= ~IEEE80211_TX_CTL_AMPDU; - /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: @@ -1298,10 +1296,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); /* Single frame failure in an AMPDU queue => send BAR */ - if (txq_id >= mvm->first_agg_queue && + if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && !(info->flags & IEEE80211_TX_STAT_TX_FILTERED)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + info->flags &= ~IEEE80211_TX_CTL_AMPDU; /* W/A FW bug: seq_ctl is wrong when the status isn't success */ if (status != TX_STATUS_SUCCESS) { @@ -1336,7 +1335,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ieee80211_tx_status(mvm->hw, skb); } - if (txq_id >= mvm->first_agg_queue) { + if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) { /* If this is an aggregation queue, we use the ssn since: * ssn = wifi seq_num % 256. * The seq_ctl is the sequence control of the packet to which -- cgit v1.2.3 From 5351f9ab254c30d41659924265f1ecd7b4758d9e Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 3 Jan 2017 21:03:35 +0200 Subject: iwlwifi: mvm: fix reorder timer re-arming When NSSN is behind the reorder buffer due to timeout the reorder timer isn't getting re-armed until NSSN catches up. Fix it. Fixes: 0690405fef29 ("iwlwifi: mvm: add reorder timeout per frame") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index c154ab42c80d..d79e9c2a2654 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -418,7 +418,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, /* ignore nssn smaller than head sn - this can happen due to timeout */ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) - return; + goto set_timer; while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { int index = ssn % reorder_buf->buf_size; @@ -441,6 +441,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, } reorder_buf->head_sn = nssn; +set_timer: if (reorder_buf->num_stored && !reorder_buf->removed) { u16 index = reorder_buf->head_sn % reorder_buf->buf_size; -- cgit v1.2.3 From d45cb20e123c5d7d6cd56301bc98f0bfd725cd77 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 28 Dec 2016 10:43:02 +0200 Subject: iwlwifi: mvm: use the PROBE_RESP_QUEUE to send deauth to unknown station When we send a deauth to a station we don't know about, we need to use the PROBE_RESP queue. This can happen when we send a deauth to a station that is not associated to us. 
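The rule above boils down to classifying the frame by its 802.11 frame-control type/subtype before any station lookup. A stand-alone sketch of that classification; the frame-control masks follow the standard 802.11 layout, while the queue numbers and helper name are made up for the example:

#include <stdint.h>
#include <stdio.h>

/* Frame-control layout per IEEE 802.11 (low 16 bits hold type/subtype);
 * queue numbers below are invented for the sketch. */
#define FCTL_FTYPE        0x000c
#define FCTL_STYPE        0x00f0
#define FTYPE_MGMT        0x0000
#define STYPE_PROBE_RESP  0x0050
#define STYPE_AUTH        0x00b0
#define STYPE_DEAUTH      0x00c0

#define TOY_PROBE_RESP_QUEUE 9
#define TOY_DEFAULT_QUEUE    2

/* In AP mode, probe responses, auth and deauth frames go to the
 * probe-response queue even when no station entry exists. */
static int toy_ap_queue_for(uint16_t fc)
{
	uint16_t key = fc & (FCTL_FTYPE | FCTL_STYPE);

	if (key == (FTYPE_MGMT | STYPE_PROBE_RESP) ||
	    key == (FTYPE_MGMT | STYPE_AUTH) ||
	    key == (FTYPE_MGMT | STYPE_DEAUTH))
		return TOY_PROBE_RESP_QUEUE;
	return TOY_DEFAULT_QUEUE;
}

int main(void)
{
	printf("deauth -> queue %d\n", toy_ap_queue_for(STYPE_DEAUTH));
	return 0;
}
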
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 1d147599cca9..dd2b4a300819 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -506,15 +506,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, switch (info->control.vif->type) { case NL80211_IFTYPE_AP: /* - * handle legacy hostapd as well, where station may be added - * only after assoc. + * Handle legacy hostapd as well, where station may be added + * only after assoc. Take care of the case where we send a + * deauth to a station that we don't have. */ - if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc)) + if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || + ieee80211_is_deauth(fc)) return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; - WARN_ON_ONCE(1); + WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc)); return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) -- cgit v1.2.3 From c078ca3b0c5bf82c2b31906c446d6e2ad8ea0783 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Tue, 17 Jan 2017 22:51:26 +0100 Subject: netfilter: nft_exthdr: Add support for existence check If NFT_EXTHDR_F_PRESENT is set, exthdr will not copy any header field data into *dest, but instead set it to 1 if the header is found and 0 otherwise. Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 6 ++++++ net/netfilter/nft_exthdr.c | 22 ++++++++++++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 7b730cab99bd..53aac8b8ed6b 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -704,6 +704,10 @@ enum nft_payload_attributes { }; #define NFTA_PAYLOAD_MAX (__NFTA_PAYLOAD_MAX - 1) +enum nft_exthdr_flags { + NFT_EXTHDR_F_PRESENT = (1 << 0), +}; + /** * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes * @@ -711,6 +715,7 @@ enum nft_payload_attributes { * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8) * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32) * @NFTA_EXTHDR_LEN: extension header length (NLA_U32) + * @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32) */ enum nft_exthdr_attributes { NFTA_EXTHDR_UNSPEC, @@ -718,6 +723,7 @@ enum nft_exthdr_attributes { NFTA_EXTHDR_TYPE, NFTA_EXTHDR_OFFSET, NFTA_EXTHDR_LEN, + NFTA_EXTHDR_FLAGS, __NFTA_EXTHDR_MAX }; #define NFTA_EXTHDR_MAX (__NFTA_EXTHDR_MAX - 1) diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 47beb3abcc9d..a89e5ab150db 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -23,6 +23,7 @@ struct nft_exthdr { u8 offset; u8 len; enum nft_registers dreg:8; + u8 flags; }; static void nft_exthdr_eval(const struct nft_expr *expr, @@ -35,8 +36,12 @@ static void nft_exthdr_eval(const struct nft_expr *expr, int err; err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); - if (err < 0) + if (priv->flags & NFT_EXTHDR_F_PRESENT) { + *dest = (err >= 0); + return; + } else if (err < 0) { goto err; + } offset += priv->offset; dest[priv->len / NFT_REG32_SIZE] = 0; @@ -52,6 +57,7 @@ static const struct nla_policy 
nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 }, [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, [NFTA_EXTHDR_LEN] = { .type = NLA_U32 }, + [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, }; static int nft_exthdr_init(const struct nft_ctx *ctx, @@ -59,7 +65,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); - u32 offset, len; + u32 offset, len, flags = 0; int err; if (tb[NFTA_EXTHDR_DREG] == NULL || @@ -76,10 +82,20 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, if (err < 0) return err; + if (tb[NFTA_EXTHDR_FLAGS]) { + err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags); + if (err < 0) + return err; + + if (flags & ~NFT_EXTHDR_F_PRESENT) + return -EINVAL; + } + priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); priv->offset = offset; priv->len = len; priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); + priv->flags = flags; return nft_validate_register_store(ctx, priv->dreg, NULL, NFT_DATA_VALUE, priv->len); @@ -97,6 +113,8 @@ static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr) goto nla_put_failure; if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len))) goto nla_put_failure; + if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags))) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.3 From 5cb82a38c6b5152b1deaba0c1596ce63222a4710 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Jan 2017 18:30:07 +0100 Subject: netfilter: nf_tables: pass netns to set->ops->remove() This new parameter is required by the new bitmap set type that comes in a follow up patch. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 3 ++- net/netfilter/nf_tables_api.c | 6 +++--- net/netfilter/nft_set_hash.c | 3 ++- net/netfilter/nft_set_rbtree.c | 3 ++- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 7dfdb517f0be..a721bcb1210c 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -298,7 +298,8 @@ struct nft_set_ops { bool (*deactivate_one)(const struct net *net, const struct nft_set *set, void *priv); - void (*remove)(const struct nft_set *set, + void (*remove)(const struct net *net, + const struct nft_set *set, const struct nft_set_elem *elem); void (*walk)(const struct nft_ctx *ctx, struct nft_set *set, diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 57eeae63f597..3643ce345b59 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3752,7 +3752,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, return 0; err6: - set->ops->remove(set, &elem); + set->ops->remove(ctx->net, set, &elem); err5: kfree(trans); err4: @@ -4804,7 +4804,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_setelem_notify(&trans->ctx, te->set, &te->elem, NFT_MSG_DELSETELEM, 0); - te->set->ops->remove(te->set, &te->elem); + te->set->ops->remove(net, te->set, &te->elem); atomic_dec(&te->set->nelems); te->set->ndeact--; break; @@ -4925,7 +4925,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) case NFT_MSG_NEWSETELEM: te = (struct nft_trans_elem *)trans->data; - te->set->ops->remove(te->set, &te->elem); + te->set->ops->remove(net, te->set, &te->elem); atomic_dec(&te->set->nelems); break; case NFT_MSG_DELSETELEM: diff --git a/net/netfilter/nft_set_hash.c 
b/net/netfilter/nft_set_hash.c index e36069fb76ae..bb157bd47fe8 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -203,7 +203,8 @@ static void *nft_hash_deactivate(const struct net *net, return he; } -static void nft_hash_remove(const struct nft_set *set, +static void nft_hash_remove(const struct net *net, + const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_hash *priv = nft_set_priv(set); diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index f06f55ee516d..9fbd70da1633 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -151,7 +151,8 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set, return err; } -static void nft_rbtree_remove(const struct nft_set *set, +static void nft_rbtree_remove(const struct net *net, + const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_rbtree *priv = nft_set_priv(set); -- cgit v1.2.3 From baa2d42cff088d9f962697a10e0345b444c8c409 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Jan 2017 18:30:08 +0100 Subject: netfilter: nf_tables: use struct nft_set_iter in set element flush Instead of struct nft_set_dump_args, remove unnecessary wrapper structure. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 3643ce345b59..790ffed82930 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3936,15 +3936,13 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, return -EBUSY; if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) { - struct nft_set_dump_args args = { - .iter = { - .genmask = genmask, - .fn = nft_flush_set, - }, + struct nft_set_iter iter = { + .genmask = genmask, + .fn = nft_flush_set, }; - set->ops->walk(&ctx, set, &args.iter); + set->ops->walk(&ctx, set, &iter); - return args.iter.err; + return iter.err; } nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { -- cgit v1.2.3 From 1ba1c41408df8a9d2f8b9b67e4c9e6f59b29d8ee Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Jan 2017 18:30:09 +0100 Subject: netfilter: nf_tables: rename deactivate_one() to flush() Although semantics are similar to deactivate() with no implicit element lookup, this is only called from the set flush path, so better rename this to flush(). 
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 8 ++++---- net/netfilter/nf_tables_api.c | 2 +- net/netfilter/nft_set_hash.c | 8 ++++---- net/netfilter/nft_set_rbtree.c | 8 ++++---- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index a721bcb1210c..ab155644d489 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -260,7 +260,7 @@ struct nft_expr; * @insert: insert new element into set * @activate: activate new element in the next generation * @deactivate: lookup for element and deactivate it in the next generation - * @deactivate_one: deactivate element in the next generation + * @flush: deactivate element in the next generation * @remove: remove element from set * @walk: iterate over all set elemeennts * @privsize: function to return size of set private data @@ -295,9 +295,9 @@ struct nft_set_ops { void * (*deactivate)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem); - bool (*deactivate_one)(const struct net *net, - const struct nft_set *set, - void *priv); + bool (*flush)(const struct net *net, + const struct nft_set *set, + void *priv); void (*remove)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 790ffed82930..c09b11eb36fc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3898,7 +3898,7 @@ static int nft_flush_set(const struct nft_ctx *ctx, if (!trans) return -ENOMEM; - if (!set->ops->deactivate_one(ctx->net, set, elem->priv)) { + if (!set->ops->flush(ctx->net, set, elem->priv)) { err = -ENOENT; goto err1; } diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index bb157bd47fe8..2f10ac3b1b10 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -167,8 +167,8 @@ static void nft_hash_activate(const struct net *net, const struct nft_set *set, nft_set_elem_clear_busy(&he->ext); } -static bool nft_hash_deactivate_one(const struct net *net, - const struct nft_set *set, void *priv) +static bool nft_hash_flush(const struct net *net, + const struct nft_set *set, void *priv) { struct nft_hash_elem *he = priv; @@ -195,7 +195,7 @@ static void *nft_hash_deactivate(const struct net *net, rcu_read_lock(); he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params); if (he != NULL && - !nft_hash_deactivate_one(net, set, he)) + !nft_hash_flush(net, set, he)) he = NULL; rcu_read_unlock(); @@ -398,7 +398,7 @@ static struct nft_set_ops nft_hash_ops __read_mostly = { .insert = nft_hash_insert, .activate = nft_hash_activate, .deactivate = nft_hash_deactivate, - .deactivate_one = nft_hash_deactivate_one, + .flush = nft_hash_flush, .remove = nft_hash_remove, .lookup = nft_hash_lookup, .update = nft_hash_update, diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 9fbd70da1633..81b8a4c2c061 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -172,8 +172,8 @@ static void nft_rbtree_activate(const struct net *net, nft_set_elem_change_active(net, set, &rbe->ext); } -static bool nft_rbtree_deactivate_one(const struct net *net, - const struct nft_set *set, void *priv) +static bool nft_rbtree_flush(const struct net *net, + const struct nft_set *set, void *priv) { struct nft_rbtree_elem *rbe = priv; @@ -214,7 +214,7 @@ static void *nft_rbtree_deactivate(const struct net *net, 
parent = parent->rb_right; continue; } - nft_rbtree_deactivate_one(net, set, rbe); + nft_rbtree_flush(net, set, rbe); return rbe; } } @@ -305,7 +305,7 @@ static struct nft_set_ops nft_rbtree_ops __read_mostly = { .insert = nft_rbtree_insert, .remove = nft_rbtree_remove, .deactivate = nft_rbtree_deactivate, - .deactivate_one = nft_rbtree_deactivate_one, + .flush = nft_rbtree_flush, .activate = nft_rbtree_activate, .lookup = nft_rbtree_lookup, .walk = nft_rbtree_walk, -- cgit v1.2.3 From 1f48ff6c5393aa7fe290faf5d633164f105b0aa7 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Jan 2017 18:30:10 +0100 Subject: netfilter: nf_tables: add flush field to struct nft_set_iter This provides context to walk callback iterator, thus, we know if the walk happens from the set flush path. This is required by the new bitmap set type coming in a follow up patch which has no real struct nft_set_ext, so it has to allocate it based on the two bit compact element representation. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 1 + net/netfilter/nf_tables_api.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index ab155644d489..5830f594842e 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -203,6 +203,7 @@ struct nft_set_elem { struct nft_set; struct nft_set_iter { u8 genmask; + bool flush; unsigned int count; unsigned int skip; int err; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c09b11eb36fc..7ae810b03462 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3121,6 +3121,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, iter.count = 0; iter.err = 0; iter.fn = nf_tables_bind_check_setelem; + iter.flush = false; set->ops->walk(ctx, set, &iter); if (iter.err < 0) @@ -3374,6 +3375,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) args.iter.count = 0; args.iter.err = 0; args.iter.fn = nf_tables_dump_setelem; + args.iter.flush = false; set->ops->walk(&ctx, set, &args.iter); nla_nest_end(skb, nest); @@ -3939,6 +3941,7 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, struct nft_set_iter iter = { .genmask = genmask, .fn = nft_flush_set, + .flush = true, }; set->ops->walk(&ctx, set, &iter); @@ -5089,6 +5092,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, iter.count = 0; iter.err = 0; iter.fn = nf_tables_loop_check_setelem; + iter.flush = false; set->ops->walk(ctx, set, &iter); if (iter.err < 0) -- cgit v1.2.3 From 55af753cd9fda9c5300f5318253b08bd15fb412e Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Jan 2017 18:30:11 +0100 Subject: netfilter: nf_tables: rename struct nft_set_estimate class field Use lookup as field name instead, to prepare the introduction of the memory class in a follow up patch. 
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 4 ++-- net/netfilter/nf_tables_api.c | 12 ++++++------ net/netfilter/nft_set_hash.c | 2 +- net/netfilter/nft_set_rbtree.c | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5830f594842e..d76ac2f80a40 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -244,11 +244,11 @@ enum nft_set_class { * characteristics * * @size: required memory - * @class: lookup performance class + * @lookup: lookup performance class */ struct nft_set_estimate { unsigned int size; - enum nft_set_class class; + enum nft_set_class lookup; }; struct nft_set_ext; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7ae810b03462..fa7cd1679079 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2401,9 +2401,9 @@ nft_select_set_ops(const struct nlattr * const nla[], features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT; } - bops = NULL; - best.size = ~0; - best.class = ~0; + bops = NULL; + best.size = ~0; + best.lookup = ~0; list_for_each_entry(ops, &nf_tables_set_ops, list) { if ((ops->features & features) != features) @@ -2413,15 +2413,15 @@ nft_select_set_ops(const struct nlattr * const nla[], switch (policy) { case NFT_SET_POL_PERFORMANCE: - if (est.class < best.class) + if (est.lookup < best.lookup) break; - if (est.class == best.class && est.size < best.size) + if (est.lookup == best.lookup && est.size < best.size) break; continue; case NFT_SET_POL_MEMORY: if (est.size < best.size) break; - if (est.size == best.size && est.class < best.class) + if (est.size == best.size && est.lookup < best.lookup) break; continue; default: diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 2f10ac3b1b10..e58e7f02138b 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -384,7 +384,7 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features, est->size = esize + 2 * sizeof(struct nft_hash_elem *); } - est->class = NFT_SET_CLASS_O_1; + est->lookup = NFT_SET_CLASS_O_1; return true; } diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 81b8a4c2c061..2b6ea10c4bbd 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -291,7 +291,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features, else est->size = nsize; - est->class = NFT_SET_CLASS_O_LOG_N; + est->lookup = NFT_SET_CLASS_O_LOG_N; return true; } -- cgit v1.2.3 From 0b5a78749260560f41e3b7c1f60f2c7dd9aff4f0 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Jan 2017 18:30:12 +0100 Subject: netfilter: nf_tables: add space notation to sets The space notation allows us to classify the set backend implementation based on the amount of required memory. This provides an order of the set representation scalability in terms of memory. The size field is still left in place so use this if the userspace provides no explicit number of elements, so we cannot calculate the real memory that this set needs. This also helps us break ties in the set backend selection routine, eg. two backend implementations provide the same performance. 
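A toy version of the resulting selection rule, for the performance policy when userspace gives no element count; the enum, struct and helper below are illustrative only, not the kernel's nft_select_set_ops() code, but the classes used in main() match what the hash and rbtree backends report:

#include <stdio.h>

enum toy_class { CLASS_O_1, CLASS_O_LOG_N, CLASS_O_N };	/* smaller = cheaper */

struct toy_estimate {
	enum toy_class lookup;
	enum toy_class space;
	unsigned int size;
};

/* Nonzero if 'a' beats the current best 'b' under the performance policy
 * when no set size was provided: compare lookup class first, then break
 * ties on the memory (space) class. */
static int better_performance_no_size(struct toy_estimate a,
				      struct toy_estimate b)
{
	if (a.lookup != b.lookup)
		return a.lookup < b.lookup;
	return a.space < b.space;
}

int main(void)
{
	struct toy_estimate hash   = { .lookup = CLASS_O_1,     .space = CLASS_O_N };
	struct toy_estimate rbtree = { .lookup = CLASS_O_LOG_N, .space = CLASS_O_N };

	printf("hash beats rbtree: %d\n",
	       better_performance_no_size(hash, rbtree));
	return 0;
}
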
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 22 +++++++++++++++++----- net/netfilter/nft_set_hash.c | 1 + net/netfilter/nft_set_rbtree.c | 1 + 4 files changed, 21 insertions(+), 5 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index d76ac2f80a40..21ce50e6d0c5 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -245,10 +245,12 @@ enum nft_set_class { * * @size: required memory * @lookup: lookup performance class + * @space: memory class */ struct nft_set_estimate { unsigned int size; enum nft_set_class lookup; + enum nft_set_class space; }; struct nft_set_ext; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fa7cd1679079..cb6ae46f6c48 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2404,6 +2404,7 @@ nft_select_set_ops(const struct nlattr * const nla[], bops = NULL; best.size = ~0; best.lookup = ~0; + best.space = ~0; list_for_each_entry(ops, &nf_tables_set_ops, list) { if ((ops->features & features) != features) @@ -2415,14 +2416,25 @@ nft_select_set_ops(const struct nlattr * const nla[], case NFT_SET_POL_PERFORMANCE: if (est.lookup < best.lookup) break; - if (est.lookup == best.lookup && est.size < best.size) - break; + if (est.lookup == best.lookup) { + if (!desc->size) { + if (est.space < best.space) + break; + } else if (est.size < best.size) { + break; + } + } continue; case NFT_SET_POL_MEMORY: - if (est.size < best.size) - break; - if (est.size == best.size && est.lookup < best.lookup) + if (!desc->size) { + if (est.space < best.space) + break; + if (est.space == best.space && + est.lookup < best.lookup) + break; + } else if (est.size < best.size) { break; + } continue; default: break; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index e58e7f02138b..6938bc890f31 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -385,6 +385,7 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features, } est->lookup = NFT_SET_CLASS_O_1; + est->space = NFT_SET_CLASS_O_N; return true; } diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 2b6ea10c4bbd..3387ed7dd231 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -292,6 +292,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features, est->size = nsize; est->lookup = NFT_SET_CLASS_O_LOG_N; + est->space = NFT_SET_CLASS_O_N; return true; } -- cgit v1.2.3 From 665153ff575207f3a092cfcea3c51238612a7b58 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Jan 2017 18:30:13 +0100 Subject: netfilter: nf_tables: add bitmap set type This patch adds a new bitmap set type. This bitmap uses two bits to represent one element. These two bits determine the element state in the current and the future generation that fits into the nf_tables commit protocol. When dumping elements back to userspace, the two bits are expanded into a struct nft_set_ext object. If no NFTA_SET_DESC_SIZE is specified, the existing automatic set backend selection prefers bitmap over hash in case of keys whose size is <= 16 bit. If the set size is know, the bitmap set type is selected if with 16 bit kets and more than 390 elements in the set, otherwise the hash table set implementation is used. For 8 bit keys, the bitmap consumes 66 bytes. For 16 bit keys, the bitmap takes 16388 bytes. 
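A small user-space sketch of the two-bit-per-element addressing this set type relies on; only the location math and the active test from the patch are reproduced here, the generation/commit handling is left out:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8

/* Each key maps to a two-bit slot: byte index plus bit offset. */
static void toy_bitmap_location(uint32_t key, uint32_t *idx, uint32_t *off)
{
	uint32_t k = key << 1;		/* two bits per element */

	*idx = k / BITS_PER_BYTE;
	*off = k % BITS_PER_BYTE;
}

/* Fetch the two bits for the element and test them against the genmask. */
static int toy_bitmap_active(const uint8_t *bitmap, uint32_t idx, uint32_t off,
			     uint8_t genmask)
{
	return (bitmap[idx] & (0x3 << off)) & (genmask << off);
}

int main(void)
{
	uint8_t bitmap[64];		/* 256 keys x 2 bits = 64 bytes */
	uint32_t idx, off;
	uint8_t genmask = 0x3;		/* "active in both generations" */

	memset(bitmap, 0, sizeof(bitmap));
	toy_bitmap_location(5, &idx, &off);	/* key 5 -> byte 1, offset 2 */
	bitmap[idx] |= genmask << off;		/* insert + activate */

	printf("key 5 active: %d\n",
	       !!toy_bitmap_active(bitmap, idx, off, genmask));
	return 0;
}
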
Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 6 + net/netfilter/Makefile | 1 + net/netfilter/nft_set_bitmap.c | 314 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 321 insertions(+) create mode 100644 net/netfilter/nft_set_bitmap.c diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index dfbe9deeb8c4..ea479ed43373 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -509,6 +509,12 @@ config NFT_SET_HASH This option adds the "hash" set type that is used to build one-way mappings between matchings and actions. +config NFT_SET_BITMAP + tristate "Netfilter nf_tables bitmap set module" + help + This option adds the "bitmap" set type that is used to build sets + whose keys are smaller or equal to 16 bits. + config NFT_COUNTER tristate "Netfilter nf_tables counter module" help diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 6b3034f12661..c9b78e7b342f 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -93,6 +93,7 @@ obj-$(CONFIG_NFT_REJECT) += nft_reject.o obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o obj-$(CONFIG_NFT_SET_RBTREE) += nft_set_rbtree.o obj-$(CONFIG_NFT_SET_HASH) += nft_set_hash.o +obj-$(CONFIG_NFT_SET_BITMAP) += nft_set_bitmap.o obj-$(CONFIG_NFT_COUNTER) += nft_counter.o obj-$(CONFIG_NFT_LOG) += nft_log.o obj-$(CONFIG_NFT_MASQ) += nft_masq.o diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c new file mode 100644 index 000000000000..97f9649bcc7e --- /dev/null +++ b/net/netfilter/nft_set_bitmap.c @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2017 Pablo Neira Ayuso + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* This bitmap uses two bits to represent one element. These two bits determine + * the element state in the current and the future generation. + * + * An element can be in three states. The generation cursor is represented using + * the ^ character, note that this cursor shifts on every succesful transaction. + * If no transaction is going on, we observe all elements are in the following + * state: + * + * 11 = this element is active in the current generation. In case of no updates, + * ^ it stays active in the next generation. + * 00 = this element is inactive in the current generation. In case of no + * ^ updates, it stays inactive in the next generation. + * + * On transaction handling, we observe these two temporary states: + * + * 01 = this element is inactive in the current generation and it becomes active + * ^ in the next one. This happens when the element is inserted but commit + * path has not yet been executed yet, so activation is still pending. On + * transaction abortion, the element is removed. + * 10 = this element is active in the current generation and it becomes inactive + * ^ in the next one. This happens when the element is deactivated but commit + * path has not yet been executed yet, so removal is still pending. On + * transation abortion, the next generation bit is reset to go back to + * restore its previous state. 
+ */ +struct nft_bitmap { + u16 bitmap_size; + u8 bitmap[]; +}; + +static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off) +{ + u32 k = (key << 1); + + *idx = k / BITS_PER_BYTE; + *off = k % BITS_PER_BYTE; +} + +/* Fetch the two bits that represent the element and check if it is active based + * on the generation mask. + */ +static inline bool +nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask) +{ + return (bitmap[idx] & (0x3 << off)) & (genmask << off); +} + +static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext) +{ + const struct nft_bitmap *priv = nft_set_priv(set); + u8 genmask = nft_genmask_cur(net); + u32 idx, off; + + nft_bitmap_location(*key, &idx, &off); + + return nft_bitmap_active(priv->bitmap, idx, off, genmask); +} + +static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, + const struct nft_set_elem *elem, + struct nft_set_ext **_ext) +{ + struct nft_bitmap *priv = nft_set_priv(set); + struct nft_set_ext *ext = elem->priv; + u8 genmask = nft_genmask_next(net); + u32 idx, off; + + nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); + if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) + return -EEXIST; + + /* Enter 01 state. */ + priv->bitmap[idx] |= (genmask << off); + + return 0; +} + +static void nft_bitmap_remove(const struct net *net, + const struct nft_set *set, + const struct nft_set_elem *elem) +{ + struct nft_bitmap *priv = nft_set_priv(set); + struct nft_set_ext *ext = elem->priv; + u8 genmask = nft_genmask_next(net); + u32 idx, off; + + nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); + /* Enter 00 state. */ + priv->bitmap[idx] &= ~(genmask << off); +} + +static void nft_bitmap_activate(const struct net *net, + const struct nft_set *set, + const struct nft_set_elem *elem) +{ + struct nft_bitmap *priv = nft_set_priv(set); + struct nft_set_ext *ext = elem->priv; + u8 genmask = nft_genmask_next(net); + u32 idx, off; + + nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); + /* Enter 11 state. */ + priv->bitmap[idx] |= (genmask << off); +} + +static bool nft_bitmap_flush(const struct net *net, + const struct nft_set *set, void *ext) +{ + struct nft_bitmap *priv = nft_set_priv(set); + u8 genmask = nft_genmask_next(net); + u32 idx, off; + + nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); + /* Enter 10 state, similar to deactivation. 
*/ + priv->bitmap[idx] &= ~(genmask << off); + + return true; +} + +static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set, + const struct nft_set_elem *elem) +{ + struct nft_set_ext_tmpl tmpl; + struct nft_set_ext *ext; + + nft_set_ext_prepare(&tmpl); + nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); + + ext = kzalloc(tmpl.len, GFP_KERNEL); + if (!ext) + return NULL; + + nft_set_ext_init(ext, &tmpl); + memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen); + + return ext; +} + +static void *nft_bitmap_deactivate(const struct net *net, + const struct nft_set *set, + const struct nft_set_elem *elem) +{ + struct nft_bitmap *priv = nft_set_priv(set); + u8 genmask = nft_genmask_next(net); + struct nft_set_ext *ext; + u32 idx, off, key = 0; + + memcpy(&key, elem->key.val.data, set->klen); + nft_bitmap_location(key, &idx, &off); + + if (!nft_bitmap_active(priv->bitmap, idx, off, genmask)) + return NULL; + + /* We have no real set extension since this is a bitmap, allocate this + * dummy object that is released from the commit/abort path. + */ + ext = nft_bitmap_ext_alloc(set, elem); + if (!ext) + return NULL; + + /* Enter 10 state. */ + priv->bitmap[idx] &= ~(genmask << off); + + return ext; +} + +static void nft_bitmap_walk(const struct nft_ctx *ctx, + struct nft_set *set, + struct nft_set_iter *iter) +{ + const struct nft_bitmap *priv = nft_set_priv(set); + struct nft_set_ext_tmpl tmpl; + struct nft_set_elem elem; + struct nft_set_ext *ext; + int idx, off; + u16 key; + + nft_set_ext_prepare(&tmpl); + nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); + + for (idx = 0; idx < priv->bitmap_size; idx++) { + for (off = 0; off < BITS_PER_BYTE; off += 2) { + if (iter->count < iter->skip) + goto cont; + + if (!nft_bitmap_active(priv->bitmap, idx, off, + iter->genmask)) + goto cont; + + ext = kzalloc(tmpl.len, GFP_KERNEL); + if (!ext) { + iter->err = -ENOMEM; + return; + } + nft_set_ext_init(ext, &tmpl); + key = ((idx * BITS_PER_BYTE) + off) >> 1; + memcpy(nft_set_ext_key(ext), &key, set->klen); + + elem.priv = ext; + iter->err = iter->fn(ctx, set, iter, &elem); + + /* On set flush, this dummy extension object is released + * from the commit/abort path. + */ + if (!iter->flush) + kfree(ext); + + if (iter->err < 0) + return; +cont: + iter->count++; + } + } +} + +/* The bitmap size is pow(2, key length in bits) / bits per byte. This is + * multiplied by two since each element takes two bits. For 8 bit keys, the + * bitmap consumes 66 bytes. For 16 bit keys, 16388 bytes. + */ +static inline u32 nft_bitmap_size(u32 klen) +{ + return ((2 << ((klen * BITS_PER_BYTE) - 1)) / BITS_PER_BYTE) << 1; +} + +static inline u32 nft_bitmap_total_size(u32 klen) +{ + return sizeof(struct nft_bitmap) + nft_bitmap_size(klen); +} + +static unsigned int nft_bitmap_privsize(const struct nlattr * const nla[]) +{ + u32 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN])); + + return nft_bitmap_total_size(klen); +} + +static int nft_bitmap_init(const struct nft_set *set, + const struct nft_set_desc *desc, + const struct nlattr * const nla[]) +{ + struct nft_bitmap *priv = nft_set_priv(set); + + priv->bitmap_size = nft_bitmap_total_size(set->klen); + + return 0; +} + +static void nft_bitmap_destroy(const struct nft_set *set) +{ +} + +static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features, + struct nft_set_estimate *est) +{ + /* Make sure bitmaps we don't get bitmaps larger than 16 Kbytes. 
*/ + if (desc->klen > 2) + return false; + + est->size = nft_bitmap_total_size(desc->klen); + est->lookup = NFT_SET_CLASS_O_1; + est->space = NFT_SET_CLASS_O_1; + + return true; +} + +static struct nft_set_ops nft_bitmap_ops __read_mostly = { + .privsize = nft_bitmap_privsize, + .estimate = nft_bitmap_estimate, + .init = nft_bitmap_init, + .destroy = nft_bitmap_destroy, + .insert = nft_bitmap_insert, + .remove = nft_bitmap_remove, + .deactivate = nft_bitmap_deactivate, + .flush = nft_bitmap_flush, + .activate = nft_bitmap_activate, + .lookup = nft_bitmap_lookup, + .walk = nft_bitmap_walk, + .owner = THIS_MODULE, +}; + +static int __init nft_bitmap_module_init(void) +{ + return nft_register_set(&nft_bitmap_ops); +} + +static void __exit nft_bitmap_module_exit(void) +{ + nft_unregister_set(&nft_bitmap_ops); +} + +module_init(nft_bitmap_module_init); +module_exit(nft_bitmap_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso "); +MODULE_ALIAS_NFT_SET(); -- cgit v1.2.3 From ab23821f7ecfb022a4aec78fb6f4fd0f6aa1ccab Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 3 Feb 2017 13:35:48 +0100 Subject: netfilter: nft_ct: add zone id get support Just like with counters the direction attribute is optional. We set priv->dir to MAX unconditionally to avoid duplicating the assignment for all keys with optional direction. For keys where direction is mandatory, existing code already returns an error. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nft_ct.c | 22 +++++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 53aac8b8ed6b..3e60ed78c538 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -870,6 +870,7 @@ enum nft_rt_attributes { * @NFT_CT_PKTS: conntrack packets * @NFT_CT_BYTES: conntrack bytes * @NFT_CT_AVGPKT: conntrack average bytes per packet + * @NFT_CT_ZONE: conntrack zone */ enum nft_ct_keys { NFT_CT_STATE, @@ -889,6 +890,7 @@ enum nft_ct_keys { NFT_CT_PKTS, NFT_CT_BYTES, NFT_CT_AVGPKT, + NFT_CT_ZONE, }; /** diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 66a2377510e1..5bd4cdfdcda5 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -151,6 +151,18 @@ static void nft_ct_get_eval(const struct nft_expr *expr, case NFT_CT_PROTOCOL: *dest = nf_ct_protonum(ct); return; +#ifdef CONFIG_NF_CONNTRACK_ZONES + case NFT_CT_ZONE: { + const struct nf_conntrack_zone *zone = nf_ct_zone(ct); + + if (priv->dir < IP_CT_DIR_MAX) + *dest = nf_ct_zone_id(zone, priv->dir); + else + *dest = zone->id; + + return; + } +#endif default: break; } @@ -266,6 +278,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, int err; priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); + priv->dir = IP_CT_DIR_MAX; switch (priv->key) { case NFT_CT_DIRECTION: if (tb[NFTA_CT_DIRECTION] != NULL) @@ -333,11 +346,13 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, case NFT_CT_BYTES: case NFT_CT_PKTS: case NFT_CT_AVGPKT: - /* no direction? 
return sum of original + reply */ - if (tb[NFTA_CT_DIRECTION] == NULL) - priv->dir = IP_CT_DIR_MAX; len = sizeof(u64); break; +#ifdef CONFIG_NF_CONNTRACK_ZONES + case NFT_CT_ZONE: + len = sizeof(u16); + break; +#endif default: return -EOPNOTSUPP; } @@ -465,6 +480,7 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr) case NFT_CT_BYTES: case NFT_CT_PKTS: case NFT_CT_AVGPKT: + case NFT_CT_ZONE: if (priv->dir < IP_CT_DIR_MAX && nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) goto nla_put_failure; -- cgit v1.2.3 From 5c178d81b69f08ca3195427a6ea9a46d9af23127 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 3 Feb 2017 13:35:49 +0100 Subject: netfilter: nft_ct: prepare for key-dependent error unwind Next patch will add ZONE_ID set support which will need similar error unwind (put operation) as conntrack labels. Prepare for this: remove the 'label_got' boolean in favor of a switch statement that can be extended in next patch. As we already have that in the set_destroy function place that in a separate function and call it from the set init function. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_ct.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 5bd4cdfdcda5..2d82df2737da 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -386,12 +386,24 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, return 0; } +static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) +{ + switch (priv->key) { +#ifdef CONFIG_NF_CONNTRACK_LABELS + case NFT_CT_LABELS: + nf_connlabels_put(ctx->net); + break; +#endif + default: + break; + } +} + static int nft_ct_set_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_ct *priv = nft_expr_priv(expr); - bool label_got = false; unsigned int len; int err; @@ -412,7 +424,6 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1); if (err) return err; - label_got = true; break; #endif default: @@ -431,8 +442,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, return 0; err1: - if (label_got) - nf_connlabels_put(ctx->net); + __nft_ct_set_destroy(ctx, priv); return err; } @@ -447,16 +457,7 @@ static void nft_ct_set_destroy(const struct nft_ctx *ctx, { struct nft_ct *priv = nft_expr_priv(expr); - switch (priv->key) { -#ifdef CONFIG_NF_CONNTRACK_LABELS - case NFT_CT_LABELS: - nf_connlabels_put(ctx->net); - break; -#endif - default: - break; - } - + __nft_ct_set_destroy(ctx, priv); nft_ct_netns_put(ctx->net, ctx->afi->family); } -- cgit v1.2.3 From edee4f1e92458299505ff007733f676b00c516a1 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 3 Feb 2017 13:35:50 +0100 Subject: netfilter: nft_ct: add zone id set support zones allow tracking multiple connections sharing identical tuples, this is needed e.g. when tracking distinct vlans with overlapping ip addresses (conntrack is l2 agnostic). Thus the zone has to be set before the packet is picked up by the connection tracker. This is done by means of 'conntrack templates' which are conntrack structures used solely to pass this info from one netfilter hook to the next. The iptables CT target instantiates these connection tracking templates once per rule, i.e. the template is fixed/tied to particular zone, can be read-only and therefore be re-used by as many skbs simultaneously as needed. 
We can't follow this model because we want to take the zone id from an sreg at rule eval time so we could e.g. fill in the zone id from the packets vlan id or a e.g. nftables key : value maps. To avoid cost of per packet alloc/free of the template, use a percpu template 'scratch' object and use the refcount to detect the (unlikely) case where the template is still attached to another skb (i.e., previous skb was nfqueued ...). Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_ct.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 143 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 2d82df2737da..c6b8022c0e47 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -32,6 +32,11 @@ struct nft_ct { }; }; +#ifdef CONFIG_NF_CONNTRACK_ZONES +static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template); +static unsigned int nft_ct_pcpu_template_refcnt __read_mostly; +#endif + static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c, enum nft_ct_keys k, enum ip_conntrack_dir d) @@ -191,6 +196,53 @@ err: regs->verdict.code = NFT_BREAK; } +#ifdef CONFIG_NF_CONNTRACK_ZONES +static void nft_ct_set_zone_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + struct nf_conntrack_zone zone = { .dir = NF_CT_DEFAULT_ZONE_DIR }; + const struct nft_ct *priv = nft_expr_priv(expr); + struct sk_buff *skb = pkt->skb; + enum ip_conntrack_info ctinfo; + u16 value = regs->data[priv->sreg]; + struct nf_conn *ct; + + ct = nf_ct_get(skb, &ctinfo); + if (ct) /* already tracked */ + return; + + zone.id = value; + + switch (priv->dir) { + case IP_CT_DIR_ORIGINAL: + zone.dir = NF_CT_ZONE_DIR_ORIG; + break; + case IP_CT_DIR_REPLY: + zone.dir = NF_CT_ZONE_DIR_REPL; + break; + default: + break; + } + + ct = this_cpu_read(nft_ct_pcpu_template); + + if (likely(atomic_read(&ct->ct_general.use) == 1)) { + nf_ct_zone_add(ct, &zone); + } else { + /* previous skb got queued to userspace */ + ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC); + if (!ct) { + regs->verdict.code = NF_DROP; + return; + } + } + + atomic_inc(&ct->ct_general.use); + nf_ct_set(skb, ct, IP_CT_NEW); +} +#endif + static void nft_ct_set_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -269,6 +321,45 @@ static void nft_ct_netns_put(struct net *net, uint8_t family) nf_ct_netns_put(net, family); } +#ifdef CONFIG_NF_CONNTRACK_ZONES +static void nft_ct_tmpl_put_pcpu(void) +{ + struct nf_conn *ct; + int cpu; + + for_each_possible_cpu(cpu) { + ct = per_cpu(nft_ct_pcpu_template, cpu); + if (!ct) + break; + nf_ct_put(ct); + per_cpu(nft_ct_pcpu_template, cpu) = NULL; + } +} + +static bool nft_ct_tmpl_alloc_pcpu(void) +{ + struct nf_conntrack_zone zone = { .id = 0 }; + struct nf_conn *tmp; + int cpu; + + if (nft_ct_pcpu_template_refcnt) + return true; + + for_each_possible_cpu(cpu) { + tmp = nf_ct_tmpl_alloc(&init_net, &zone, GFP_KERNEL); + if (!tmp) { + nft_ct_tmpl_put_pcpu(); + return false; + } + + atomic_set(&tmp->ct_general.use, 1); + per_cpu(nft_ct_pcpu_template, cpu) = tmp; + } + + return true; +} +#endif + static int nft_ct_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -393,6 +484,11 @@ static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) case NFT_CT_LABELS: nf_connlabels_put(ctx->net); break; +#endif +#ifdef CONFIG_NF_CONNTRACK_ZONES + case NFT_CT_ZONE: + if 
(--nft_ct_pcpu_template_refcnt == 0) + nft_ct_tmpl_put_pcpu(); #endif default: break; @@ -407,6 +503,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, unsigned int len; int err; + priv->dir = IP_CT_DIR_MAX; priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); switch (priv->key) { #ifdef CONFIG_NF_CONNTRACK_MARK @@ -425,11 +522,29 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, if (err) return err; break; +#endif +#ifdef CONFIG_NF_CONNTRACK_ZONES + case NFT_CT_ZONE: + if (!nft_ct_tmpl_alloc_pcpu()) + return -ENOMEM; + nft_ct_pcpu_template_refcnt++; + break; #endif default: return -EOPNOTSUPP; } + if (tb[NFTA_CT_DIRECTION]) { + priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]); + switch (priv->dir) { + case IP_CT_DIR_ORIGINAL: + case IP_CT_DIR_REPLY: + break; + default: + return -EINVAL; + } + } + priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]); err = nft_validate_register_load(priv->sreg, len); if (err < 0) @@ -504,6 +619,17 @@ static int nft_ct_set_dump(struct sk_buff *skb, const struct nft_expr *expr) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key))) goto nla_put_failure; + + switch (priv->key) { + case NFT_CT_ZONE: + if (priv->dir < IP_CT_DIR_MAX && + nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) + goto nla_put_failure; + break; + default: + break; + } + return 0; nla_put_failure: @@ -529,6 +655,17 @@ static const struct nft_expr_ops nft_ct_set_ops = { .dump = nft_ct_set_dump, }; +#ifdef CONFIG_NF_CONNTRACK_ZONES +static const struct nft_expr_ops nft_ct_set_zone_ops = { + .type = &nft_ct_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), + .eval = nft_ct_set_zone_eval, + .init = nft_ct_set_init, + .destroy = nft_ct_set_destroy, + .dump = nft_ct_set_dump, +}; +#endif + static const struct nft_expr_ops * nft_ct_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -542,8 +679,13 @@ nft_ct_select_ops(const struct nft_ctx *ctx, if (tb[NFTA_CT_DREG]) return &nft_ct_get_ops; - if (tb[NFTA_CT_SREG]) + if (tb[NFTA_CT_SREG]) { +#ifdef CONFIG_NF_CONNTRACK_ZONES + if (nla_get_be32(tb[NFTA_CT_KEY]) == htonl(NFT_CT_ZONE)) + return &nft_ct_set_zone_ops; +#endif return &nft_ct_set_ops; + } return ERR_PTR(-EINVAL); } -- cgit v1.2.3 From 935b7f643018878bd9d4193eea8b575aff736b9b Mon Sep 17 00:00:00 2001 From: Manuel Messner Date: Tue, 7 Feb 2017 03:14:53 +0100 Subject: netfilter: nft_exthdr: add TCP option matching This patch implements the kernel side of the TCP option patch. 
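A self-contained sketch of the option walk this expression performs: TCP options after the fixed header are (kind, length, value) records, with EOL and NOP as single bytes. The constants below are the standard TCP option kinds, and the length helper mirrors the zero-length guard used in the patch:

#include <stdint.h>
#include <stdio.h>

#define TCPOPT_EOL 0
#define TCPOPT_NOP 1

/* Length of the option starting at offset i; single-byte options and
 * malformed zero-length records still make progress. */
static unsigned int toy_optlen(const uint8_t *opt, unsigned int i)
{
	if (opt[i] <= TCPOPT_NOP || opt[i + 1] == 0)
		return 1;
	return opt[i + 1];
}

int main(void)
{
	/* NOP, NOP, MSS (kind 2, len 4, value 0x05b4), EOL */
	const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4, 0 };
	unsigned int want = 2;	/* look for the MSS option */

	for (unsigned int i = 0; i < sizeof(opts) - 1; i += toy_optlen(opts, i)) {
		if (opts[i] != want)
			continue;
		printf("MSS = %u\n", (opts[i + 2] << 8) | opts[i + 3]);
		break;
	}
	return 0;
}
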
Signed-off-by: Manuel Messner Reviewed-by: Florian Westphal Acked-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 17 ++++- net/netfilter/Kconfig | 4 +- net/netfilter/nft_exthdr.c | 119 +++++++++++++++++++++++++++---- 3 files changed, 124 insertions(+), 16 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 3e60ed78c538..207951516ede 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -709,13 +709,27 @@ enum nft_exthdr_flags { }; /** - * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes + * enum nft_exthdr_op - nf_tables match options + * + * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers + * @NFT_EXTHDR_OP_TCP: match against tcp options + */ +enum nft_exthdr_op { + NFT_EXTHDR_OP_IPV6, + NFT_EXTHDR_OP_TCPOPT, + __NFT_EXTHDR_OP_MAX +}; +#define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) + +/** + * enum nft_exthdr_attributes - nf_tables extension header expression netlink attributes * * @NFTA_EXTHDR_DREG: destination register (NLA_U32: nft_registers) * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8) * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32) * @NFTA_EXTHDR_LEN: extension header length (NLA_U32) * @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32) + * @NFTA_EXTHDR_OP: option match type (NLA_U8) */ enum nft_exthdr_attributes { NFTA_EXTHDR_UNSPEC, @@ -724,6 +738,7 @@ enum nft_exthdr_attributes { NFTA_EXTHDR_OFFSET, NFTA_EXTHDR_LEN, NFTA_EXTHDR_FLAGS, + NFTA_EXTHDR_OP, __NFTA_EXTHDR_MAX }; #define NFTA_EXTHDR_MAX (__NFTA_EXTHDR_MAX - 1) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index ea479ed43373..9b28864cc36a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -467,10 +467,10 @@ config NF_TABLES_NETDEV This option enables support for the "netdev" table. config NFT_EXTHDR - tristate "Netfilter nf_tables IPv6 exthdr module" + tristate "Netfilter nf_tables exthdr module" help This option adds the "exthdr" expression that you can use to match - IPv6 extension headers. + IPv6 extension headers and tcp options. 
config NFT_META tristate "Netfilter nf_tables meta module" diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index a89e5ab150db..c308920b194c 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -15,20 +15,29 @@ #include #include #include -// FIXME: -#include +#include struct nft_exthdr { u8 type; u8 offset; u8 len; + u8 op; enum nft_registers dreg:8; u8 flags; }; -static void nft_exthdr_eval(const struct nft_expr *expr, - struct nft_regs *regs, - const struct nft_pktinfo *pkt) +static unsigned int optlen(const u8 *opt, unsigned int offset) +{ + /* Beware zero-length options: make finite progress */ + if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0) + return 1; + else + return opt[offset + 1]; +} + +static void nft_exthdr_ipv6_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) { struct nft_exthdr *priv = nft_expr_priv(expr); u32 *dest = ®s->data[priv->dreg]; @@ -52,6 +61,53 @@ err: regs->verdict.code = NFT_BREAK; } +static void nft_exthdr_tcp_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE]; + struct nft_exthdr *priv = nft_expr_priv(expr); + unsigned int i, optl, tcphdr_len, offset; + u32 *dest = ®s->data[priv->dreg]; + struct tcphdr *tcph; + u8 *opt; + + if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP) + goto err; + + tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buff); + if (!tcph) + goto err; + + tcphdr_len = __tcp_hdrlen(tcph); + if (tcphdr_len < sizeof(*tcph)) + goto err; + + tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, tcphdr_len, buff); + if (!tcph) + goto err; + + opt = (u8 *)tcph; + for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { + optl = optlen(opt, i); + + if (priv->type != opt[i]) + continue; + + if (i + optl > tcphdr_len || priv->len + priv->offset > optl) + goto err; + + offset = i + priv->offset; + dest[priv->len / NFT_REG32_SIZE] = 0; + memcpy(dest, opt + offset, priv->len); + + return; + } + +err: + regs->verdict.code = NFT_BREAK; +} + static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { [NFTA_EXTHDR_DREG] = { .type = NLA_U32 }, [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 }, @@ -65,13 +121,13 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); - u32 offset, len, flags = 0; + u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6; int err; - if (tb[NFTA_EXTHDR_DREG] == NULL || - tb[NFTA_EXTHDR_TYPE] == NULL || - tb[NFTA_EXTHDR_OFFSET] == NULL || - tb[NFTA_EXTHDR_LEN] == NULL) + if (!tb[NFTA_EXTHDR_DREG] || + !tb[NFTA_EXTHDR_TYPE] || + !tb[NFTA_EXTHDR_OFFSET] || + !tb[NFTA_EXTHDR_LEN]) return -EINVAL; err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset); @@ -91,11 +147,18 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, return -EINVAL; } + if (tb[NFTA_EXTHDR_OP]) { + err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op); + if (err < 0) + return err; + } + priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); priv->offset = offset; priv->len = len; priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); priv->flags = flags; + priv->op = op; return nft_validate_register_store(ctx, priv->dreg, NULL, NFT_DATA_VALUE, priv->len); @@ -115,6 +178,8 @@ static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr) goto nla_put_failure; if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags))) goto nla_put_failure; + if 
(nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op))) + goto nla_put_failure; return 0; nla_put_failure: @@ -122,17 +187,45 @@ nla_put_failure: } static struct nft_expr_type nft_exthdr_type; -static const struct nft_expr_ops nft_exthdr_ops = { +static const struct nft_expr_ops nft_exthdr_ipv6_ops = { + .type = &nft_exthdr_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), + .eval = nft_exthdr_ipv6_eval, + .init = nft_exthdr_init, + .dump = nft_exthdr_dump, +}; + +static const struct nft_expr_ops nft_exthdr_tcp_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), - .eval = nft_exthdr_eval, + .eval = nft_exthdr_tcp_eval, .init = nft_exthdr_init, .dump = nft_exthdr_dump, }; +static const struct nft_expr_ops * +nft_exthdr_select_ops(const struct nft_ctx *ctx, + const struct nlattr * const tb[]) +{ + u32 op; + + if (!tb[NFTA_EXTHDR_OP]) + return &nft_exthdr_ipv6_ops; + + op = ntohl(nla_get_u32(tb[NFTA_EXTHDR_OP])); + switch (op) { + case NFT_EXTHDR_OP_TCPOPT: + return &nft_exthdr_tcp_ops; + case NFT_EXTHDR_OP_IPV6: + return &nft_exthdr_ipv6_ops; + } + + return ERR_PTR(-EOPNOTSUPP); +} + static struct nft_expr_type nft_exthdr_type __read_mostly = { .name = "exthdr", - .ops = &nft_exthdr_ops, + .select_ops = &nft_exthdr_select_ops, .policy = nft_exthdr_policy, .maxattr = NFTA_EXTHDR_MAX, .owner = THIS_MODULE, -- cgit v1.2.3 From dab55d1083db11d731df5c0d53865efe2bf8e9fe Mon Sep 17 00:00:00 2001 From: Waldemar Rymarkiewicz Date: Thu, 2 Feb 2017 18:53:42 +0100 Subject: ath10k: remove unneeded semicolon Remove redundant semicolon after switch statement. Signed-off-by: Waldemar Rymarkiewicz Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 2743a9bcd556..02a3fc81fbe3 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2492,7 +2492,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); break; - }; + } return true; } EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); -- cgit v1.2.3 From 949c2d0096753d518ef6e0bd8418c8086747196b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 7 Feb 2017 15:33:28 +0100 Subject: wil6210: include moduleparam.h This now declares a module parameter, so include the necessary header file for that. Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index e25e78e71f54..83155b5ddbfb 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -15,6 +15,7 @@ */ #include +#include #include "wil6210.h" #include "wmi.h" -- cgit v1.2.3 From 5524ddd4c1f1624a7a96e6078a5ed81f493e7398 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 7 Feb 2017 23:29:33 +0100 Subject: ath10k: select WANT_DEV_COREDUMP This is necessary so that - if ath10k is the only driver using dev_coredump*() - the functionality is built into the kernel. 
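From the driver side, the devcoredump facility this select is about comes down to a single handover call. A minimal sketch only — the helper name below is hypothetical and this is not ath10k's actual dump path:

#include <linux/devcoredump.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

/* Hand a firmware crash dump to devcoredump.  dev_coredumpv() takes
 * ownership of the vmalloc'ed buffer and frees it with vfree() once
 * userspace has read the dump or the timeout expires; without
 * CONFIG_DEV_COREDUMP the stub simply frees the buffer. */
static void example_report_fw_crash(struct device *dev,
				    const void *fw_dump, size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;

	memcpy(buf, fw_dump, len);
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}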
Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index db1ca629cbd6..b4241cf9b7ed 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -3,6 +3,7 @@ config ATH10K depends on MAC80211 && HAS_DMA select ATH_COMMON select CRC32 + select WANT_DEV_COREDUMP ---help--- This module adds support for wireless adapters based on Atheros IEEE 802.11ac family of chipsets. -- cgit v1.2.3 From 9587a01a7ead9efc5032c16e0d9668de58be1186 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Thu, 2 Feb 2017 22:33:13 +0100 Subject: brcmfmac: merge two brcmf_err macros into one MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows simplifying the code by adding a simple IS_ENABLED check for CONFIG_BRCMDB symbol. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index 6687812770cc..1fe4aa973a92 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -45,20 +45,16 @@ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#ifndef CONFIG_BRCM_TRACING /* Macro for error messages. net_ratelimit() is used when driver * debugging is not selected. When debugging the driver error * messages are as important as other tracing or even more so. */ -#ifndef CONFIG_BRCM_TRACING -#ifdef CONFIG_BRCMDBG -#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__) -#else #define brcmf_err(fmt, ...) \ do { \ - if (net_ratelimit()) \ + if (IS_ENABLED(CONFIG_BRCMDBG) || net_ratelimit()) \ pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \ } while (0) -#endif #else __printf(2, 3) void __brcmf_err(const char *func, const char *fmt, ...); -- cgit v1.2.3 From 087fa712a00685dac4bcc64b7c3dc8ae6bee8026 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Thu, 2 Feb 2017 22:33:14 +0100 Subject: brcmfmac: switch to C function (__brcmf_err) for printing errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will allow extending code and using more detailed messages e.g. with the help of dev_err. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../net/wireless/broadcom/brcm80211/brcmfmac/common.c | 16 ++++++++++++++++ drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h | 6 +++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index f7c8c2e80349..33b133f7e63a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -218,6 +218,22 @@ done: return err; } +#ifndef CONFIG_BRCM_TRACING +void __brcmf_err(const char *func, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + pr_err("%s: %pV", func, &vaf); + + va_end(args); +} +#endif + #if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG) void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index 1fe4aa973a92..441a6661eac0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -45,6 +45,8 @@ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +__printf(2, 3) +void __brcmf_err(const char *func, const char *fmt, ...); #ifndef CONFIG_BRCM_TRACING /* Macro for error messages. net_ratelimit() is used when driver * debugging is not selected. When debugging the driver error @@ -53,11 +55,9 @@ #define brcmf_err(fmt, ...) \ do { \ if (IS_ENABLED(CONFIG_BRCMDBG) || net_ratelimit()) \ - pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \ + __brcmf_err(__func__, fmt, ##__VA_ARGS__); \ } while (0) #else -__printf(2, 3) -void __brcmf_err(const char *func, const char *fmt, ...); #define brcmf_err(fmt, ...) \ __brcmf_err(__func__, fmt, ##__VA_ARGS__) #endif -- cgit v1.2.3 From d0630555650a394cf5743268820511f527a561a5 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Thu, 2 Feb 2017 22:33:15 +0100 Subject: brcmfmac: merge two remaining brcmf_err macros MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now we always have __brcmf_err function we can do perfectly fine with just one macro. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index 441a6661eac0..066126123e96 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -47,20 +47,16 @@ __printf(2, 3) void __brcmf_err(const char *func, const char *fmt, ...); -#ifndef CONFIG_BRCM_TRACING -/* Macro for error messages. net_ratelimit() is used when driver - * debugging is not selected. When debugging the driver error - * messages are as important as other tracing or even more so. +/* Macro for error messages. When debugging / tracing the driver all error + * messages are important to us. */ #define brcmf_err(fmt, ...) \ do { \ - if (IS_ENABLED(CONFIG_BRCMDBG) || net_ratelimit()) \ + if (IS_ENABLED(CONFIG_BRCMDBG) || \ + IS_ENABLED(CONFIG_BRCM_TRACING) || \ + net_ratelimit()) \ __brcmf_err(__func__, fmt, ##__VA_ARGS__); \ } while (0) -#else -#define brcmf_err(fmt, ...) \ - __brcmf_err(__func__, fmt, ##__VA_ARGS__) -#endif #if defined(DEBUG) || defined(CONFIG_BRCM_TRACING) __printf(3, 4) -- cgit v1.2.3 From d5efe1535af07223b3ba81823a595be04f96969c Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Tue, 7 Feb 2017 09:14:21 -0600 Subject: rtlwifi: Move items out of rtl_pci_priv and rtl_usb_priv In commit 6773386f977c ("rtlwifi: rtl8192c-common: Fix "BUG: KASAN:"), a BUG detected when CONFIG_KASAN=y was fixed by reordering the layouts of struct rtl_pci_priv, and struct rtl_usb_priv so that the variables used by both PCI and USB drivers have the same offsets in both structs. 
The better fix of relocating the critical variables into struct rtl_priv was deferred as these changes do not have to be applied to stable kernels. This change also removes CamelCase variables with pLed0 => pled0. Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 14 +- .../net/wireless/realtek/rtlwifi/rtl8188ee/led.c | 19 +-- .../wireless/realtek/rtlwifi/rtl8192c/dm_common.c | 143 +++++++++--------- .../net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 166 ++++++++++----------- .../net/wireless/realtek/rtlwifi/rtl8192ce/led.c | 19 +-- .../net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8192cu/led.c | 17 ++- .../net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 10 +- .../net/wireless/realtek/rtlwifi/rtl8192de/led.c | 19 +-- .../net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 6 +- .../net/wireless/realtek/rtlwifi/rtl8192ee/led.c | 14 +- .../net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 12 +- .../net/wireless/realtek/rtlwifi/rtl8192se/led.c | 19 +-- .../net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 8 +- .../net/wireless/realtek/rtlwifi/rtl8723ae/led.c | 19 +-- .../net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 8 +- .../net/wireless/realtek/rtlwifi/rtl8723be/led.c | 15 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 10 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/led.c | 24 ++- drivers/net/wireless/realtek/rtlwifi/usb.h | 1 - drivers/net/wireless/realtek/rtlwifi/wifi.h | 1 + 21 files changed, 261 insertions(+), 286 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 679e214415d9..0ba26d27d11c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -817,19 +817,18 @@ static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw) static void _rtl88ee_gen_refresh_led_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; if (rtlpriv->rtlhal.up_first_time) return; if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) - rtl88ee_sw_led_on(hw, pLed0); + rtl88ee_sw_led_on(hw, pled0); else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) - rtl88ee_sw_led_on(hw, pLed0); + rtl88ee_sw_led_on(hw, pled0); else - rtl88ee_sw_led_off(hw, pLed0); + rtl88ee_sw_led_off(hw, pled0); } static bool _rtl88ee_init_mac(struct ieee80211_hw *hw) @@ -1931,14 +1930,13 @@ exit: static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; switch (rtlhal->oem_id) { case RT_CID_819X_HP: - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; break; case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c index 6ea7fd7bb527..df3e214460db 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c @@ -67,7 +67,6 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct 
rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, @@ -79,7 +78,7 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); ledcfg &= 0xf0; - if (pcipriv->ledctl.led_opendrain) { + if (rtlpriv->ledctl.led_opendrain) { rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3) | BIT(5) | BIT(6))); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); @@ -104,24 +103,26 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl88ee_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); - _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } static void _rtl88ee_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; + switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: - rtl88ee_sw_led_on(hw, pLed0); + rtl88ee_sw_led_on(hw, pled0); break; case LED_CTL_POWER_OFF: - rtl88ee_sw_led_off(hw, pLed0); + rtl88ee_sw_led_off(hw, pled0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index bdc132bef822..0b5a06ffa482 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c @@ -638,7 +638,6 @@ EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo); static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); static u64 last_txok_cnt; @@ -651,20 +650,20 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) u32 edca_be_dl = 0x5ea42b; bool bt_change_edca = false; - if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) || - (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) { + if ((last_bt_edca_ul != rtlpriv->btcoexist.bt_edca_ul) || + (last_bt_edca_dl != rtlpriv->btcoexist.bt_edca_dl)) { rtlpriv->dm.current_turbo_edca = false; - last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul; - last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl; + last_bt_edca_ul = rtlpriv->btcoexist.bt_edca_ul; + last_bt_edca_dl = rtlpriv->btcoexist.bt_edca_dl; } - if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) { - edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul; + if (rtlpriv->btcoexist.bt_edca_ul != 0) { + edca_be_ul = rtlpriv->btcoexist.bt_edca_ul; bt_change_edca = true; } - if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) { - edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl; + if (rtlpriv->btcoexist.bt_edca_dl != 0) { + edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } @@ -673,7 +672,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) return; } - if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) { + if ((!mac->ht_enable) && (!rtlpriv->btcoexist.bt_coexistence)) { if (!(edca_be_ul & 0xffff0000)) edca_be_ul |= 0x005e0000; @@ -1471,7 +1470,6 @@ 
EXPORT_SYMBOL(rtl92c_dm_watchdog); u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); long undec_sm_pwdb; u8 curr_bt_rssi_state = 0x00; @@ -1510,8 +1508,8 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw) else curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW); - if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) { - rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state; + if (curr_bt_rssi_state != rtlpriv->btcoexist.bt_rssi_state) { + rtlpriv->btcoexist.bt_rssi_state = curr_bt_rssi_state; return true; } else { return false; @@ -1522,7 +1520,6 @@ EXPORT_SYMBOL(rtl92c_bt_rssi_state_change); static bool rtl92c_bt_state_change(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u32 polling, ratio_tx, ratio_pri; u32 bt_tx, bt_pri; @@ -1542,14 +1539,14 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw) return false; bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1); - if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) { - rtlpcipriv->bt_coexist.bt_cur_state = bt_state; + if (bt_state != rtlpriv->btcoexist.bt_cur_state) { + rtlpriv->btcoexist.bt_cur_state = bt_state; - if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) { - rtlpcipriv->bt_coexist.bt_service = BT_IDLE; + if (rtlpriv->btcoexist.reg_bt_sco == 3) { + rtlpriv->btcoexist.bt_service = BT_IDLE; bt_state = bt_state | - ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ? + ((rtlpriv->btcoexist.bt_ant_isolation == 1) ? 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) | BIT_OFFSET_LEN_MASK_32(2, 1); rtl_write_byte(rtlpriv, 0x4fd, bt_state); @@ -1559,10 +1556,10 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw) ratio_tx = bt_tx * 1000 / polling; ratio_pri = bt_pri * 1000 / polling; - rtlpcipriv->bt_coexist.ratio_tx = ratio_tx; - rtlpcipriv->bt_coexist.ratio_pri = ratio_pri; + rtlpriv->btcoexist.ratio_tx = ratio_tx; + rtlpriv->btcoexist.ratio_pri = ratio_pri; - if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) { + if (bt_state && rtlpriv->btcoexist.reg_bt_sco == 3) { if ((ratio_tx < 30) && (ratio_pri < 30)) cur_service_type = BT_IDLE; @@ -1577,17 +1574,17 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw) else cur_service_type = BT_OTHER_ACTION; - if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) { - rtlpcipriv->bt_coexist.bt_service = cur_service_type; + if (cur_service_type != rtlpriv->btcoexist.bt_service) { + rtlpriv->btcoexist.bt_service = cur_service_type; bt_state = bt_state | - ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ? + ((rtlpriv->btcoexist.bt_ant_isolation == 1) ? 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) | - ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ? + ((rtlpriv->btcoexist.bt_service != BT_IDLE) ? 0 : BIT_OFFSET_LEN_MASK_32(2, 1)); /* Add interrupt migration when bt is not ini * idle state (no traffic). 
*/ - if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) { + if (rtlpriv->btcoexist.bt_service != BT_IDLE) { rtl_write_word(rtlpriv, 0x504, 0x0ccc); rtl_write_byte(rtlpriv, 0x506, 0x54); rtl_write_byte(rtlpriv, 0x507, 0x54); @@ -1626,80 +1623,77 @@ static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw) static void rtl92c_bt_set_normal(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - - - if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) { - rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b; - rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b; - } else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) { - rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f; - rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f; - } else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) { - if (rtlpcipriv->bt_coexist.ratio_tx > 160) { - rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f; - rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f; + + if (rtlpriv->btcoexist.bt_service == BT_OTHERBUSY) { + rtlpriv->btcoexist.bt_edca_ul = 0x5ea72b; + rtlpriv->btcoexist.bt_edca_dl = 0x5ea72b; + } else if (rtlpriv->btcoexist.bt_service == BT_BUSY) { + rtlpriv->btcoexist.bt_edca_ul = 0x5eb82f; + rtlpriv->btcoexist.bt_edca_dl = 0x5eb82f; + } else if (rtlpriv->btcoexist.bt_service == BT_SCO) { + if (rtlpriv->btcoexist.ratio_tx > 160) { + rtlpriv->btcoexist.bt_edca_ul = 0x5ea72f; + rtlpriv->btcoexist.bt_edca_dl = 0x5ea72f; } else { - rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b; - rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b; + rtlpriv->btcoexist.bt_edca_ul = 0x5ea32b; + rtlpriv->btcoexist.bt_edca_dl = 0x5ea42b; } } else { - rtlpcipriv->bt_coexist.bt_edca_ul = 0; - rtlpcipriv->bt_coexist.bt_edca_dl = 0; + rtlpriv->btcoexist.bt_edca_ul = 0; + rtlpriv->btcoexist.bt_edca_dl = 0; } - if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) && - (rtlpriv->mac80211.mode == WIRELESS_MODE_G || + if ((rtlpriv->btcoexist.bt_service != BT_IDLE) && + (rtlpriv->mac80211.mode == WIRELESS_MODE_G || (rtlpriv->mac80211.mode == (WIRELESS_MODE_G | WIRELESS_MODE_B))) && - (rtlpcipriv->bt_coexist.bt_rssi_state & + (rtlpriv->btcoexist.bt_rssi_state & BT_RSSI_STATE_BG_EDCA_LOW)) { - rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b; - rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b; + rtlpriv->btcoexist.bt_edca_ul = 0x5eb82b; + rtlpriv->btcoexist.bt_edca_dl = 0x5eb82b; } } static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); /* Only enable HW BT coexist when BT in "Busy" state. 
*/ if (rtlpriv->mac80211.vendor == PEER_CISCO && - rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) { + rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); } else { - if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) && - (rtlpcipriv->bt_coexist.bt_rssi_state & + if ((rtlpriv->btcoexist.bt_service == BT_BUSY) && + (rtlpriv->btcoexist.bt_rssi_state & BT_RSSI_STATE_NORMAL_POWER)) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); - } else if ((rtlpcipriv->bt_coexist.bt_service == + } else if ((rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) && (rtlpriv->mac80211.mode < WIRELESS_MODE_N_24G) && - (rtlpcipriv->bt_coexist.bt_rssi_state & + (rtlpriv->btcoexist.bt_rssi_state & BT_RSSI_STATE_SPECIAL_LOW)) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); - } else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) { + } else if (rtlpriv->btcoexist.bt_service == BT_PAN) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte); } else { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte); } } - if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) + if (rtlpriv->btcoexist.bt_service == BT_PAN) rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100); else rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0); - if (rtlpcipriv->bt_coexist.bt_rssi_state & + if (rtlpriv->btcoexist.bt_rssi_state & BT_RSSI_STATE_NORMAL_POWER) { rtl92c_bt_set_normal(hw); } else { - rtlpcipriv->bt_coexist.bt_edca_ul = 0; - rtlpcipriv->bt_coexist.bt_edca_dl = 0; + rtlpriv->btcoexist.bt_edca_ul = 0; + rtlpriv->btcoexist.bt_edca_dl = 0; } - if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) { + if (rtlpriv->btcoexist.bt_service != BT_IDLE) { rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, @@ -1707,12 +1701,12 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte) } else { rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0, - rtlpcipriv->bt_coexist.bt_rfreg_origin_1e); + rtlpriv->btcoexist.bt_rfreg_origin_1e); } if (!rtlpriv->dm.dynamic_txpower_enable) { - if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) { - if (rtlpcipriv->bt_coexist.bt_rssi_state & + if (rtlpriv->btcoexist.bt_service != BT_IDLE) { + if (rtlpriv->btcoexist.bt_rssi_state & BT_RSSI_STATE_TXPOWER_LOW) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_BT2; @@ -1732,37 +1726,34 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte) static void rtl92c_check_bt_change(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 tmp1byte = 0; if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version) && - rtlpcipriv->bt_coexist.bt_coexistence) + rtlpriv->btcoexist.bt_coexistence) tmp1byte |= BIT(5); - if (rtlpcipriv->bt_coexist.bt_cur_state) { - if (rtlpcipriv->bt_coexist.bt_ant_isolation) + if (rtlpriv->btcoexist.bt_cur_state) { + if (rtlpriv->btcoexist.bt_ant_isolation) rtl92c_bt_ant_isolation(hw, tmp1byte); } else { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte); rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0, - rtlpcipriv->bt_coexist.bt_rfreg_origin_1e); + rtlpriv->btcoexist.bt_rfreg_origin_1e); - rtlpcipriv->bt_coexist.bt_edca_ul = 0; - rtlpcipriv->bt_coexist.bt_edca_dl = 0; + rtlpriv->btcoexist.bt_edca_ul = 0; + rtlpriv->btcoexist.bt_edca_dl = 0; } } void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - + struct rtl_priv *rtlpriv = rtl_priv(hw); bool wifi_connect_change; bool bt_state_change; bool 
rssi_state_change; - if ((rtlpcipriv->bt_coexist.bt_coexistence) && - (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) { - + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) { wifi_connect_change = rtl92c_bt_wifi_connect_change(hw); bt_state_change = rtl92c_bt_state_change(hw); rssi_state_change = rtl92c_bt_rssi_state_change(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 611987dfc207..9956026bae0a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -148,7 +148,6 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -276,8 +275,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u8 *p_regtoset = NULL; u8 index = 0; - if ((rtlpcipriv->bt_coexist.bt_coexistence) && - (rtlpcipriv->bt_coexist.bt_coexist_type == + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) p_regtoset = regtoset_bt; else @@ -655,26 +654,25 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw) static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; if (rtlpci->up_first_time) return; if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) - rtl92ce_sw_led_on(hw, pLed0); + rtl92ce_sw_led_on(hw, pled0); else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) - rtl92ce_sw_led_on(hw, pLed0); + rtl92ce_sw_led_on(hw, pled0); else - rtl92ce_sw_led_off(hw, pLed0); + rtl92ce_sw_led_off(hw, pled0); } static bool _rtl92ce_init_mac(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -683,7 +681,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw) u16 retry; rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); - if (rtlpcipriv->bt_coexist.bt_coexistence) { + if (rtlpriv->btcoexist.bt_coexistence) { u32 value32; value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO); value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK); @@ -692,7 +690,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F); - if (rtlpcipriv->bt_coexist.bt_coexistence) { + if (rtlpriv->btcoexist.bt_coexistence) { u32 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL); u4b_tmp &= (~0x00024800); @@ -726,7 +724,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82); udelay(2); - if (rtlpcipriv->bt_coexist.bt_coexistence) { + if (rtlpriv->btcoexist.bt_coexistence) { bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2) & 0xfd; rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, bytetmp); } @@ -798,7 +796,6 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw 
*hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 reg_bw_opmode; u32 reg_prsr; @@ -828,8 +825,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); - if ((rtlpcipriv->bt_coexist.bt_coexistence) && - (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431); else rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841); @@ -848,8 +845,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_PIFS, 0x1C); rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); - if ((rtlpcipriv->bt_coexist.bt_coexistence) && - (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) { + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) { rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402); } else { @@ -857,8 +854,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); } - if ((rtlpcipriv->bt_coexist.bt_coexistence) && - (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666); else rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666); @@ -1313,7 +1310,6 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw) static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 u1b_tmp; u32 u4b_tmp; @@ -1331,9 +1327,9 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000); u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL); - if ((rtlpcipriv->bt_coexist.bt_coexistence) && - ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) || - (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8))) { + if ((rtlpriv->btcoexist.bt_coexistence) && + ((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) || + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8))) { rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00F30000 | (u1b_tmp << 8)); } else { @@ -1345,7 +1341,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80); if (!IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23); - if (rtlpcipriv->bt_coexist.bt_coexistence) { + if (rtlpriv->btcoexist.bt_coexistence) { u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL); u4b_tmp |= 0x03824800; rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, u4b_tmp); @@ -1724,12 +1720,11 @@ exit: static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); switch (rtlhal->oem_id) { case RT_CID_819X_HP: - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; break; case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: @@ -1782,7 +1777,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw 
*hw, struct ieee80211_sta *sta) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1838,12 +1832,12 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw, break; } - if ((rtlpcipriv->bt_coexist.bt_coexistence) && - (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) && - (rtlpcipriv->bt_coexist.bt_cur_state) && - (rtlpcipriv->bt_coexist.bt_ant_isolation) && - ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) || - (rtlpcipriv->bt_coexist.bt_service == BT_BUSY))) + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) && + (rtlpriv->btcoexist.bt_cur_state) && + (rtlpriv->btcoexist.bt_ant_isolation) && + ((rtlpriv->btcoexist.bt_service == BT_SCO) || + (rtlpriv->btcoexist.bt_service == BT_BUSY))) ratr_value &= 0x0fffcfc0; else ratr_value &= 0x0FFFFFFF; @@ -2237,65 +2231,64 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - - rtlpcipriv->bt_coexist.bt_coexistence = - rtlpcipriv->bt_coexist.eeprom_bt_coexist; - rtlpcipriv->bt_coexist.bt_ant_num = - rtlpcipriv->bt_coexist.eeprom_bt_ant_num; - rtlpcipriv->bt_coexist.bt_coexist_type = - rtlpcipriv->bt_coexist.eeprom_bt_type; - - if (rtlpcipriv->bt_coexist.reg_bt_iso == 2) - rtlpcipriv->bt_coexist.bt_ant_isolation = - rtlpcipriv->bt_coexist.eeprom_bt_ant_isol; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->btcoexist.bt_coexistence = + rtlpriv->btcoexist.eeprom_bt_coexist; + rtlpriv->btcoexist.bt_ant_num = + rtlpriv->btcoexist.eeprom_bt_ant_num; + rtlpriv->btcoexist.bt_coexist_type = + rtlpriv->btcoexist.eeprom_bt_type; + + if (rtlpriv->btcoexist.reg_bt_iso == 2) + rtlpriv->btcoexist.bt_ant_isolation = + rtlpriv->btcoexist.eeprom_bt_ant_isol; else - rtlpcipriv->bt_coexist.bt_ant_isolation = - rtlpcipriv->bt_coexist.reg_bt_iso; - - rtlpcipriv->bt_coexist.bt_radio_shared_type = - rtlpcipriv->bt_coexist.eeprom_bt_radio_shared; - - if (rtlpcipriv->bt_coexist.bt_coexistence) { - - if (rtlpcipriv->bt_coexist.reg_bt_sco == 1) - rtlpcipriv->bt_coexist.bt_service = BT_OTHER_ACTION; - else if (rtlpcipriv->bt_coexist.reg_bt_sco == 2) - rtlpcipriv->bt_coexist.bt_service = BT_SCO; - else if (rtlpcipriv->bt_coexist.reg_bt_sco == 4) - rtlpcipriv->bt_coexist.bt_service = BT_BUSY; - else if (rtlpcipriv->bt_coexist.reg_bt_sco == 5) - rtlpcipriv->bt_coexist.bt_service = BT_OTHERBUSY; + rtlpriv->btcoexist.bt_ant_isolation = + rtlpriv->btcoexist.reg_bt_iso; + + rtlpriv->btcoexist.bt_radio_shared_type = + rtlpriv->btcoexist.eeprom_bt_radio_shared; + + if (rtlpriv->btcoexist.bt_coexistence) { + if (rtlpriv->btcoexist.reg_bt_sco == 1) + rtlpriv->btcoexist.bt_service = BT_OTHER_ACTION; + else if (rtlpriv->btcoexist.reg_bt_sco == 2) + rtlpriv->btcoexist.bt_service = BT_SCO; + else if (rtlpriv->btcoexist.reg_bt_sco == 4) + rtlpriv->btcoexist.bt_service = BT_BUSY; + else if (rtlpriv->btcoexist.reg_bt_sco == 5) + rtlpriv->btcoexist.bt_service = BT_OTHERBUSY; else - rtlpcipriv->bt_coexist.bt_service = BT_IDLE; + rtlpriv->btcoexist.bt_service = BT_IDLE; - rtlpcipriv->bt_coexist.bt_edca_ul = 0; - rtlpcipriv->bt_coexist.bt_edca_dl = 0; - rtlpcipriv->bt_coexist.bt_rssi_state = 0xff; + rtlpriv->btcoexist.bt_edca_ul = 0; + rtlpriv->btcoexist.bt_edca_dl = 0; + rtlpriv->btcoexist.bt_rssi_state = 0xff; } } void 
rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, bool auto_load_fail, u8 *hwinfo) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); u8 val; if (!auto_load_fail) { - rtlpcipriv->bt_coexist.eeprom_bt_coexist = + rtlpriv->btcoexist.eeprom_bt_coexist = ((hwinfo[RF_OPTION1] & 0xe0) >> 5); val = hwinfo[RF_OPTION4]; - rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1); - rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1); - rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4); - rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = + rtlpriv->btcoexist.eeprom_bt_type = ((val & 0xe) >> 1); + rtlpriv->btcoexist.eeprom_bt_ant_num = (val & 0x1); + rtlpriv->btcoexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4); + rtlpriv->btcoexist.eeprom_bt_radio_shared = ((val & 0x20) >> 5); } else { - rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0; - rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE; - rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2; - rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0; - rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED; + rtlpriv->btcoexist.eeprom_bt_coexist = 0; + rtlpriv->btcoexist.eeprom_bt_type = BT_2WIRE; + rtlpriv->btcoexist.eeprom_bt_ant_num = ANT_X2; + rtlpriv->btcoexist.eeprom_bt_ant_isol = 0; + rtlpriv->btcoexist.eeprom_bt_radio_shared = BT_RADIO_SHARED; } rtl8192ce_bt_var_init(hw); @@ -2303,14 +2296,14 @@ void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); /* 0:Low, 1:High, 2:From Efuse. */ - rtlpcipriv->bt_coexist.reg_bt_iso = 2; + rtlpriv->btcoexist.reg_bt_iso = 2; /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ - rtlpcipriv->bt_coexist.reg_bt_sco = 3; + rtlpriv->btcoexist.reg_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ - rtlpcipriv->bt_coexist.reg_bt_sco = 0; + rtlpriv->btcoexist.reg_bt_sco = 0; } @@ -2318,23 +2311,22 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 u1_tmp; - if (rtlpcipriv->bt_coexist.bt_coexistence && - ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) || - rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8)) { + if (rtlpriv->btcoexist.bt_coexistence && + ((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) || + rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8)) { - if (rtlpcipriv->bt_coexist.bt_ant_isolation) + if (rtlpriv->btcoexist.bt_ant_isolation) rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT_OFFSET_LEN_MASK_32(0, 1); u1_tmp = u1_tmp | - ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ? + ((rtlpriv->btcoexist.bt_ant_isolation == 1) ? 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) | - ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ? + ((rtlpriv->btcoexist.bt_service == BT_SCO) ? 
0 : BIT_OFFSET_LEN_MASK_32(2, 1)); rtl_write_byte(rtlpriv, 0x4fd, u1_tmp); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c index bdaa848995ae..7edf5af9046e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c @@ -67,7 +67,6 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n", @@ -80,7 +79,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (pcipriv->ledctl.led_opendrain) + if (rtlpriv->ledctl.led_opendrain) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(1) | BIT(5) | BIT(6))); else @@ -100,24 +99,26 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92ce_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); - _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; + switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: - rtl92ce_sw_led_on(hw, pLed0); + rtl92ce_sw_led_on(hw, pled0); break; case LED_CTL_POWER_OFF: - rtl92ce_sw_led_off(hw, pLed0); + rtl92ce_sw_led_off(hw, pled0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 9db6ec62787a..f95a64507f17 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -393,12 +393,11 @@ exit: static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); switch (rtlhal->oem_id) { case RT_CID_819X_HP: - usb_priv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; break; case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c index 70ea6c5692a5..66d2784de67d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c @@ -67,7 +67,6 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n", @@ -78,7 +77,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (usbpriv->ledctl.led_opendrain) + if (rtlpriv->ledctl.led_opendrain) 
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(1) | BIT(5) | BIT(6))); else @@ -99,16 +98,18 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92cu_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw); - _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0); - _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw) { - struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw); - _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0)); - _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led0); + _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led1); } static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 1bd1893bb401..cf28d25c551f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -614,19 +614,19 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) static void _rtl92de_gen_refresh_led_state(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; if (rtlpci->up_first_time) return; if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) - rtl92de_sw_led_on(hw, pLed0); + rtl92de_sw_led_on(hw, pled0); else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) - rtl92de_sw_led_on(hw, pLed0); + rtl92de_sw_led_on(hw, pled0); else - rtl92de_sw_led_off(hw, pLed0); + rtl92de_sw_led_off(hw, pled0); } static bool _rtl92de_init_mac(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c index 4be787e53279..8851038c9eba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c @@ -76,7 +76,6 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n", @@ -89,7 +88,7 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (pcipriv->ledctl.led_opendrain) + if (rtlpriv->ledctl.led_opendrain) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(1) | BIT(5) | BIT(6))); else @@ -110,24 +109,26 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92de_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); - _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } static void _rtl92ce_sw_led_control(struct 
ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; + switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: - rtl92de_sw_led_on(hw, pLed0); + rtl92de_sw_led_on(hw, pled0); break; case LED_CTL_POWER_OFF: - rtl92de_sw_led_off(hw, pLed0); + rtl92de_sw_led_off(hw, pled0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index b44244a8a22f..56ca7f5351ea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -735,9 +735,8 @@ static bool _rtl92ee_llt_table_init(struct ieee80211_hw *hw) static void _rtl92ee_gen_refresh_led_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0; + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; if (rtlpriv->rtlhal.up_first_time) return; @@ -2166,10 +2165,9 @@ exit: static void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n", rtlhal->oem_id); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c index 47da05dd3076..96c64785108b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c @@ -99,26 +99,26 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92ee_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); - _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); - _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); + _rtl92ee_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl92ee_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } static void _rtl92ee_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: - rtl92ee_sw_led_on(hw, pLed0); + rtl92ee_sw_led_on(hw, pled0); break; case LED_CTL_POWER_OFF: - rtl92ee_sw_led_off(hw, pLed0); + rtl92ee_sw_led_off(hw, pled0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index d5e86b6fad11..ba1bd782238b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -753,13 +753,12 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw) /* After MACIO reset,we must refresh LED state. 
*/ if ((ppsc->rfoff_reason == RF_CHANGE_BY_IPS) || (ppsc->rfoff_reason == 0)) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; enum rf_pwrstate rfpwr_state_toset; rfpwr_state_toset = _rtl92se_rf_onoff_detect(hw); if (rfpwr_state_toset == ERFON) - rtl92se_sw_led_on(hw, pLed0); + rtl92se_sw_led_on(hw, pled0); } } @@ -1395,16 +1394,15 @@ static void _rtl92se_gen_refreshledstate(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; if (rtlpci->up_first_time == 1) return; if (rtlpriv->psc.rfoff_reason == RF_CHANGE_BY_IPS) - rtl92se_sw_led_on(hw, pLed0); + rtl92se_sw_led_on(hw, pled0); else - rtl92se_sw_led_off(hw, pLed0); + rtl92se_sw_led_off(hw, pled0); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c index c740aeb0e83f..33c307aca911 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c @@ -38,9 +38,10 @@ static void _rtl92se_init_led(struct ieee80211_hw *hw, void rtl92se_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); - _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl92se_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl92se_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) @@ -73,7 +74,6 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv; - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; rtlpriv = rtl_priv(hw); @@ -89,7 +89,7 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (pcipriv->ledctl.led_opendrain) + if (rtlpriv->ledctl.led_opendrain) rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(1))); else rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3))); @@ -109,16 +109,17 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) static void _rtl92se_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; + switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: - rtl92se_sw_led_on(hw, pLed0); + rtl92se_sw_led_on(hw, pled0); break; case LED_CTL_POWER_OFF: - rtl92se_sw_led_off(hw, pLed0); + rtl92se_sw_led_off(hw, pled0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index bb9de2f6e695..859c045bd37c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -664,9 +664,8 @@ static bool _rtl8723e_llt_table_init(struct ieee80211_hw *hw) static void _rtl8723e_gen_refresh_led_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct 
rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0; + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; if (rtlpriv->rtlhal.up_first_time) return; @@ -1790,13 +1789,12 @@ exit: static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; switch (rtlhal->oem_id) { case RT_CID_819X_HP: - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; break; case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c index e1e6d24f1daa..d567b0df0e9f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c @@ -68,7 +68,6 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, @@ -81,7 +80,7 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (pcipriv->ledctl.led_opendrain) { + if (rtlpriv->ledctl.led_opendrain) { ledcfg &= 0x90; /* Set to software control. */ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3))); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); @@ -109,24 +108,26 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl8723e_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); - _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl8723e_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl8723e_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } static void _rtl8723e_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; + switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: - rtl8723e_sw_led_on(hw, pLed0); + rtl8723e_sw_led_on(hw, pled0); break; case LED_CTL_POWER_OFF: - rtl8723e_sw_led_off(hw, pLed0); + rtl8723e_sw_led_off(hw, pled0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index e6e5e5bedf63..1acbfb86472c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -809,9 +809,8 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw) static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; if (rtlpriv->rtlhal.up_first_time) return; @@ -2228,13 +2227,12 @@ exit: static void 
_rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; switch (rtlhal->oem_id) { case RT_CID_819X_HP: - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; break; case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c index 8232e010d090..4f7890d62c21 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c @@ -67,7 +67,6 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, @@ -80,7 +79,7 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (pcipriv->ledctl.led_opendrain) { + if (rtlpriv->ledctl.led_opendrain) { ledcfg &= 0x90; /* Set to software control. */ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3))); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); @@ -108,16 +107,18 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl8723be_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); - _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl8723be_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl8723be_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; + switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 4f83eee1ff75..363d2f28da1f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -890,9 +890,8 @@ static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw) static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0; + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (rtlpriv->rtlhal.up_first_time) @@ -3098,7 +3097,6 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); int params[] = {RTL_EEPROM_ID, EEPROM_VID, EEPROM_DID, EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, @@ -3193,7 +3191,7 @@ static void 
_rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ "SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n", rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type); - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; if (rtlhal->oem_id == RT_CID_DEFAULT) { switch (rtlefuse->eeprom_oemid) { @@ -3224,10 +3222,10 @@ exit: struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; switch (rtlhal->oem_id) { case RT_CID_819X_HP: - pcipriv->ledctl.led_opendrain = true; + rtlpriv->ledctl.led_opendrain = true; break; case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c index fcb3b28c6b8f..405c7541b386 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c @@ -101,7 +101,6 @@ void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, @@ -114,7 +113,7 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (pcipriv->ledctl.led_opendrain) { + if (rtlpriv->ledctl.led_opendrain) { ledcfg &= 0x90; /* Set to software control. */ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3))); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); @@ -143,7 +142,6 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { u16 ledreg = REG_LEDCFG1; struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); switch (pled->ledpin) { case LED_PIN_LED0: @@ -163,7 +161,7 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) "In SwLedOff,LedAddr:%X LEDPIN=%d\n", ledreg, pled->ledpin); /*Open-drain arrangement for controlling the LED*/ - if (pcipriv->ledctl.led_opendrain) { + if (rtlpriv->ledctl.led_opendrain) { u8 ledcfg = rtl_read_byte(rtlpriv, ledreg); ledreg &= 0xd0; /* Set to software control.*/ @@ -182,17 +180,17 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); - _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); - _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); + _rtl8821ae_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl8821ae_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1); } static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); switch (ledaction) { @@ -200,15 +198,15 @@ static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw, case LED_CTL_LINK: case LED_CTL_NO_LINK: if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) - rtl8812ae_sw_led_on(hw, pLed0); + rtl8812ae_sw_led_on(hw, pled0); else - rtl8821ae_sw_led_on(hw, pLed0); + rtl8821ae_sw_led_on(hw, pled0); break; case LED_CTL_POWER_OFF: if (rtlhal->hw_type == 
HARDWARE_TYPE_RTL8812AE) - rtl8812ae_sw_led_off(hw, pLed0); + rtl8812ae_sw_led_off(hw, pled0); else - rtl8821ae_sw_led_off(hw, pLed0); + rtl8821ae_sw_led_off(hw, pled0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h index cdb9e06db89e..c91cec04bfaf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.h +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h @@ -147,7 +147,6 @@ struct rtl_usb { struct rtl_usb_priv { struct bt_coexist_info bt_coexist; - struct rtl_led_ctl ledctl; struct rtl_usb dev; }; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 71f766093294..65ef42b37651 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2583,6 +2583,7 @@ struct rtl_priv { struct rtl_dm dm; struct rtl_security sec; struct rtl_efuse efuse; + struct rtl_led_ctl ledctl; struct rtl_ps_ctl psc; struct rate_adaptive ra; -- cgit v1.2.3 From 93c7018ec16bb83399dd4db61c361a6d6aba0d5a Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 8 Feb 2017 12:18:09 +0100 Subject: rt2x00usb: do not anchor rx and tx urb's We might kill TX or RX urb during rt2x00usb_flush_entry(), what can cause anchor list corruption like shown below: [ 2074.035633] WARNING: CPU: 2 PID: 14480 at lib/list_debug.c:33 __list_add+0xac/0xc0 [ 2074.035634] list_add corruption. prev->next should be next (ffff88020f362c28), but was dead000000000100. (prev=ffff8801d161bb70). [ 2074.035670] Call Trace: [ 2074.035672] [] dump_stack+0x63/0x8c [ 2074.035674] [] __warn+0xd1/0xf0 [ 2074.035676] [] warn_slowpath_fmt+0x5f/0x80 [ 2074.035678] [] ? rt2x00usb_register_write_lock+0x3d/0x60 [rt2800usb] [ 2074.035679] [] __list_add+0xac/0xc0 [ 2074.035681] [] usb_anchor_urb+0x4c/0xa0 [ 2074.035683] [] rt2x00usb_kick_rx_entry+0xaf/0x100 [rt2x00usb] [ 2074.035684] [] rt2x00usb_clear_entry+0x22/0x30 [rt2x00usb] To fix do not anchor TX and RX urb's, it is not needed as during shutdown we kill those urbs in rt2x00usb_free_entries(). 
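For illustration, a minimal sketch of the submit/teardown pattern this change moves to, with no per-urb anchoring. Only the USB core helpers (usb_submit_urb(), usb_kill_urb()) are real; the my_* names and structures are hypothetical stand-ins for the rt2x00 queue entries.

    #include <linux/gfp.h>
    #include <linux/usb.h>

    struct my_entry {
            struct urb *urb;
    };

    struct my_dev {
            struct my_entry *entries;
            unsigned int num_entries;
    };

    /* Submit path: no usb_anchor_urb()/usb_unanchor_urb() bookkeeping,
     * so a kill from the flush path cannot race with re-anchoring.
     */
    static int my_kick_entry(struct my_entry *entry)
    {
            return usb_submit_urb(entry->urb, GFP_ATOMIC);
    }

    /* Teardown kills each per-entry urb directly, which is why the
     * anchor is not needed for these urbs in the first place.
     */
    static void my_free_entries(struct my_dev *dev)
    {
            unsigned int i;

            for (i = 0; i < dev->num_entries; i++)
                    usb_kill_urb(dev->entries[i].urb);
    }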
Cc: Vishal Thanki Fixes: 8b4c0009313f ("rt2x00usb: Use usb anchor to manage URB") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 5a2bf9f63cd7..fe13dd07cc2a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) entry->skb->data, length, rt2x00usb_interrupt_txdone, entry); - usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - usb_unanchor_urb(entry_priv->urb); if (status == -ENODEV) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); @@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) entry->skb->data, entry->skb->len, rt2x00usb_interrupt_rxdone, entry); - usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - usb_unanchor_urb(entry_priv->urb); if (status == -ENODEV) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); -- cgit v1.2.3 From 0488a6121dfe6cbd44de15ea3627913b7549a1e9 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 8 Feb 2017 12:18:10 +0100 Subject: rt2x00usb: fix anchor initialization If device fail to initialize we can OOPS in rt2x00lib_remove_dev(), due to using uninitialized usb_anchor structure: [ 855.435820] ieee80211 phy3: rt2x00usb_vendor_request: Error - Vendor Request 0x07 failed for offset 0x1000 with error -19 [ 855.435826] ieee80211 phy3: rt2800_probe_rt: Error - Invalid RT chipset 0x0000, rev 0000 detected [ 855.435829] ieee80211 phy3: rt2x00lib_probe_dev: Error - Failed to allocate device [ 855.435845] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028 [ 855.435900] IP: _raw_spin_lock_irq+0xd/0x30 [ 855.435926] PGD 0 [ 855.435953] Oops: 0002 [#1] SMP [ 855.437011] Call Trace: [ 855.437029] ? usb_kill_anchored_urbs+0x27/0xc0 [ 855.437061] rt2x00lib_remove_dev+0x190/0x1c0 [rt2x00lib] [ 855.437097] rt2x00lib_probe_dev+0x246/0x7a0 [rt2x00lib] [ 855.437149] ? ieee80211_roc_setup+0x9e/0xd0 [mac80211] [ 855.437183] ? __kmalloc+0x1af/0x1f0 [ 855.437207] ? rt2x00usb_probe+0x13d/0xc50 [rt2x00usb] [ 855.437240] rt2x00usb_probe+0x155/0xc50 [rt2x00usb] [ 855.437273] rt2800usb_probe+0x15/0x20 [rt2800usb] [ 855.437304] usb_probe_interface+0x159/0x2d0 [ 855.437333] driver_probe_device+0x2bb/0x460 Patch changes initialization sequence to fix the problem. 
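The essence of the reordering, condensed from the hunk below (a sketch only, with the error-path labels flattened for brevity):

    rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
                                     sizeof(struct usb_anchor), GFP_KERNEL);
    if (!rt2x00dev->anchor)
            return -ENOMEM;
    init_usb_anchor(rt2x00dev->anchor);     /* initialise before any possible user */

    retval = rt2x00lib_probe_dev(rt2x00dev);
    if (retval)
            usb_kill_anchored_urbs(rt2x00dev->anchor);  /* anchor is valid here */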
Cc: Vishal Thanki Fixes: 8b4c0009313f ("rt2x00usb: Use usb anchor to manage URB") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index fe13dd07cc2a..c696f0ad6a68 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -825,10 +825,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf, if (retval) goto exit_free_device; - retval = rt2x00lib_probe_dev(rt2x00dev); - if (retval) - goto exit_free_reg; - rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev, sizeof(struct usb_anchor), GFP_KERNEL); @@ -836,10 +832,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf, retval = -ENOMEM; goto exit_free_reg; } - init_usb_anchor(rt2x00dev->anchor); + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_anchor; + return 0; +exit_free_anchor: + usb_kill_anchored_urbs(rt2x00dev->anchor); + exit_free_reg: rt2x00usb_free_reg(rt2x00dev); -- cgit v1.2.3 From 80a97eae304631f57ff8560f87c0b18b95321443 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 8 Feb 2017 13:51:29 +0100 Subject: rt61pci: use entry directly Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt61pci.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 5306a3b2622d..8adb5f3abe15 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -1903,8 +1903,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry, rt2x00_desc_read(txd, 5, &word); rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid); - rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, - skbdesc->entry->entry_idx); + rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx); rt2x00_set_field32(&word, TXD_W5_TX_POWER, TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power)); rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); -- cgit v1.2.3 From 2ceb813798e1fd33e71a574771828c0f298e077b Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 8 Feb 2017 13:51:30 +0100 Subject: rt2x00: call entry directly in rt2x00_dump_frame Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2400pci.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt2500pci.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt2500usb.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 4 ++-- drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 7 ++++--- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 4 ++-- drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt61pci.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt73usb.c | 2 +- 10 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 085c5b423bdf..19874439ac40 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -1200,7 +1200,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry, /* * Dump beacon to userspace through debugfs. 
*/ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); out: /* * Enable beaconing again. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 9832fd50c793..791434de8052 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -1349,7 +1349,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry, /* * Dump beacon to userspace through debugfs. */ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); out: /* * Enable beaconing again. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index cd3ab5a9e98d..62357465fe29 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -1170,7 +1170,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry, /* * Dump beacon to userspace through debugfs. */ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * USB devices cannot blindly pass the skb->len as the diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 572cdea4ca25..8223a1520316 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1014,7 +1014,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) /* * Dump beacon to userspace through debugfs. */ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * Write entire beacon with TXWI and padding to register. 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index ea299c4e7ada..26869b3bef45 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1400,11 +1400,11 @@ void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop); */ #ifdef CONFIG_RT2X00_LIB_DEBUGFS void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, - enum rt2x00_dump_type type, struct sk_buff *skb); + enum rt2x00_dump_type type, struct queue_entry *entry); #else static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, enum rt2x00_dump_type type, - struct sk_buff *skb) + struct queue_entry *entry) { } #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 72ae530e4a3b..964aefdc11f0 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -157,9 +157,10 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, } void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, - enum rt2x00_dump_type type, struct sk_buff *skb) + enum rt2x00_dump_type type, struct queue_entry *entry) { struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; + struct sk_buff *skb = entry->skb; struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); struct sk_buff *skbcopy; struct rt2x00dump_hdr *dump_hdr; @@ -196,8 +197,8 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev); dump_hdr->type = cpu_to_le16(type); - dump_hdr->queue_index = skbdesc->entry->queue->qid; - dump_hdr->entry_index = skbdesc->entry->entry_idx; + dump_hdr->queue_index = entry->queue->qid; + dump_hdr->entry_index = entry->entry_idx; dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec); dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 4b08007f93f7..dd6678109b7e 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -363,7 +363,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, * Send frame to debugfs immediately, after this call is completed * we are going to overwrite the skb->cb array. */ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry); /* * Determine if the frame has been successfully transmitted and @@ -772,7 +772,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) */ rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); rt2x00debug_update_crypto(rt2x00dev, &rxdesc); - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry); /* * Initialize RX status information, and send frame diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index b2364d378774..380daf4e1b8d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -544,7 +544,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, * All processing on the frame has been completed, this means * it is now ready to be dumped to userspace through debugfs. 
*/ - rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); + rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry); } static void rt2x00queue_kick_tx_queue(struct data_queue *queue, diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 8adb5f3abe15..973d418b8113 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -1988,7 +1988,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry, /* * Dump beacon to userspace through debugfs. */ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * Write entire beacon with descriptor and padding to register. diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index 1a29c4d205a5..bb8d307a789f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -1557,7 +1557,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry, /* * Dump beacon to userspace through debugfs. */ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * Write entire beacon with descriptor and padding to register. -- cgit v1.2.3 From cf81db30a6edcca791b1bfa5348a162471121d11 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 8 Feb 2017 13:51:31 +0100 Subject: rt2x00: remove queue_entry from skbdesc queue_entry field of skbdesc is not read any more, remove it to allow skbdesc contain other data. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 3 --- drivers/net/wireless/ralink/rt2x00/rt2x00queue.h | 2 -- 2 files changed, 5 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 380daf4e1b8d..e1660b92b20c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -83,7 +83,6 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) */ skbdesc = get_skb_frame_desc(skb); memset(skbdesc, 0, sizeof(*skbdesc)); - skbdesc->entry = entry; if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) { dma_addr_t skb_dma; @@ -689,7 +688,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, goto out; } - skbdesc->entry = entry; entry->skb = skb; /* @@ -774,7 +772,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, */ skbdesc = get_skb_frame_desc(intf->beacon->skb); memset(skbdesc, 0, sizeof(*skbdesc)); - skbdesc->entry = intf->beacon; /* * Send beacon to hardware. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h index 2233b911a1d7..22d18818e850 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h @@ -116,8 +116,6 @@ struct skb_frame_desc { __le32 iv[2]; dma_addr_t skb_dma; - - struct queue_entry *entry; }; /** -- cgit v1.2.3 From 04fa3e680b4dd2fdd11d0152fb9b6067e7aac140 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 7 Jan 2017 20:11:47 +0200 Subject: iwlwifi: pcie: don't increment / decrement a bool David reported that the code I added uses the decrement and increment operator on a boolean variable. Fix that. 
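For context, a small userspace illustration of why ++/-- on a bool cannot express a nesting count (standard C _Bool semantics; the variable names are made up):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool blocked_bool = false;
            int blocked_cnt = 0;

            /* two callers block the queue */
            blocked_bool++;
            blocked_bool++;         /* _Bool saturates at 1 */
            blocked_cnt++;
            blocked_cnt++;

            /* one caller unblocks */
            blocked_bool--;         /* 1 - 1 == 0: looks fully unblocked */
            blocked_cnt--;          /* 1: one blocker still outstanding */

            printf("bool=%d int=%d\n", blocked_bool, blocked_cnt);  /* bool=0 int=1 */
            return 0;
    }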
Fixes: 0cd58eaab148 ("iwlwifi: pcie: allow the op_mode to block the tx queues") Reported-by: David Binderman Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index cf5bda06042c..10937309641a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -279,7 +279,7 @@ struct iwl_txq { bool frozen; u8 active; bool ampdu; - bool block; + int block; unsigned long wd_timeout; struct sk_buff_head overflow_q; -- cgit v1.2.3 From 6eac0e817aee2518a96b5ce9d02b904e0667f370 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 7 Jan 2017 23:03:40 +0200 Subject: iwlwifi: make RTPM depend on EXPERT Enabling the RTPM Kconfig option can be fairly risky. Runtime PM must be validated against a specific platform before it can be safely enabled. Hence, it makes no sense for distros and other big OS vendors to enable it since they ship code to various systems and unknown platform. Make sure that this is hinted properly by making the IWLWIFI_PCIE_RTPM Kconfig option depend on EXPERT. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=172411 Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Kconfig | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index b64db47b31bb..c5f2ddf9b0fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -90,13 +90,16 @@ config IWLWIFI_BCAST_FILTERING config IWLWIFI_PCIE_RTPM bool "Enable runtime power management mode for PCIe devices" - depends on IWLMVM && PM + depends on IWLMVM && PM && EXPERT default false help Say Y here to enable runtime power management for PCIe devices. If enabled, the device will go into low power mode when idle for a short period of time, allowing for improved - power saving during runtime. + power saving during runtime. Note that this feature requires + a tight integration with the platform. It is not recommended + to enable this feature without proper validation with the + specific target platform. If unsure, say N. -- cgit v1.2.3 From 1aa0ec5cdf60b23dfc152f0e9e205f58b0a546b2 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 7 Jan 2017 19:57:46 +0200 Subject: iwlwifi: dvm: don't call << operator with a negative value In https://bugzilla.kernel.org/show_bug.cgi?id=177341 Bob reported a UBSAN WARNING on rs.c. Undefined behaviour in drivers/net/wireless/intel/iwlwifi/dvm/rs.c:746:18 This because i = index - 1; for (mask = (1 << i); i >= 0; i--, mask >>= 1) is unsafe: i could be negative and hence we can call << on a negative value. This bug doesn't have any real impact since the condition of the for loop will prevent any usage of mask. 
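As a worked sketch of the guarded pattern (not the driver code; it assumes 0 <= index <= 15, a 16-bit rate mask, and a hypothetical -1 return meaning "no lower rate"):

    #include <linux/types.h>

    static int find_prev_rate(u16 rate_mask, int index)
    {
            int i = index - 1;
            u16 mask = 0;

            if (i >= 0)             /* never evaluate 1 << -1: that shift is undefined */
                    mask = 1U << i;

            for (; i >= 0; i--, mask >>= 1) {
                    if (rate_mask & mask)
                            return i;       /* previous rate present in the mask */
            }

            return -1;              /* no lower rate found */
    }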
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=177341 Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index 710dbbefd551..ff44ebc5829d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -740,7 +740,10 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, /* Find the previous rate that is in the rate mask */ i = index - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (i >= 0) + mask = BIT(i); + + for (; i >= 0; i--, mask >>= 1) { if (rate_mask & mask) { low = i; break; -- cgit v1.2.3 From 43d59a4ce78b6460b32076ec7ac821fd8678cdc2 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 7 Jan 2017 19:57:46 +0200 Subject: iwlwifi: mvm: don't call << operator with a negative value In https://bugzilla.kernel.org/show_bug.cgi?id=177341 Bob reported a UBSAN WARNING on rs.c in iwldvm. Fix the same bug in iwlmvm. This because i = index - 1; for (mask = (1 << i); i >= 0; i--, mask >>= 1) is unsafe: i could be negative and hence we can call << on a negative value. This bug doesn't have any real impact since the condition of the for loop will prevent any usage of mask. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=177341 Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 13be9a5b83ee..ce907c58ebf6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -972,7 +972,9 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, /* Find the previous rate that is in the rate mask */ i = index - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (i >= 0) + mask = BIT(i); + for (; i >= 0; i--, mask >>= 1) { if (rate_mask & mask) { low = i; break; -- cgit v1.2.3 From 2b18824a5d1eae58946e3c8a60d48f0b1ff32265 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 11 Dec 2016 17:12:47 +0200 Subject: iwlwifi: pcie: set STATUS_RFKILL immediately after interrupt Currently, when getting a RFKILL interrupt, the transport enters a flow in which it stops the device, disables other interrupts, etc. After stopping the device, the transport resets the hw, and sleeps. During the sleep, a context switch occurs and host commands are sent by upper layers (e.g. mvm) to the fw. This is possible since the op_mode layer and the transport layer hold different mutexes. Since the STATUS_RFKILL bit isn't set, the transport layer doesn't recognize that RFKILL was toggled on, and no commands can actually be sent, so it enqueues the command to the tx queue and sets a timer on the queue. After switching context back to stopping the device, STATUS_RFKILL is set, and then the transport can't send the command to the fw. This eventually results in a queue hang. Fix this by setting STATUS_RFKILL immediately when the interrupt is fired. 
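The ordering that closes the race, as a short sketch (locking and the rest of the interrupt handler elided):

    hw_rfkill = iwl_is_rfkill_set(trans);
    if (hw_rfkill)
            set_bit(STATUS_RFKILL, &trans->status); /* publish the state first */

    /* Only then stop the device and notify the op_mode. If that path
     * sleeps and another context tries to send a host command, it
     * already observes STATUS_RFKILL and does not queue a command
     * that can never be completed.
     */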
Signed-off-by: Golan Ben-Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index e1bf6da20909..de94dfdf2ec9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1609,6 +1609,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) mutex_lock(&trans_pcie->mutex); hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans->status); + IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio"); @@ -1617,7 +1620,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) iwl_trans_pcie_rf_kill(trans, hw_rfkill); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { - set_bit(STATUS_RFKILL, &trans->status); if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) IWL_DEBUG_RF_KILL(trans, @@ -1954,6 +1956,9 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) mutex_lock(&trans_pcie->mutex); hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans->status); + IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio"); @@ -1962,7 +1967,6 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) iwl_trans_pcie_rf_kill(trans, hw_rfkill); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { - set_bit(STATUS_RFKILL, &trans->status); if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) IWL_DEBUG_RF_KILL(trans, -- cgit v1.2.3 From b45242c99f503d0a526c2b1f647ad562f9cf9dd5 Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Mon, 9 Jan 2017 15:39:15 +0200 Subject: iwlwifi: mvm: Fix CSA received immediately after association The session protection set for association is only removed when BSS_CHANGED_BEACON_INFO is set and BSS_CHANGED_ASSOC is not set. However, mac80211 may set both on association (in case a beacon was already received). In this case, mac80211 will not set BSS_CHANGED_BEACON_INFO on the next beacons because it has already notified the beacon change, so the session protection is never removed (until the session protection ends). When a CSA is received within this time, the station will fail to folllow the channel switch because it cannot schedule the time event. Fix this by removing the session protection when BSS_CHANGED_BEACON_INFO and BSS_CHANGED_ASSOC are both set. Signed-off-by: Avraham Stern Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 15ecd3aba71b..7253b92792bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2008,16 +2008,16 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); - } else if (changes & BSS_CHANGED_BEACON_INFO) { + } + + if (changes & BSS_CHANGED_BEACON_INFO) { /* - * We received a beacon _after_ association so + * We received a beacon from the associated AP so * remove the session protection. 
*/ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); - } - if (changes & BSS_CHANGED_BEACON_INFO) { iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } -- cgit v1.2.3 From 735a0045f9ea8372bcf3e599cbd3aa891a216b26 Mon Sep 17 00:00:00 2001 From: "Goodstein, Mordechay" Date: Sun, 15 Jan 2017 16:00:12 +0200 Subject: iwlwifi: mvm: avoid race condition in ADD_STA. The race happens when we send ADD_STA(auth->assoc) -> LQ_CMD between the commands the FW sometimes loses the medium for AUX, and sends a ndp to the AP and the flow becomes, ADD_STA -> send ndp -> LQ_CMD the problem is that there's no rates yet defined for sending the ndp and FW generates an assert. The fix: change the order of the commands to LQ_CMD -> ADD_STA Signed-off-by: Mordechay Goodstein Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 7253b92792bf..d37b1695c64e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2627,11 +2627,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } + + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); ret = iwl_mvm_update_sta(mvm, vif, sta); - if (ret == 0) - iwl_mvm_rs_rate_init(mvm, sta, - mvmvif->phy_ctxt->channel->band, - true); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { -- cgit v1.2.3 From cd4d23c1ea9bb3769f2859b3b7b9ef24355bcba4 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Mon, 16 Jan 2017 15:07:03 +0200 Subject: iwlwifi: mvm: Fix removal of IGTK When removing an IGTK, iwl_mvm_send_sta_igtk() was called before station ID was retrieved, so the function was invoked with an invalid station ID. Fix this by first getting the station ID. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=192411 Signed-off-by: Ilan Peer Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 1bad933b3ad4..c35fabdf85da 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -3047,6 +3047,11 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, /* Get the station from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); + if (!mvm_sta) { + IWL_ERR(mvm, "Failed to find station\n"); + return -EINVAL; + } + sta_id = mvm_sta->sta_id; IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); @@ -3074,8 +3079,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, return 0; } - sta_id = mvm_sta->sta_id; - ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; -- cgit v1.2.3 From 0c8d0a4770cb6863f78a25c8d211f42e9c82251c Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Wed, 25 Jan 2017 15:11:30 +0200 Subject: iwlwifi: mvm: avoid exceeding the allowed print length Divide a mfuart related print so it won't exceed the allowed MAX_MSG_LEN (110 bytes) per print. 
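A generic sketch of the same technique, with dev_dbg() standing in for the driver's IWL_DEBUG_INFO() and a..e as placeholder values: the one over-long format string becomes two calls, each safely under the limit.

    dev_dbg(dev, "installed 0x%08x external 0x%08x status 0x%08x duration 0x%08x\n",
            a, b, c, d);
    dev_dbg(dev, "image size: 0x%08x\n", e);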
Fixes: 19f63c531b85 ("iwlwifi: mvm: support v2 of mfuart load notification") Signed-off-by: Golan Ben-Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index c42ef8681b75..45cb4f476e76 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1396,19 +1396,15 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; + IWL_DEBUG_INFO(mvm, + "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", + le32_to_cpu(mfuart_notif->installed_ver), + le32_to_cpu(mfuart_notif->external_ver), + le32_to_cpu(mfuart_notif->status), + le32_to_cpu(mfuart_notif->duration)); + if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) IWL_DEBUG_INFO(mvm, - "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x, image size: 0x%08x\n", - le32_to_cpu(mfuart_notif->installed_ver), - le32_to_cpu(mfuart_notif->external_ver), - le32_to_cpu(mfuart_notif->status), - le32_to_cpu(mfuart_notif->duration), + "MFUART: image size: 0x%08x\n", le32_to_cpu(mfuart_notif->image_size)); - else - IWL_DEBUG_INFO(mvm, - "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", - le32_to_cpu(mfuart_notif->installed_ver), - le32_to_cpu(mfuart_notif->external_ver), - le32_to_cpu(mfuart_notif->status), - le32_to_cpu(mfuart_notif->duration)); } -- cgit v1.2.3 From ff4cf0e5ce952488074aa7f47734af1794f55fbc Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 7 Feb 2017 16:15:27 +0300 Subject: net: dsa: bcm_sf2: cleanup bcm_sf2_cfp_rule_get() a little This patch doesn't affect how the code works. My static checker complains that the mask and shift doesn't make sense because 0xffffff << 16 goes beyond the end of 32 bits. It should be 0xffff instead but the existing code won't cause runtime bugs. Also the casting here is not needed and not consistent with the rest of the code. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2_cfp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index c71be3e0dc2d..346dd9a1232d 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -441,17 +441,17 @@ static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); /* IPv4 dst [15:8] */ - ipv4 = (u16)(reg & 0xff) << 8; + ipv4 = (reg & 0xff) << 8; reg = core_readl(priv, CORE_CFP_DATA_PORT(1)); /* IPv4 dst [31:16] */ - ipv4 |= (u32)((reg >> 8) & 0xffffff) << 16; + ipv4 |= ((reg >> 8) & 0xffff) << 16; /* IPv4 dst [7:0] */ ipv4 |= (reg >> 24) & 0xff; v4_spec->ip4dst = cpu_to_be32(ipv4); nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); /* IPv4 src [15:8] */ - ipv4 = (u16)(reg & 0xff) << 8; + ipv4 = (reg & 0xff) << 8; reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); if (!(reg & SLICE_VALID)) @@ -460,7 +460,7 @@ static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, /* IPv4 src [7:0] */ ipv4 |= (reg >> 24) & 0xff; /* IPv4 src [31:16] */ - ipv4 |= ((reg >> 8) & 0xffffff) << 16; + ipv4 |= ((reg >> 8) & 0xffff) << 16; v4_spec->ip4src = cpu_to_be32(ipv4); nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); -- cgit v1.2.3 From 04d8a0a5f3b6887543850d991a5e37c4ec90e250 Mon Sep 17 00:00:00 2001 From: Raju Lakkaraju Date: Tue, 7 Feb 2017 19:10:26 +0530 Subject: net: phy: Add LED mode driver for Microsemi PHYs. LED Mode: Microsemi PHY support 2 LEDs (LED[0] and LED[1]) to display different status information that can be selected by setting LED mode. LED Mode parameter (vsc8531, led-0-mode) and (vsc8531, led-1-mode) get from Device Tree. Signed-off-by: Raju Lakkaraju Signed-off-by: David S. Miller --- .../devicetree/bindings/net/mscc-phy-vsc8531.txt | 10 +++ drivers/net/phy/mscc.c | 85 +++++++++++++++++++++- include/dt-bindings/net/mscc-phy-vsc8531.h | 29 ++++++++ 3 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 include/dt-bindings/net/mscc-phy-vsc8531.h diff --git a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt index bdefefc66594..0eedabe22cc3 100644 --- a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt +++ b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt @@ -27,6 +27,14 @@ Optional properties: 'vddmac'. Default value is 0%. Ref: Table:1 - Edge rate change (below). +- vsc8531,led-0-mode : LED mode. Specify how the LED[0] should behave. + Allowed values are define in + "include/dt-bindings/net/mscc-phy-vsc8531.h". + Default value is VSC8531_LINK_1000_ACTIVITY (1). +- vsc8531,led-1-mode : LED mode. Specify how the LED[1] should behave. + Allowed values are define in + "include/dt-bindings/net/mscc-phy-vsc8531.h". + Default value is VSC8531_LINK_100_ACTIVITY (2). 
Table: 1 - Edge rate change ----------------------------------------------------------------| @@ -60,4 +68,6 @@ Example: compatible = "ethernet-phy-id0007.0570"; vsc8531,vddmac = <3300>; vsc8531,edge-slowdown = <7>; + vsc8531,led-0-mode = ; + vsc8531,led-1-mode = ; }; diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index e03ead81fffb..650c2667d523 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -13,6 +13,7 @@ #include #include #include +#include enum rgmii_rx_clock_delay { RGMII_RX_CLK_DELAY_0_2_NS = 0, @@ -52,6 +53,11 @@ enum rgmii_rx_clock_delay { #define MSCC_PHY_DEV_AUX_CNTL 28 #define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000 +#define MSCC_PHY_LED_MODE_SEL 29 +#define LED_1_MODE_SEL_MASK 0x00F0 +#define LED_0_MODE_SEL_MASK 0x000F +#define LED_1_MODE_SEL_POS 4 + #define MSCC_EXT_PAGE_ACCESS 31 #define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */ #define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */ @@ -99,6 +105,8 @@ enum rgmii_rx_clock_delay { struct vsc8531_private { int rate_magic; + u8 led_0_mode; + u8 led_1_mode; }; #ifdef CONFIG_OF_MDIO @@ -123,6 +131,29 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page) return rc; } +static int vsc85xx_led_cntl_set(struct phy_device *phydev, + u8 led_num, + u8 mode) +{ + int rc; + u16 reg_val; + + mutex_lock(&phydev->lock); + reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL); + if (led_num) { + reg_val &= ~LED_1_MODE_SEL_MASK; + reg_val |= (((u16)mode << LED_1_MODE_SEL_POS) & + LED_1_MODE_SEL_MASK); + } else { + reg_val &= ~LED_0_MODE_SEL_MASK; + reg_val |= ((u16)mode & LED_0_MODE_SEL_MASK); + } + rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val); + mutex_unlock(&phydev->lock); + + return rc; +} + static int vsc85xx_mdix_get(struct phy_device *phydev, u8 *mdix) { u16 reg_val; @@ -370,11 +401,41 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) return -EINVAL; } + +static int vsc85xx_dt_led_mode_get(struct phy_device *phydev, + char *led, + u8 default_mode) +{ + struct device *dev = &phydev->mdio.dev; + struct device_node *of_node = dev->of_node; + u8 led_mode; + int err; + + if (!of_node) + return -ENODEV; + + led_mode = default_mode; + err = of_property_read_u8(of_node, led, &led_mode); + if (!err && (led_mode > 15 || led_mode == 7 || led_mode == 11)) { + phydev_err(phydev, "DT %s invalid\n", led); + return -EINVAL; + } + + return led_mode; +} + #else static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) { return 0; } + +static int vsc85xx_dt_led_mode_get(struct phy_device *phydev, + char *led, + u8 default_mode) +{ + return default_mode; +} #endif /* CONFIG_OF_MDIO */ static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate) @@ -499,6 +560,14 @@ static int vsc85xx_config_init(struct phy_device *phydev) if (rc) return rc; + rc = vsc85xx_led_cntl_set(phydev, 1, vsc8531->led_1_mode); + if (rc) + return rc; + + rc = vsc85xx_led_cntl_set(phydev, 0, vsc8531->led_0_mode); + if (rc) + return rc; + rc = genphy_config_init(phydev); return rc; @@ -555,8 +624,9 @@ static int vsc85xx_read_status(struct phy_device *phydev) static int vsc85xx_probe(struct phy_device *phydev) { - int rate_magic; struct vsc8531_private *vsc8531; + int rate_magic; + int led_mode; rate_magic = vsc85xx_edge_rate_magic_get(phydev); if (rate_magic < 0) @@ -570,6 +640,19 @@ static int vsc85xx_probe(struct phy_device *phydev) vsc8531->rate_magic = rate_magic; + /* LED[0] and LED[1] mode */ + led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-0-mode", + 
VSC8531_LINK_1000_ACTIVITY); + if (led_mode < 0) + return led_mode; + vsc8531->led_0_mode = led_mode; + + led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-1-mode", + VSC8531_LINK_100_ACTIVITY); + if (led_mode < 0) + return led_mode; + vsc8531->led_1_mode = led_mode; + return 0; } diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h new file mode 100644 index 000000000000..697161f80eb5 --- /dev/null +++ b/include/dt-bindings/net/mscc-phy-vsc8531.h @@ -0,0 +1,29 @@ +/* + * Device Tree constants for Microsemi VSC8531 PHY + * + * Author: Nagaraju Lakkaraju + * + * License: Dual MIT/GPL + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _DT_BINDINGS_MSCC_VSC8531_H +#define _DT_BINDINGS_MSCC_VSC8531_H + +/* PHY LED Modes */ +#define VSC8531_LINK_ACTIVITY 0 +#define VSC8531_LINK_1000_ACTIVITY 1 +#define VSC8531_LINK_100_ACTIVITY 2 +#define VSC8531_LINK_10_ACTIVITY 3 +#define VSC8531_LINK_100_1000_ACTIVITY 4 +#define VSC8531_LINK_10_1000_ACTIVITY 5 +#define VSC8531_LINK_10_100_ACTIVITY 6 +#define VSC8531_DUPLEX_COLLISION 8 +#define VSC8531_COLLISION 9 +#define VSC8531_ACTIVITY 10 +#define VSC8531_AUTONEG_FAULT 12 +#define VSC8531_SERIAL_MODE 13 +#define VSC8531_FORCE_LED_OFF 14 +#define VSC8531_FORCE_LED_ON 15 + +#endif -- cgit v1.2.3 From 3c19bd6c52d441893ba19b3418825b27cfa4fd9c Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 8 Feb 2017 03:42:05 +0800 Subject: net: qcom/emac: fix semicolon.cocci warnings drivers/net/ethernet/qualcomm/emac/emac-ethtool.c:155:49-50: Unneeded semicolon Remove unneeded semicolon. Generated by: scripts/coccinelle/misc/semicolon.cocci CC: Timur Tabi Signed-off-by: Fengguang Wu Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index c418a6e9a591..ca616fd93312 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -152,7 +152,7 @@ static void emac_get_pauseparam(struct net_device *netdev, pause->autoneg = adpt->automatic ? AUTONEG_ENABLE : AUTONEG_DISABLE; pause->rx_pause = adpt->rx_flow_control ? 1 : 0; - pause->tx_pause = adpt->tx_flow_control ? 1 : 0;; + pause->tx_pause = adpt->tx_flow_control ? 1 : 0; } static int emac_set_pauseparam(struct net_device *netdev, -- cgit v1.2.3 From 40710cf9ad234778eb8b10ef07d922f435005cf7 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Wed, 8 Feb 2017 00:07:33 +0100 Subject: net: mellanox: switchx2: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Reviewed-by: Ido Schimmel Tested-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 47 ++++++++++++++++---------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 169193ee3d4b..ec1e886d4566 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -733,7 +733,7 @@ static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto) } static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, - struct ethtool_cmd *cmd) + struct ethtool_link_ksettings *cmd) { u32 speed = SPEED_UNKNOWN; u8 duplex = DUPLEX_UNKNOWN; @@ -750,8 +750,8 @@ static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, } } out: - ethtool_cmd_speed_set(cmd, speed); - cmd->duplex = duplex; + cmd->base.speed = speed; + cmd->base.duplex = duplex; } static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto) @@ -776,8 +776,9 @@ static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto) return PORT_OTHER; } -static int mlxsw_sx_port_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +mlxsw_sx_port_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; @@ -785,6 +786,7 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev, u32 eth_proto_cap; u32 eth_proto_admin; u32 eth_proto_oper; + u32 supported, advertising, lp_advertising; int err; mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); @@ -796,18 +798,24 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev, mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, ð_proto_oper); - cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) | + supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) | mlxsw_sx_from_ptys_supported_link(eth_proto_cap) | SUPPORTED_Pause | SUPPORTED_Asym_Pause; - cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin); + advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin); mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, cmd); eth_proto_oper = eth_proto_oper ? 
eth_proto_oper : eth_proto_cap; - cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper); - cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper); + cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper); + lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); - cmd->transceiver = XCVR_INTERNAL; return 0; } @@ -847,8 +855,9 @@ static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed) return ptys_proto; } -static int mlxsw_sx_port_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +mlxsw_sx_port_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; @@ -857,13 +866,17 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev, u32 eth_proto_new; u32 eth_proto_cap; u32 eth_proto_admin; + u32 advertising; bool is_up; int err; - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); - eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? - mlxsw_sx_to_ptys_advert_link(cmd->advertising) : + eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ? + mlxsw_sx_to_ptys_advert_link(advertising) : mlxsw_sx_to_ptys_speed(speed); mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); @@ -920,8 +933,8 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = { .get_strings = mlxsw_sx_port_get_strings, .get_ethtool_stats = mlxsw_sx_port_get_stats, .get_sset_count = mlxsw_sx_port_get_sset_count, - .get_settings = mlxsw_sx_port_get_settings, - .set_settings = mlxsw_sx_port_set_settings, + .get_link_ksettings = mlxsw_sx_port_get_link_ksettings, + .set_link_ksettings = mlxsw_sx_port_set_link_ksettings, }; static int mlxsw_sx_port_attr_get(struct net_device *dev, -- cgit v1.2.3 From 97e219b7c1f75b14b29abe28ad53e8709e8d15e5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 7 Feb 2017 15:37:15 -0800 Subject: gro_cells: move to net/core/gro_cells.c We have many gro cells users, so lets move the code to avoid duplication. This creates a CONFIG_GRO_CELLS option. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/Kconfig | 3 ++ include/net/gro_cells.h | 86 +++------------------------------------------ net/Kconfig | 4 +++ net/core/Makefile | 1 + net/core/gro_cells.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/Kconfig | 1 + net/ipv6/Kconfig | 1 + net/xfrm/Kconfig | 1 + 8 files changed, 107 insertions(+), 82 deletions(-) create mode 100644 net/core/gro_cells.c diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 95c32f2d7601..a993cbeb9e0c 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -170,6 +170,7 @@ config VXLAN tristate "Virtual eXtensible Local Area Network (VXLAN)" depends on INET select NET_UDP_TUNNEL + select GRO_CELLS ---help--- This allows one to create vxlan virtual interfaces that provide Layer 2 Networks over Layer 3 Networks. 
VXLAN is often used @@ -184,6 +185,7 @@ config GENEVE tristate "Generic Network Virtualization Encapsulation" depends on INET && NET_UDP_TUNNEL select NET_IP_TUNNEL + select GRO_CELLS ---help--- This allows one to create geneve virtual interfaces that provide Layer 2 Networks over Layer 3 Networks. GENEVE is often used @@ -216,6 +218,7 @@ config MACSEC select CRYPTO select CRYPTO_AES select CRYPTO_GCM + select GRO_CELLS ---help--- MACsec is an encryption standard for Ethernet. diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h index 2a1abbf8da74..fcaf8f479130 100644 --- a/include/net/gro_cells.h +++ b/include/net/gro_cells.h @@ -5,92 +5,14 @@ #include #include -struct gro_cell { - struct sk_buff_head napi_skbs; - struct napi_struct napi; -}; +struct gro_cell; struct gro_cells { struct gro_cell __percpu *cells; }; -static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) -{ - struct gro_cell *cell; - struct net_device *dev = skb->dev; - - if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) - return netif_rx(skb); - - cell = this_cpu_ptr(gcells->cells); - - if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - - __skb_queue_tail(&cell->napi_skbs, skb); - if (skb_queue_len(&cell->napi_skbs) == 1) - napi_schedule(&cell->napi); - return NET_RX_SUCCESS; -} - -/* called under BH context */ -static inline int gro_cell_poll(struct napi_struct *napi, int budget) -{ - struct gro_cell *cell = container_of(napi, struct gro_cell, napi); - struct sk_buff *skb; - int work_done = 0; - - while (work_done < budget) { - skb = __skb_dequeue(&cell->napi_skbs); - if (!skb) - break; - napi_gro_receive(napi, skb); - work_done++; - } - - if (work_done < budget) - napi_complete_done(napi, work_done); - return work_done; -} - -static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev) -{ - int i; - - gcells->cells = alloc_percpu(struct gro_cell); - if (!gcells->cells) - return -ENOMEM; - - for_each_possible_cpu(i) { - struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); - - __skb_queue_head_init(&cell->napi_skbs); - - set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); - - netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); - napi_enable(&cell->napi); - } - return 0; -} - -static inline void gro_cells_destroy(struct gro_cells *gcells) -{ - int i; - - if (!gcells->cells) - return; - for_each_possible_cpu(i) { - struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); - - netif_napi_del(&cell->napi); - __skb_queue_purge(&cell->napi_skbs); - } - free_percpu(gcells->cells); - gcells->cells = NULL; -} +int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb); +int gro_cells_init(struct gro_cells *gcells, struct net_device *dev); +void gro_cells_destroy(struct gro_cells *gcells); #endif diff --git a/net/Kconfig b/net/Kconfig index 2f2842d2d3ed..f19c0c3b9589 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -413,6 +413,10 @@ config DST_CACHE bool default n +config GRO_CELLS + bool + default n + config NET_DEVLINK tristate "Network physical/parent device Netlink interface" help diff --git a/net/core/Makefile b/net/core/Makefile index f6761b6e3b29..79f9479e9658 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o obj-$(CONFIG_DST_CACHE) += dst_cache.o obj-$(CONFIG_HWBM) += hwbm.o obj-$(CONFIG_NET_DEVLINK) += devlink.o +obj-$(CONFIG_GRO_CELLS) += gro_cells.o diff --git 
a/net/core/gro_cells.c b/net/core/gro_cells.c new file mode 100644 index 000000000000..c98bbfbd26b8 --- /dev/null +++ b/net/core/gro_cells.c @@ -0,0 +1,92 @@ +#include +#include +#include +#include + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + struct gro_cell *cell; + + if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) + return netif_rx(skb); + + cell = this_cpu_ptr(gcells->cells); + + if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + __skb_queue_tail(&cell->napi_skbs, skb); + if (skb_queue_len(&cell->napi_skbs) == 1) + napi_schedule(&cell->napi); + return NET_RX_SUCCESS; +} +EXPORT_SYMBOL(gro_cells_receive); + +/* called under BH context */ +static int gro_cell_poll(struct napi_struct *napi, int budget) +{ + struct gro_cell *cell = container_of(napi, struct gro_cell, napi); + struct sk_buff *skb; + int work_done = 0; + + while (work_done < budget) { + skb = __skb_dequeue(&cell->napi_skbs); + if (!skb) + break; + napi_gro_receive(napi, skb); + work_done++; + } + + if (work_done < budget) + napi_complete_done(napi, work_done); + return work_done; +} + +int gro_cells_init(struct gro_cells *gcells, struct net_device *dev) +{ + int i; + + gcells->cells = alloc_percpu(struct gro_cell); + if (!gcells->cells) + return -ENOMEM; + + for_each_possible_cpu(i) { + struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); + + __skb_queue_head_init(&cell->napi_skbs); + + set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); + + netif_napi_add(dev, &cell->napi, gro_cell_poll, + NAPI_POLL_WEIGHT); + napi_enable(&cell->napi); + } + return 0; +} +EXPORT_SYMBOL(gro_cells_init); + +void gro_cells_destroy(struct gro_cells *gcells) +{ + int i; + + if (!gcells->cells) + return; + for_each_possible_cpu(i) { + struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); + + netif_napi_del(&cell->napi); + __skb_queue_purge(&cell->napi_skbs); + } + free_percpu(gcells->cells); + gcells->cells = NULL; +} +EXPORT_SYMBOL(gro_cells_destroy); diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 6e7baaf814c6..e30f9caddae8 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -187,6 +187,7 @@ config NET_IPGRE_DEMUX config NET_IP_TUNNEL tristate select DST_CACHE + select GRO_CELLS default n config NET_IPGRE diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index ec1267e2bd1f..3c7c76b2a7ba 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -208,6 +208,7 @@ config IPV6_TUNNEL tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)" select INET6_TUNNEL select DST_CACHE + select GRO_CELLS ---help--- Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in RFC 2473. diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index bda1a13628a8..c06d3997c6e7 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -4,6 +4,7 @@ config XFRM bool depends on NET + select GRO_CELLS config XFRM_ALGO tristate -- cgit v1.2.3 From 8ef9594764617e3fd4500205b080b53c45c14c4b Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 7 Feb 2017 16:12:00 -0800 Subject: bridge: vlan tunnel id info range fill size calc cleanups This fixes a bug and cleans up tunnelid range size calculation code by using consistent variable names and checks in size calculation and fill functions. 
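Editorial aside before the test output below: the consolidated check in this bridge patch boils down to "a vlan continues the current range only if both its vid and its tunnel id follow the previous entry by exactly one", which is what the vid check plus vlan_tunid_inrange() encode. A minimal standalone C sketch of that predicate (simplified host-order types and hypothetical names, not the bridge code itself):

/*
 * Standalone sketch of the range test the patch consolidates.
 * Types are simplified: tunid is a host-order stand-in for the
 * be32 tunnel key used in br_netlink_tunnel.c.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vlan_entry {
	uint16_t vid;
	uint32_t tunid;
};

static bool extends_range(const struct vlan_entry *curr,
			  const struct vlan_entry *last)
{
	/* both the vid and the tunnel id must advance by exactly one */
	return (curr->vid - last->vid) == 1 &&
	       (curr->tunid - last->tunid) == 1;
}

int main(void)
{
	struct vlan_entry last = { .vid = 104, .tunid = 1004 };
	struct vlan_entry curr = { .vid = 105, .tunid = 1005 };

	/* matches the 100-105 / 1000-1005 range in the test output below */
	printf("extends range: %s\n", extends_range(&curr, &last) ? "yes" : "no");
	return 0;
}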
tested for a few cases of vlan-vni range mappings: (output from patched iproute2): $bridge vlan showtunnel port vid tunid vxlan0 100-105 1000-1005 200 2000 210 2100 211-213 2100-2102 214 2104 216-217 2108-2109 219 2119 Fixes: efa5356b0d97 ("bridge: per vlan dst_metadata netlink support") Reported-by: Colin Ian King Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- net/bridge/br_netlink_tunnel.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c index f38473509647..c913491495ab 100644 --- a/net/bridge/br_netlink_tunnel.c +++ b/net/bridge/br_netlink_tunnel.c @@ -30,18 +30,18 @@ static size_t __get_vlan_tinfo_size(void) nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */ } -static bool vlan_tunnel_id_isrange(struct net_bridge_vlan *v, - struct net_bridge_vlan *v_end) +static bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr, + struct net_bridge_vlan *v_last) { - __be32 tunid_curr = tunnel_id_to_key32(v->tinfo.tunnel_id); - __be32 tunid_end = tunnel_id_to_key32(v_end->tinfo.tunnel_id); + __be32 tunid_curr = tunnel_id_to_key32(v_curr->tinfo.tunnel_id); + __be32 tunid_last = tunnel_id_to_key32(v_last->tinfo.tunnel_id); - return (be32_to_cpu(tunid_curr) - be32_to_cpu(tunid_end)) == 1; + return (be32_to_cpu(tunid_curr) - be32_to_cpu(tunid_last)) == 1; } static int __get_num_vlan_tunnel_infos(struct net_bridge_vlan_group *vg) { - struct net_bridge_vlan *v, *v_start = NULL, *v_end = NULL; + struct net_bridge_vlan *v, *vtbegin = NULL, *vtend = NULL; int num_tinfos = 0; /* Count number of vlan infos */ @@ -50,27 +50,25 @@ static int __get_num_vlan_tunnel_infos(struct net_bridge_vlan_group *vg) if (!br_vlan_should_use(v) || !v->tinfo.tunnel_id) continue; - if (!v_start) { + if (!vtbegin) { goto initvars; - } else if ((v->vid - v_end->vid) == 1 && - vlan_tunnel_id_isrange(v_end, v) == 1) { - v_end = v; + } else if ((v->vid - vtend->vid) == 1 && + vlan_tunid_inrange(v, vtend)) { + vtend = v; continue; } else { - if ((v_end->vid - v->vid) > 0 && - vlan_tunnel_id_isrange(v_end, v) > 0) + if ((vtend->vid - vtbegin->vid) > 0) num_tinfos += 2; else num_tinfos += 1; } initvars: - v_start = v; - v_end = v; + vtbegin = v; + vtend = v; } - if (v_start) { - if ((v_end->vid - v->vid) > 0 && - vlan_tunnel_id_isrange(v_end, v) > 0) + if (vtbegin && vtend) { + if ((vtend->vid - vtbegin->vid) > 0) num_tinfos += 2; else num_tinfos += 1; @@ -171,7 +169,7 @@ int br_fill_vlan_tunnel_info(struct sk_buff *skb, if (!vtbegin) { goto initvars; } else if ((v->vid - vtend->vid) == 1 && - vlan_tunnel_id_isrange(v, vtend)) { + vlan_tunid_inrange(v, vtend)) { vtend = v; continue; } else { -- cgit v1.2.3 From c502faf94153bd0fedc5389a936f728a659cc6ab Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 8 Feb 2017 01:19:43 +0100 Subject: bpf, lpm: fix overflows in trie_alloc checks Cap the maximum (total) value size and bail out if larger than KMALLOC_MAX_SIZE as otherwise it doesn't make any sense to proceed further, since we're guaranteed to fail to allocate elements anyway in lpm_trie_node_alloc(); likleyhood of failure is still high for large values, though, similarly as with htab case in non-prealloc. Next, make sure that cost vars are really u64 instead of size_t, so that we don't overflow on 32 bit and charge only tiny map.pages against memlock while allowing huge max_entries; cap also the max cost like we do with other map types. 
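To make the overflow scenario concrete, here is a small standalone C sketch (an editorial addition, not part of the patch; the helper name and constants are made up for illustration) mirroring the u64 cost accounting and the U32_MAX - PAGE_SIZE cap described above — on a 32-bit size_t the multiplication would wrap and charge only a tiny page count, whereas the 64-bit computation rejects the oversized map:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_SHIFT	12

/* hypothetical stand-in for the trie_alloc() cost/charge logic */
static int charge_pages(uint32_t max_entries, uint32_t cost_per_node,
			uint32_t *pages)
{
	uint64_t cost = 64;	/* stand-in for sizeof(*trie) */

	cost += (uint64_t)max_entries * cost_per_node;
	if (cost >= UINT32_MAX - PAGE_SIZE)
		return -1;	/* -E2BIG in the real code */

	/* round up to whole pages, as round_up(cost, PAGE_SIZE) >> PAGE_SHIFT does */
	*pages = (uint32_t)((cost + PAGE_SIZE - 1) >> PAGE_SHIFT);
	return 0;
}

int main(void)
{
	uint32_t pages;

	/* 16M entries * 512 bytes/node ~= 8GB: must be rejected, not wrapped */
	if (charge_pages(16 * 1024 * 1024, 512, &pages))
		printf("rejected oversized map (as the patch intends)\n");
	else
		printf("charged %u pages\n", pages);
	return 0;
}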
Fixes: b95a5c4db09b ("bpf: add a longest prefix match trie map implementation") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/lpm_trie.c | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 144e9763102f..e0f6a0bd279b 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -394,10 +394,21 @@ static int trie_delete_elem(struct bpf_map *map, void *key) return -ENOSYS; } +#define LPM_DATA_SIZE_MAX 256 +#define LPM_DATA_SIZE_MIN 1 + +#define LPM_VAL_SIZE_MAX (KMALLOC_MAX_SIZE - LPM_DATA_SIZE_MAX - \ + sizeof(struct lpm_trie_node)) +#define LPM_VAL_SIZE_MIN 1 + +#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key) + (X)) +#define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX) +#define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN) + static struct bpf_map *trie_alloc(union bpf_attr *attr) { - size_t cost, cost_per_node; struct lpm_trie *trie; + u64 cost = sizeof(*trie), cost_per_node; int ret; if (!capable(CAP_SYS_ADMIN)) @@ -406,9 +417,10 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->map_flags != BPF_F_NO_PREALLOC || - attr->key_size < sizeof(struct bpf_lpm_trie_key) + 1 || - attr->key_size > sizeof(struct bpf_lpm_trie_key) + 256 || - attr->value_size == 0) + attr->key_size < LPM_KEY_SIZE_MIN || + attr->key_size > LPM_KEY_SIZE_MAX || + attr->value_size < LPM_VAL_SIZE_MIN || + attr->value_size > LPM_VAL_SIZE_MAX) return ERR_PTR(-EINVAL); trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN); @@ -426,18 +438,24 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) cost_per_node = sizeof(struct lpm_trie_node) + attr->value_size + trie->data_size; - cost = sizeof(*trie) + attr->max_entries * cost_per_node; + cost += (u64) attr->max_entries * cost_per_node; + if (cost >= U32_MAX - PAGE_SIZE) { + ret = -E2BIG; + goto out_err; + } + trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; ret = bpf_map_precharge_memlock(trie->map.pages); - if (ret) { - kfree(trie); - return ERR_PTR(ret); - } + if (ret) + goto out_err; raw_spin_lock_init(&trie->lock); return &trie->map; +out_err: + kfree(trie); + return ERR_PTR(ret); } static void trie_free(struct bpf_map *map) -- cgit v1.2.3 From 280892226ba25289519c685a6d25ba2670f2d851 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:06 +0100 Subject: net: stmmac: fix the typo on MAC_RNABLE_RX the define MAC_RNABLE_RX have a typo, rename it to MAC_ENABLE_RX Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 2 +- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 75e2666df940..262a1c477a93 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -343,7 +343,7 @@ struct dma_features { /* Common MAC defines */ #define MAC_CTRL_REG 0x00000000 /* MAC Control */ #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ -#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ +#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ /* Default LPI timers */ #define STMMAC_DEFAULT_LIT_LS 0x3E8 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 84e3e84cec7d..a414bde56dd7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -261,9 +261,9 @@ void stmmac_set_mac(void __iomem *ioaddr, bool enable) u32 value = readl(ioaddr + MAC_CTRL_REG); if (enable) - value |= MAC_RNABLE_RX | MAC_ENABLE_TX; + value |= MAC_ENABLE_RX | MAC_ENABLE_TX; else - value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); writel(value, ioaddr + MAC_CTRL_REG); } -- cgit v1.2.3 From 6a2cac549b368960c9cd6a993f2f2cc6d720e935 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:07 +0100 Subject: net: stmmac: Remove the bus_setup function pointer The bus_setup function pointer is not used at all, this patch remove it. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ---- include/linux/stmmac.h | 1 - 2 files changed, 5 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index bd83bf9ef326..1ef602820110 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1682,10 +1682,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Copy the MAC addr into the HW */ priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); - /* If required, perform hw setup of the bus. */ - if (priv->plat->bus_setup) - priv->plat->bus_setup(priv->ioaddr); - /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { int speed = priv->plat->mac_port_sel_speed; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index d76033d6726d..fc273e9d5f67 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -134,7 +134,6 @@ struct plat_stmmacenet_data { int tx_fifo_size; int rx_fifo_size; void (*fix_mac_speed)(void *priv, unsigned int speed); - void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; -- cgit v1.2.3 From 8d45e42babb1c7b1a1974cc7c4582efcaba11a35 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:08 +0100 Subject: net: stmmac: fix some typos in comments This patch fix some typos in comments. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 6 +++--- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 16 ++++++++-------- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index a414bde56dd7..3b1570d73880 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -102,7 +102,7 @@ static void show_tx_process_state(unsigned int status) pr_debug("- TX (Stopped): Reset or Stop command\n"); break; case 1: - pr_debug("- TX (Running):Fetching the Tx desc\n"); + pr_debug("- TX (Running): Fetching the Tx desc\n"); break; case 2: pr_debug("- TX (Running): Waiting for end of tx\n"); @@ -136,7 +136,7 @@ static void show_rx_process_state(unsigned int status) pr_debug("- RX (Running): Fetching the Rx desc\n"); break; case 2: - pr_debug("- RX (Running):Checking for end of pkt\n"); + pr_debug("- RX (Running): Checking for end of pkt\n"); break; case 3: pr_debug("- RX (Running): Waiting for Rx pkt\n"); @@ -246,7 +246,7 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned long data; data = (addr[5] << 8) | addr[4]; - /* For MAC Addr registers se have to set the Address Enable (AE) + /* For MAC Addr registers we have to set the Address Enable (AE) * bit that has no effect on the High Reg 0 where the bit 31 (MO) * is RO. */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1ef602820110..0636858cf77a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -216,7 +216,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) /** * stmmac_hw_fix_mac_speed - callback for speed selection * @priv: driver private structure - * Description: on some platforms (e.g. ST), some HW system configuraton + * Description: on some platforms (e.g. ST), some HW system configuration * registers have to be set according to the link speed negotiated. */ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) @@ -416,7 +416,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, /** * stmmac_hwtstamp_ioctl - control hardware timestamping. * @dev: device pointer. - * @ifr: An IOCTL specefic structure, that can contain a pointer to + * @ifr: An IOCTL specific structure, that can contain a pointer to * a proprietary structure used to pass information to the driver. * Description: * This function configures the MAC to enable/disable both outgoing(TX) @@ -1003,7 +1003,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) * @dev: net device structure * @flags: gfp flag. * Description: this function initializes the DMA RX/TX descriptors - * and allocates the socket buffers. It suppors the chained and ring + * and allocates the socket buffers. It supports the chained and ring * modes. */ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) @@ -2532,7 +2532,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) if (unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; if (priv->hwts_rx_en && !priv->extend_desc) { - /* DESC2 & DESC3 will be overwitten by device + /* DESC2 & DESC3 will be overwritten by device * with timestamp value, hence reinitialize * them in stmmac_rx_refill() function so that * device can reuse it. 
@@ -2555,7 +2555,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) frame_len = priv->hw->desc->get_rx_frame_len(p, coe); - /* If frame length is greather than skb buffer size + /* If frame length is greater than skb buffer size * (preallocated during init) then the packet is * ignored */ @@ -2761,7 +2761,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames * (due to limited buffer sizes). In this case we disable - * the TX csum insertionin the TDES and not use SF. + * the TX csum insertion in the TDES and not use SF. */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_CSUM_MASK; @@ -2988,7 +2988,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) (priv->dma_cap.hash_filter) ? "Y" : "N"); seq_printf(seq, "\tMultiple MAC address registers: %s\n", (priv->dma_cap.multi_addr) ? "Y" : "N"); - seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n", + seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", (priv->dma_cap.pcs) ? "Y" : "N"); seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", (priv->dma_cap.sma_mdio) ? "Y" : "N"); @@ -3485,7 +3485,7 @@ int stmmac_resume(struct device *dev) priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); - /* enable the clk prevously disabled */ + /* enable the clk previously disabled */ clk_enable(priv->plat->stmmac_clk); clk_enable(priv->plat->pclk); /* reset the phy so that it's ready */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b0344c213752..a1a469efd54f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -198,7 +198,7 @@ int stmmac_mdio_reset(struct mii_bus *bus) /* This is a workaround for problems with the STE101P PHY. * It doesn't complete its reset until at least one clock cycle - * on MDC, so perform a dummy mdio read. To be upadted for GMAC4 + * on MDC, so perform a dummy mdio read. To be updated for GMAC4 * if needed. */ if (!priv->plat->has_gmac4) -- cgit v1.2.3 From deeb6376caeeab38d2cf616aea6f2ca3a4ecf0ed Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:09 +0100 Subject: net: stmmac: remove freesoftware address This patch fix the checkpatch warning about free software address. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/common.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/descs.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/descs_com.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/mmc.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 4 ---- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 4 ---- 26 files changed, 104 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 026e8e9cb942..01a8c020d6db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 262a1c477a93..24929bf17e84 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index faeeef75d7f1..0c2432b1ce67 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -11,10 +11,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 1d181e205d6e..ca9d7e48034c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -17,10 +17,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 1657acfa70c2..e14984814041 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 52b9407a8a39..c02d36629c52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -10,10 +10,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index a26715bf4564..91c8926b7479 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 612d3aaac9a4..fbaec0ffd9ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 9dd2987e284d..8ab518997b1b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -18,10 +18,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index e5664da382f3..d40e91e8fc7b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -18,10 +18,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 726d9d9aaf83..56e485f79077 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 3b1570d73880..e4cda39fb3b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -10,10 +10,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index f0d86321dfe2..8427643e7211 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 38a1a5603293..c037326331f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index ce9aa792857b..e9b04c28980f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index fd78406e2e9a..5a0d4b038e35 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 9983ce9bd90d..452f256ff03f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index bf8a83ef96f9..cd8fb619b1e9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -10,10 +10,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 322e5c6a0d4b..9083d9a1cded 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 10d6059b2f26..721b61655261 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0636858cf77a..a13fcc4f3536 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -13,10 +13,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index a1a469efd54f..738d5c754ac4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -13,10 +13,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 3da4737620cb..5c9e462276b9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 4963ccdb31e3..320f46eb648d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 3eb281d1db08..d71bd80c5b5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index c06938c47af5..48fb72fc423c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". -- cgit v1.2.3 From bbf892849678ff9e25590cbf78d0202e35d7e41d Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:10 +0100 Subject: net: stmmac: remplace asm/io.h by linux/io.h This patch fix the checkpatch warning about asm/io.h. Sorting all includes in the process. Signed-off-by: Corentin Labbe Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 738d5c754ac4..3fdc6ec6ebf0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -20,13 +20,13 @@ Maintainer: Giuseppe Cavallaro *******************************************************************************/ +#include #include -#include -#include #include #include #include -#include +#include +#include #include "stmmac.h" -- cgit v1.2.3 From efd89b60a35d6ab0bfdd24348a0b6dbe7118aee4 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:11 +0100 Subject: net: stmmac: fix some code style problem Checkpatch complains about some code style problem on stmmac_mdio.c. This patch fix them. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 3fdc6ec6ebf0..c24bef2f7349 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -152,9 +152,9 @@ int stmmac_mdio_reset(struct mii_bus *bus) #ifdef CONFIG_OF if (priv->device->of_node) { - if (data->reset_gpio < 0) { struct device_node *np = priv->device->of_node; + if (!np) return 0; @@ -221,7 +221,7 @@ int stmmac_mdio_register(struct net_device *ndev) return 0; new_bus = mdiobus_alloc(); - if (new_bus == NULL) + if (!new_bus) return -ENOMEM; if (mdio_bus_data->irqs) @@ -258,6 +258,7 @@ int stmmac_mdio_register(struct net_device *ndev) found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); + if (phydev) { int act = 0; char irq_num[4]; @@ -267,7 +268,7 @@ int stmmac_mdio_register(struct net_device *ndev) * If an IRQ was provided to be assigned after * the bus probe, do it here. */ - if ((mdio_bus_data->irqs == NULL) && + if ((!mdio_bus_data->irqs) && (mdio_bus_data->probed_phy_irq > 0)) { new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; -- cgit v1.2.3 From a5f48adc316aa21944c19019583fb9933af01b9a Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:12 +0100 Subject: net: stmmac: replace stmmac_mdio_busy_wait by readl_poll_timeout The stmmac_mdio_busy_wait() function do the same job than readl_poll_timeout(). So is is better to replace it. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 33 ++++++++--------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index c24bef2f7349..d9893cf05695 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -21,6 +21,7 @@ *******************************************************************************/ #include +#include #include #include #include @@ -38,22 +39,6 @@ #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) -static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) -{ - unsigned long curr; - unsigned long finish = jiffies + 3 * HZ; - - do { - curr = jiffies; - if (readl(ioaddr + mii_addr) & MII_BUSY) - cpu_relax(); - else - return 0; - } while (!time_after_eq(curr, finish)); - - return -EBUSY; -} - /** * stmmac_mdio_read * @bus: points to the mii_bus structure @@ -70,7 +55,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; - + u32 v; int data; u32 value = MII_BUSY; @@ -82,12 +67,14 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) if (priv->plat->has_gmac4) value |= MII_GMAC4_READ; - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) return -EBUSY; writel(value, priv->ioaddr + mii_address); - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) 
return -EBUSY; /* Read the data from the MII data register */ @@ -111,7 +98,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; - + u32 v; u32 value = MII_BUSY; value |= (phyaddr << priv->hw->mii.addr_shift) @@ -126,7 +113,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, value |= MII_WRITE; /* Wait until any existing MII operation is complete */ - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) return -EBUSY; /* Set the MII address register to write */ @@ -134,7 +122,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, writel(value, priv->ioaddr + mii_address); /* Wait until any existing MII operation is complete */ - return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); + return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000); } /** -- cgit v1.2.3 From 8a70aeca80c2425b60ff821140b5b5c141e28ba1 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:13 +0100 Subject: net: stmmac: Use readl_poll_timeout The dwmac_dma_reset function use an open coded of readl_poll_timeout(). Replace the open coded handling with the proper function. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index e4cda39fb3b5..e60bfca2a763 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -17,6 +17,7 @@ *******************************************************************************/ #include +#include #include "common.h" #include "dwmac_dma.h" @@ -25,19 +26,16 @@ int dwmac_dma_reset(void __iomem *ioaddr) { u32 value = readl(ioaddr + DMA_BUS_MODE); - int limit; + int err; /* DMA SW reset */ value |= DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + DMA_BUS_MODE); - limit = 10; - while (limit--) { - if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) - break; - mdelay(10); - } - if (limit < 0) + err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 100000, 10000); + if (err) return -EBUSY; return 0; -- cgit v1.2.3 From b2a8315a5c655ffa5b9330dfba2496e8f0a5d7ad Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:14 +0100 Subject: net: stmmac: replace ENOSYS by EINVAL As said by checkpatch ENOSYS means 'invalid syscall nr' and nothing else. This patch replace ENOSYS by the more appropriate value EINVAL. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 320f46eb648d..433a84239a68 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -411,7 +411,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EINVAL); } void stmmac_remove_config_dt(struct platform_device *pdev, -- cgit v1.2.3 From cba920affb7e7326aaea998797c76aa4989c130f Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:15 +0100 Subject: net: stmmac: Correct the error message about invalid speed The message about invalid speed does not state 1000 as a valid speed. It is much simpler to said that the speed is invalid. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a13fcc4f3536..ed813752e23b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -738,8 +738,7 @@ static void stmmac_adjust_link(struct net_device *dev) break; default: netif_warn(priv, link, priv->dev, - "Speed (%d) not 10/100\n", - phydev->speed); + "broken speed: %d\n", phydev->speed); break; } -- cgit v1.2.3 From 662ec2b7e842c0bb1ef482060497c44afa3f1037 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:16 +0100 Subject: net: stmmac: Rewrite two test against NULL value This patch rewrite two test against NULL value with correct style. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ed813752e23b..cc88bdb929dd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -689,7 +689,7 @@ static void stmmac_adjust_link(struct net_device *dev) int new_state = 0; unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; - if (phydev == NULL) + if (!phydev) return; spin_lock_irqsave(&priv->lock, flags); @@ -1131,7 +1131,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) DMA_TO_DEVICE); } - if (priv->tx_skbuff[i] != NULL) { + if (priv->tx_skbuff[i]) { dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; priv->tx_skbuff_dma[i].buf = 0; -- cgit v1.2.3 From e0a76606d6f0dfdb0954ddb2da2c8809329054d6 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:17 +0100 Subject: net: stmmac: rename rx_crc to rx_crc_errors The ethtool stat counter rx_crc from stmmac is mis-named, the name seems to speak about the number of RX CRC done, but in fact it is about errors. This patch rename it to rx_crc_errors, just like the same ifconfig counter. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 2 +- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2 +- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2 +- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 24929bf17e84..144fe84e8a53 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -67,7 +67,7 @@ struct stmmac_extra_stats { unsigned long overflow_error; unsigned long ipc_csum_error; unsigned long rx_collision; - unsigned long rx_crc; + unsigned long rx_crc_errors; unsigned long dribbling_bit; unsigned long rx_length; unsigned long rx_mii; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 8816515e1bbb..843ec69222ea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -103,7 +103,7 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, x->rx_mii++; if (unlikely(rdes3 & RDES3_CRC_ERROR)) { - x->rx_crc++; + x->rx_crc_errors++; stats->rx_crc_errors++; } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 8427643e7211..323b59ec74a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -221,7 +221,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, x->rx_mii++; if (unlikely(rdes0 & RDES0_CRC_ERROR)) { - x->rx_crc++; + x->rx_crc_errors++; stats->rx_crc_errors++; } ret = discard_frame; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 5a0d4b038e35..efb818ebd55e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -111,7 +111,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, stats->collisions++; } if (unlikely(rdes0 & RDES0_CRC_ERROR)) { - x->rx_crc++; + x->rx_crc_errors++; stats->rx_crc_errors++; } ret = discard_frame; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9083d9a1cded..aab895d9257e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -61,7 +61,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(overflow_error), STMMAC_STAT(ipc_csum_error), STMMAC_STAT(rx_collision), - STMMAC_STAT(rx_crc), + STMMAC_STAT(rx_crc_errors), STMMAC_STAT(dribbling_bit), STMMAC_STAT(rx_length), STMMAC_STAT(rx_mii), -- cgit v1.2.3 From b05c76a1f8fe708cd998042a5b0479aef7f2e70b Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:18 +0100 Subject: net: stmmac: print phy information When a PHY is found, printing which one was found (and which type/model) is a good information to know. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cc88bdb929dd..9805aa8c599e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -870,9 +870,7 @@ static int stmmac_init_phy(struct net_device *dev) if (phydev->is_pseudo_fixed_link) phydev->irq = PHY_POLL; - netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n", - __func__, phydev->phy_id, phydev->link); - + phy_attached_info(phydev); return 0; } -- cgit v1.2.3 From 732fe78c0785aa8f1ff40a06a2fd5eb221a81157 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:19 +0100 Subject: net: stmmac: remove dead code in stmmac_tx_clean Since commit cf32deec16e4 ("stmmac: add tx_skbuff_dma to save descriptors used by PTP"), the struct dma_desc *p in stmmac_tx_clean was not used at all. This patch remove this dead code. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9805aa8c599e..c5dc3f944da6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1109,13 +1109,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) int i; for (i = 0; i < DMA_TX_SIZE; i++) { - struct dma_desc *p; - - if (priv->extend_desc) - p = &((priv->dma_etx + i)->basic); - else - p = priv->dma_tx + i; - if (priv->tx_skbuff_dma[i].buf) { if (priv->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, -- cgit v1.2.3 From 64679e565a4a099c5d5dc2ecf103dcaf039ef8c7 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:20 +0100 Subject: net: stmmac: remove unused variable in sysfs_display_ring The u64 x variable in sysfs_display_ring is unused. This patch remove it. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c5dc3f944da6..d1b2e1ec7c6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2893,9 +2893,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, struct dma_desc *p = (struct dma_desc *)head; for (i = 0; i < size; i++) { - u64 x; if (extend_desc) { - x = *(u64 *) ep; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(ep->basic.des0), @@ -2904,7 +2902,6 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, le32_to_cpu(ep->basic.des3)); ep++; } else { - x = *(u64 *) p; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(p->des0), le32_to_cpu(p->des1), -- cgit v1.2.3 From a6a3e026f0d358904afd3df6b7f952ed8ea5b942 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 8 Feb 2017 09:31:21 +0100 Subject: net: stmmac: replace unsigned by u32 checkpatch complains about two unsigned without type after. 
Since the value return is u32, it is simpler to replace it by u32 instead of "unsigned int" Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d1b2e1ec7c6e..7251871eaa32 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -187,7 +187,7 @@ static void print_pkt(unsigned char *buf, int len) static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) { - unsigned avail; + u32 avail; if (priv->dirty_tx > priv->cur_tx) avail = priv->dirty_tx - priv->cur_tx - 1; @@ -199,7 +199,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) { - unsigned dirty; + u32 dirty; if (priv->dirty_rx <= priv->cur_rx) dirty = priv->cur_rx - priv->dirty_rx; -- cgit v1.2.3 From b05d0cfa1932376dc388a0fe67564572a84374a3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 8 Feb 2017 10:39:16 +0100 Subject: mlxsw: acl: Fix mlxsw_afa_block_commit error path No rollback is needed since the chain is in consistent state and mlxsw_afa_block_destroy() will take care of putting it away. So remove the one we have now which is wrong. Also move the set of 'finished' flag to the beginning of the function, because the block is certainly unusable for future action addition no matter if the function succeeds or not. Reported-by: Dan Carpenter Fixes: 4cda7d8d7098 ("mlxsw: core: Introduce flexible actions support") Signed-off-by: Jiri Pirko Acked-by: Ido Schimmel Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 34e2fefb0a25..42bb18feb316 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -356,9 +356,9 @@ int mlxsw_afa_block_commit(struct mlxsw_afa_block *block) { struct mlxsw_afa_set *set = block->cur_set; struct mlxsw_afa_set *prev_set; - int err; block->cur_set = NULL; + block->finished = true; /* Go over all linked sets starting from last * and try to find existing set in the hash table. @@ -368,10 +368,12 @@ int mlxsw_afa_block_commit(struct mlxsw_afa_block *block) do { prev_set = set->prev; set = mlxsw_afa_set_get(block->afa, set); - if (IS_ERR(set)) { - err = PTR_ERR(set); - goto rollback; - } + if (IS_ERR(set)) + /* No rollback is needed since the chain is + * in consistent state and mlxsw_afa_block_destroy + * will take care of putting it away. 
+ */ + return PTR_ERR(set); if (prev_set) { prev_set->next = set; mlxsw_afa_set_next_set(prev_set, set->kvdl_index); @@ -380,13 +382,7 @@ int mlxsw_afa_block_commit(struct mlxsw_afa_block *block) } while (prev_set); block->first_set = set; - block->finished = true; return 0; - -rollback: - while ((set = set->next)) - mlxsw_afa_set_put(block->afa, set); - return err; } EXPORT_SYMBOL(mlxsw_afa_block_commit); -- cgit v1.2.3 From e58be79e2dc818b0e2b3c69ea3d3fefcbc8c709b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:28 +0100 Subject: mlxsw: spectrum_router: Nullify nexthop's neigh pointer When we invalidate a nexthop we should also invalidate its neighbour entry pointer as it might be destroyed later on. This makes the nexthop de-init function symmetric with its init and also ensures nobody will try to access the neighbour entry. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index b19f69f4e28e..31680dea871a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1398,12 +1398,13 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, __mlxsw_sp_nexthop_neigh_update(nh, true); list_del(&nh->neigh_list_node); + nh->neigh_entry = NULL; /* If that is the last nexthop connected to that neigh, remove from * nexthop_neighs_list */ - if (list_empty(&nh->neigh_entry->nexthop_list)) - list_del(&nh->neigh_entry->nexthop_neighs_list_node); + if (list_empty(&neigh_entry->nexthop_list)) + list_del(&neigh_entry->nexthop_neighs_list_node); if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); -- cgit v1.2.3 From e9ad5e7d8d3cf55ceb204b0f7a26c89bfd4b41fe Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:29 +0100 Subject: mlxsw: spectrum_router: Store nexthop groups in a hash table Currently, when we're notified about a new RTN_UNICAST route we perform a lookup on the nexthop group list looking for a group with a matching configuration to that found in the FIB info. This is quite inefficient. Instead, we can simply rely on the kernel to consolidate several FIB configurations into the same FIB info and use the FIB info as the key for our private nexthop group struct. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 104 +++++++++++---------- 2 files changed, 54 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index dcd641aff785..8014d153975f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -254,13 +254,13 @@ struct mlxsw_sp_router { struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT]; struct mlxsw_sp_vr *vrs; struct rhashtable neigh_ht; + struct rhashtable nexthop_group_ht; struct { struct delayed_work dw; unsigned long interval; /* ms */ } neighs_update; struct delayed_work nexthop_probe_dw; #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */ - struct list_head nexthop_group_list; struct list_head nexthop_neighs_list; bool aborted; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 31680dea871a..a511b95bb6f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1107,9 +1107,14 @@ struct mlxsw_sp_nexthop { struct mlxsw_sp_neigh_entry *neigh_entry; }; +struct mlxsw_sp_nexthop_group_key { + struct fib_info *fi; +}; + struct mlxsw_sp_nexthop_group { - struct list_head list; /* node in mlxsw->router.nexthop_group_list */ + struct rhash_head ht_node; struct list_head fib_list; /* list of fib entries that use this group */ + struct mlxsw_sp_nexthop_group_key key; u8 adj_index_valid:1; u32 adj_index; u16 ecmp_size; @@ -1117,6 +1122,36 @@ struct mlxsw_sp_nexthop_group { struct mlxsw_sp_nexthop nexthops[0]; }; +static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key), + .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node), + .key_len = sizeof(struct mlxsw_sp_nexthop_group_key), +}; + +static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht, + &nh_grp->ht_node, + mlxsw_sp_nexthop_group_ht_params); +} + +static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht, + &nh_grp->ht_node, + mlxsw_sp_nexthop_group_ht_params); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group_key key) +{ + return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key, + mlxsw_sp_nexthop_group_ht_params); +} + static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, u32 adj_index, u16 ecmp_size, @@ -1429,6 +1464,7 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&nh_grp->fib_list); nh_grp->count = fi->fib_nhs; + nh_grp->key.fi = fi; for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; fib_nh = &fi->fib_nh[i]; @@ -1436,10 +1472,13 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) if (err) goto err_nexthop_init; } - list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list); + err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); + if (err) + goto err_nexthop_group_insert; mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); 
return nh_grp; +err_nexthop_group_insert: err_nexthop_init: for (i--; i >= 0; i--) mlxsw_sp_nexthop_fini(mlxsw_sp, nh); @@ -1454,7 +1493,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh; int i; - list_del(&nh_grp->list); + mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop_fini(mlxsw_sp, nh); @@ -1464,59 +1503,15 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, kfree(nh_grp); } -static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh, - struct fib_info *fi) -{ - int i; - - for (i = 0; i < fi->fib_nhs; i++) { - struct fib_nh *fib_nh = &fi->fib_nh[i]; - struct neighbour *n = nh->neigh_entry->key.n; - - if (memcmp(n->primary_key, &fib_nh->nh_gw, - sizeof(fib_nh->nh_gw)) == 0 && - n->dev == fib_nh->nh_dev) - return true; - } - return false; -} - -static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp, - struct fib_info *fi) -{ - int i; - - if (nh_grp->count != fi->fib_nhs) - return false; - for (i = 0; i < nh_grp->count; i++) { - struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; - - if (!mlxsw_sp_nexthop_match(nh, fi)) - return false; - } - return true; -} - -static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) -{ - struct mlxsw_sp_nexthop_group *nh_grp; - - list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list, - list) { - if (mlxsw_sp_nexthop_group_match(nh_grp, fi)) - return nh_grp; - } - return NULL; -} - static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, struct fib_info *fi) { + struct mlxsw_sp_nexthop_group_key key; struct mlxsw_sp_nexthop_group *nh_grp; - nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi); + key.fi = fi; + nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key); if (!nh_grp) { nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi); if (IS_ERR(nh_grp)) @@ -2053,11 +2048,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) int err; INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list); - INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) return err; + err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht, + &mlxsw_sp_nexthop_group_ht_params); + if (err) + goto err_nexthop_group_ht_init; + mlxsw_sp_lpm_init(mlxsw_sp); err = mlxsw_sp_vrs_init(mlxsw_sp); if (err) @@ -2080,6 +2079,8 @@ err_register_fib_notifier: err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); err_vrs_init: + rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); +err_nexthop_group_ht_init: __mlxsw_sp_router_fini(mlxsw_sp); return err; } @@ -2089,5 +2090,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_fib_notifier(&mlxsw_sp->fib_nb); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); + rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); __mlxsw_sp_router_fini(mlxsw_sp); } -- cgit v1.2.3 From c53b8e1b5a6a84da753d8b19c4f59fd8b693d579 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:30 +0100 Subject: mlxsw: spectrum_router: Store nexthops in a hash table Later in the patchset we'll add the NH_{ADD,DEL} events which will let us know when a nexthop is considered to be dead. Based on these events we need to be able to add or remove the nexthop from the device's tables. 
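For readers unfamiliar with the kernel's resizable hash tables, the lookup this calls for is the standard rhashtable pattern; the following is a minimal sketch with hypothetical foo_* names (not code from any patch in this series), keying a driver-private object by the kernel's fib_nh pointer:

#include <linux/rhashtable.h>
#include <net/ip_fib.h>

struct foo_nexthop {
	struct rhash_head ht_node;	/* linkage into the rhashtable */
	struct fib_nh *fib_nh;		/* the kernel nexthop acts as the key */
	/* ... driver-private state ... */
};

static const struct rhashtable_params foo_nexthop_ht_params = {
	.key_offset	= offsetof(struct foo_nexthop, fib_nh),
	.head_offset	= offsetof(struct foo_nexthop, ht_node),
	.key_len	= sizeof(struct fib_nh *),
};

static struct rhashtable foo_nexthop_ht;	/* rhashtable_init() at init time */

static int foo_nexthop_insert(struct foo_nexthop *nh)
{
	return rhashtable_insert_fast(&foo_nexthop_ht, &nh->ht_node,
				      foo_nexthop_ht_params);
}

static struct foo_nexthop *foo_nexthop_lookup(struct fib_nh *fib_nh)
{
	/* hashes the pointer value itself; the fib_nh is not dereferenced */
	return rhashtable_lookup_fast(&foo_nexthop_ht, &fib_nh,
				      foo_nexthop_ht_params);
}

The patches below follow exactly this shape, wrapping the key in a small mlxsw_sp_nexthop_key struct.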
Therefore, store the private nexthop structs in a hash table and use the kernel's fib_nh struct as the key, so that we'll be able to easily find them when the events are received. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 58 ++++++++++++++++++++-- 2 files changed, 55 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 8014d153975f..eb743fe35c91 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -255,6 +255,7 @@ struct mlxsw_sp_router { struct mlxsw_sp_vr *vrs; struct rhashtable neigh_ht; struct rhashtable nexthop_group_ht; + struct rhashtable nexthop_ht; struct { struct delayed_work dw; unsigned long interval; /* ms */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index a511b95bb6f1..3d540d9427a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1090,11 +1090,17 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router.neigh_ht); } +struct mlxsw_sp_nexthop_key { + struct fib_nh *fib_nh; +}; + struct mlxsw_sp_nexthop { struct list_head neigh_list_node; /* member of neigh entry list */ struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group * this belongs to */ + struct rhash_head ht_node; + struct mlxsw_sp_nexthop_key key; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. */ @@ -1152,6 +1158,26 @@ mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_ht_params); } +static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_nexthop, key), + .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node), + .key_len = sizeof(struct mlxsw_sp_nexthop_key), +}; + +static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht, + &nh->ht_node, mlxsw_sp_nexthop_ht_params); +} + +static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node, + mlxsw_sp_nexthop_ht_params); +} + static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, u32 adj_index, u16 ecmp_size, @@ -1384,6 +1410,12 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, struct net_device *dev = fib_nh->nh_dev; struct neighbour *n; u8 nud_state, dead; + int err; + + nh->key.fib_nh = fib_nh; + err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); + if (err) + return err; /* Take a reference of neigh here ensuring that neigh would * not be detructed before the nexthop entry is finished. 
@@ -1393,16 +1425,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); if (!n) { n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); - if (IS_ERR(n)) - return PTR_ERR(n); + if (IS_ERR(n)) { + err = PTR_ERR(n); + goto err_neigh_create; + } neigh_event_send(n, NULL); } neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (!neigh_entry) { neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); if (IS_ERR(neigh_entry)) { - neigh_release(n); - return -EINVAL; + err = -EINVAL; + goto err_neigh_entry_create; } } @@ -1423,6 +1457,12 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead)); return 0; + +err_neigh_entry_create: + neigh_release(n); +err_neigh_create: + mlxsw_sp_nexthop_remove(mlxsw_sp, nh); + return err; } static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, @@ -1445,6 +1485,8 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); neigh_release(n); + + mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } static struct mlxsw_sp_nexthop_group * @@ -2052,6 +2094,11 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) return err; + err = rhashtable_init(&mlxsw_sp->router.nexthop_ht, + &mlxsw_sp_nexthop_ht_params); + if (err) + goto err_nexthop_ht_init; + err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht, &mlxsw_sp_nexthop_group_ht_params); if (err) @@ -2081,6 +2128,8 @@ err_neigh_init: err_vrs_init: rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); err_nexthop_group_ht_init: + rhashtable_destroy(&mlxsw_sp->router.nexthop_ht); +err_nexthop_ht_init: __mlxsw_sp_router_fini(mlxsw_sp); return err; } @@ -2091,5 +2140,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); + rhashtable_destroy(&mlxsw_sp->router.nexthop_ht); __mlxsw_sp_router_fini(mlxsw_sp); } -- cgit v1.2.3 From d55409cb2858701292bbff7b3712d3beaefc17fe Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:31 +0100 Subject: mlxsw: spectrum_router: Use nexthop's scope to set action type We currently use the scope of the FIB info to distinguish between a direct unicast route and a gatewayed one. However, the kernel is perfectly happy to configure a route with scope UNIVERSE to a directly connected network. Instead, we can rely on the first nexthop's scope to check if the route is gatewayed or not. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3d540d9427a0..cad7e9abef22 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1721,7 +1721,7 @@ mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, return 0; } - if (fi->fib_scope != RT_SCOPE_UNIVERSE) { + if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) { fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; fib_entry->rif = r->rif; } else { -- cgit v1.2.3 From b3e8d1ebad2d041d3226ce283451bc9d38cf5870 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:32 +0100 Subject: mlxsw: spectrum_router: Add gateway indication to nexthop group The next patch is going to generalize the way in which we store routes. Instead of attaching a nexthop group only to gatewayed routes, one will be attached to each route, in a similar way to the way the FIB code stores its routes. The above means that any function operating on a nexthop group cannot assume the group represents only gatewayed nexthops. One such function is the one that refreshes a nexthop group and updates the adjacency table following nexthop changes. For a nexthop group that doesn't represent any gateways this function would essentially be a NOP, but it would be useful if it did update the action associated with any route using it. This will allow us to later consolidate code paths when a nexthop changes following NH_{ADD,DEL} events. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index cad7e9abef22..110a7bb65f02 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1121,7 +1121,8 @@ struct mlxsw_sp_nexthop_group { struct rhash_head ht_node; struct list_head fib_list; /* list of fib entries that use this group */ struct mlxsw_sp_nexthop_group_key key; - u8 adj_index_valid:1; + u8 adj_index_valid:1, + gateway:1; /* routes using the group use a gateway */ u32 adj_index; u16 ecmp_size; u16 count; @@ -1292,6 +1293,11 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, int i; int err; + if (!nh_grp->gateway) { + mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); + return; + } + for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; @@ -1505,6 +1511,7 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) if (!nh_grp) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&nh_grp->fib_list); + nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; nh_grp->count = fi->fib_nhs; nh_grp->key.fi = fi; for (i = 0; i < nh_grp->count; i++) { -- cgit v1.2.3 From b8399a1e5ad86807cee52b65820ffa543c8d66d0 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:33 +0100 Subject: mlxsw: spectrum_router: Store routes in a more generic way Up until now, the only FIB entries that were associated with a nexthop group were routes to remote networks where all the nexthop devices had a valid router interface (RIF). This is in contrast to the FIB code, where all the routes are associated with a FIB info. 
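In rough terms the kernel chains fib_alias -> fib_info -> fib_nh[], and by the end of this series the driver cache mirrors that chain. A condensed sketch of the relevant members, collected from the patches in this series with everything else omitted (declarations only, for orientation):

#include <linux/list.h>
#include <linux/types.h>

struct fib_nh;
struct fib_info;
struct mlxsw_sp_rif;

struct mlxsw_sp_nexthop_key { struct fib_nh *fib_nh; };
struct mlxsw_sp_nexthop_group_key { struct fib_info *fi; };

struct mlxsw_sp_nexthop {
	struct mlxsw_sp_nexthop_key key;	/* the kernel nexthop it mirrors */
	struct mlxsw_sp_rif *r;			/* egress RIF, may be NULL */
};

struct mlxsw_sp_nexthop_group {
	struct mlxsw_sp_nexthop_group_key key;	/* the kernel fib_info is the key */
	struct list_head fib_list;		/* every route using this group */
	u8 gateway:1;				/* nexthops resolve through gateways */
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];	/* one per fib_nh in the fib_info */
};

struct mlxsw_sp_fib_entry {
	struct list_head nexthop_group_node;	 /* node in nh_group->fib_list */
	struct mlxsw_sp_nexthop_group *nh_group; /* set for every route from now on */
};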
The same design choice needs to be applied to the driver's cache. Based on the NH_{ADD,DEL} events which will be added later in the patchset, we need to be able to change the action (forward / trap) associated with all the routes using the nexthop group. However, if we can't link between the nexthop and the routes using it, then the above is impossible. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 40 +++++++++++++++------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 110a7bb65f02..8a72a1b9e24a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -127,7 +127,6 @@ struct mlxsw_sp_fib_entry { struct mlxsw_sp_fib_key key; enum mlxsw_sp_fib_entry_type type; unsigned int ref_count; - u16 rif; /* used for action local */ struct mlxsw_sp_vr *vr; struct fib_info *fi; struct list_head nexthop_group_node; @@ -1101,6 +1100,7 @@ struct mlxsw_sp_nexthop { */ struct rhash_head ht_node; struct mlxsw_sp_nexthop_key key; + struct mlxsw_sp_rif *r; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. */ @@ -1127,6 +1127,7 @@ struct mlxsw_sp_nexthop_group { u16 ecmp_size; u16 count; struct mlxsw_sp_nexthop nexthops[0]; +#define nh_rif nexthops[0].r }; static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { @@ -1414,15 +1415,25 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_neigh_entry *neigh_entry; struct net_device *dev = fib_nh->nh_dev; + struct mlxsw_sp_rif *r; struct neighbour *n; u8 nud_state, dead; int err; + nh->nh_grp = nh_grp; nh->key.fib_nh = fib_nh; err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); if (err) return err; + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!r) + return 0; + nh->r = r; + + if (!nh_grp->gateway) + return 0; + /* Take a reference of neigh here ensuring that neigh would * not be detructed before the nexthop entry is finished. 
* The reference is taken either in neigh_lookup() or @@ -1453,7 +1464,6 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, list_add_tail(&neigh_entry->nexthop_neighs_list_node, &mlxsw_sp->router.nexthop_neighs_list); - nh->nh_grp = nh_grp; nh->neigh_entry = neigh_entry; list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list); read_lock_bh(&n->lock); @@ -1477,6 +1487,9 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; struct neighbour *n = neigh_entry->key.n; + if (!neigh_entry) + goto out; + __mlxsw_sp_nexthop_neigh_update(nh, true); list_del(&nh->neigh_list_node); nh->neigh_entry = NULL; @@ -1492,6 +1505,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, neigh_release(n); +out: mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } @@ -1619,6 +1633,7 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { + struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif; char ralue_pl[MLXSW_REG_RALUE_LEN]; u32 *p_dip = (u32 *) fib_entry->key.addr; struct mlxsw_sp_vr *vr = fib_entry->vr; @@ -1628,7 +1643,7 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, vr->id, fib_entry->key.prefix_len, *p_dip); mlxsw_reg_ralue_act_local_pack(ralue_pl, MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0, - fib_entry->rif); + r->rif); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } @@ -1697,7 +1712,6 @@ mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi = fen_info->fi; struct mlxsw_sp_rif *r = NULL; int nhsel; - int err; if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) { fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; @@ -1728,15 +1742,10 @@ mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, return 0; } - if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) { + if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; - fib_entry->rif = r->rif; - } else { + else fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; - err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi); - if (err) - return err; - } fib_info_offload_inc(fen_info->fi); return 0; } @@ -1747,8 +1756,6 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp, { if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP) fib_info_offload_dec(fib_entry->fi); - if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE) - mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); } static struct mlxsw_sp_fib_entry * @@ -1788,8 +1795,14 @@ mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, if (err) goto err_fib4_entry_init; + err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi); + if (err) + goto err_nexthop_group_get; + return fib_entry; +err_nexthop_group_get: + mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); err_fib4_entry_init: mlxsw_sp_fib_entry_destroy(fib_entry); err_fib_entry_create: @@ -1821,6 +1834,7 @@ static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr = fib_entry->vr; if (--fib_entry->ref_count == 0) { + mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); mlxsw_sp_fib_entry_destroy(fib_entry); } -- cgit v1.2.3 From c8b030774f43a816e287e50581fe84e8ea905c44 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:34 +0100 Subject: mlxsw: spectrum_router: Remove FIB info from FIB entry struct After the previous changes, the FIB info is embedded in every nexthop group struct, which 
in turn is embedded in every FIB entry struct. We can therefore safely remove the FIB info from the entry struct. This has the added advantage of making the router-related structs more generic and suitable for use with IPv6 offloads. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 8a72a1b9e24a..74839f7f368a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -128,7 +128,6 @@ struct mlxsw_sp_fib_entry { enum mlxsw_sp_fib_entry_type type; unsigned int ref_count; struct mlxsw_sp_vr *vr; - struct fib_info *fi; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; }; @@ -1755,7 +1754,7 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP) - fib_info_offload_dec(fib_entry->fi); + fib_info_offload_dec(fib_entry->nh_group->key.fi); } static struct mlxsw_sp_fib_entry * @@ -1788,7 +1787,6 @@ mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, goto err_fib_entry_create; } fib_entry->vr = vr; - fib_entry->fi = fi; fib_entry->ref_count = 1; err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry); -- cgit v1.2.3 From a8c9701427987d959fec3f99bb2811d943686b7b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:35 +0100 Subject: mlxsw: spectrum_router: Refactor nexthop init routine The nexthop init and de-init functions both have symmetric parts concerned with the reflection of the neighbour entry into the device's adjacency table, in case it's used by a gatewayed route. These sections of code also need to be called when a nexthop is marked as valid / invalid following NH_{ADD,DEL} events. Break these out into appropriate functions, so that they could be invoked following the reception of above events. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 80 +++++++++++++--------- 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 74839f7f368a..f53513580c2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1407,30 +1407,16 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, } } -static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, - struct mlxsw_sp_nexthop *nh, - struct fib_nh *fib_nh) +static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry; - struct net_device *dev = fib_nh->nh_dev; - struct mlxsw_sp_rif *r; + struct fib_nh *fib_nh = nh->key.fib_nh; struct neighbour *n; u8 nud_state, dead; int err; - nh->nh_grp = nh_grp; - nh->key.fib_nh = fib_nh; - err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); - if (err) - return err; - - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!r) - return 0; - nh->r = r; - - if (!nh_grp->gateway) + if (!nh->nh_grp->gateway) return 0; /* Take a reference of neigh here ensuring that neigh would @@ -1438,13 +1424,11 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, * The reference is taken either in neigh_lookup() or * in neigh_create() in case n is not found. */ - n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); + n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); if (!n) { - n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); - if (IS_ERR(n)) { - err = PTR_ERR(n); - goto err_neigh_create; - } + n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); + if (IS_ERR(n)) + return PTR_ERR(n); neigh_event_send(n, NULL); } neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); @@ -1475,19 +1459,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, err_neigh_entry_create: neigh_release(n); -err_neigh_create: - mlxsw_sp_nexthop_remove(mlxsw_sp, nh); return err; } -static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; - struct neighbour *n = neigh_entry->key.n; + struct neighbour *n; if (!neigh_entry) - goto out; + return; + n = neigh_entry->key.n; __mlxsw_sp_nexthop_neigh_update(nh, true); list_del(&nh->neigh_list_node); @@ -1503,8 +1486,43 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); neigh_release(n); +} -out: +static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + struct fib_nh *fib_nh) +{ + struct net_device *dev = fib_nh->nh_dev; + struct mlxsw_sp_rif *r; + int err; + + nh->nh_grp = nh_grp; + nh->key.fib_nh = fib_nh; + err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); + if (err) + return err; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!r) + return 0; + nh->r = r; + + err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + if (err) + goto err_nexthop_neigh_init; + + return 0; + +err_nexthop_neigh_init: + mlxsw_sp_nexthop_remove(mlxsw_sp, nh); + return err; +} + +static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_remove(mlxsw_sp, 
nh); } -- cgit v1.2.3 From 013b20f953ab011ffb5d203a07b981bad16342f4 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:36 +0100 Subject: mlxsw: spectrum_router: More accurately set offload flag We currently set the RTNH_F_OFFLOAD flag for all routes using remote action, but this isn't always correct. If none of the nexthops associated with a gatewayed route can be offloaded into the device, then any packet hitting it would be trapped to the CPU and forwarded by the kernel. Solve this by pushing the setting of the offload flag to after the route was programmed into the device, thereby allowing us to take all the parameters into account. This change will also help us further in the patchset, when we refresh routes following the reception of NH_{ADD,DEL} events. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 100 ++++++++++++++++----- 1 file changed, 80 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f53513580c2a..acabb35cda4a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -130,6 +130,7 @@ struct mlxsw_sp_fib_entry { struct mlxsw_sp_vr *vr; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; + bool offloaded; }; struct mlxsw_sp_fib { @@ -1613,6 +1614,72 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); } +static bool +mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; + + switch (fib_entry->type) { + case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: + return !!nh_group->adj_index_valid; + case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: + return true; + default: + return false; + } +} + +static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ + fib_entry->offloaded = true; + + switch (fib_entry->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + fib_info_offload_inc(fib_entry->nh_group->key.fi); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON_ONCE(1); + } +} + +static void +mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + switch (fib_entry->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + fib_info_offload_dec(fib_entry->nh_group->key.fi); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON_ONCE(1); + } + + fib_entry->offloaded = false; +} + +static void +mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op, int err) +{ + switch (op) { + case MLXSW_REG_RALUE_OP_WRITE_DELETE: + if (!fib_entry->offloaded) + return; + return mlxsw_sp_fib_entry_offload_unset(fib_entry); + case MLXSW_REG_RALUE_OP_WRITE_WRITE: + if (err) + return; + if (mlxsw_sp_fib_entry_should_offload(fib_entry) && + !fib_entry->offloaded) + mlxsw_sp_fib_entry_offload_set(fib_entry); + else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) && + fib_entry->offloaded) + mlxsw_sp_fib_entry_offload_unset(fib_entry); + return; + default: + return; + } +} + static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) @@ -1698,13 +1765,17 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { + int err = -EINVAL; + switch 
(fib_entry->vr->proto) { case MLXSW_SP_L3_PROTO_IPV4: - return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); + err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); + break; case MLXSW_SP_L3_PROTO_IPV6: - return -EINVAL; + return err; } - return -EINVAL; + mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err); + return err; } static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, @@ -1722,9 +1793,9 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info, - struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info, + struct mlxsw_sp_fib_entry *fib_entry) { struct fib_info *fi = fen_info->fi; struct mlxsw_sp_rif *r = NULL; @@ -1763,18 +1834,9 @@ mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; else fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; - fib_info_offload_inc(fen_info->fi); return 0; } -static void -mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) -{ - if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP) - fib_info_offload_dec(fib_entry->nh_group->key.fi); -} - static struct mlxsw_sp_fib_entry * mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info) @@ -1807,9 +1869,9 @@ mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, fib_entry->vr = vr; fib_entry->ref_count = 1; - err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry); + err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); if (err) - goto err_fib4_entry_init; + goto err_fib4_entry_type_set; err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi); if (err) @@ -1818,8 +1880,7 @@ mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, return fib_entry; err_nexthop_group_get: - mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); -err_fib4_entry_init: +err_fib4_entry_type_set: mlxsw_sp_fib_entry_destroy(fib_entry); err_fib_entry_create: mlxsw_sp_vr_put(mlxsw_sp, vr); @@ -1851,7 +1912,6 @@ static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp, if (--fib_entry->ref_count == 0) { mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); - mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); mlxsw_sp_fib_entry_destroy(fib_entry); } mlxsw_sp_vr_put(mlxsw_sp, vr); -- cgit v1.2.3 From 4b4114775143db27371cc4da500ea44e7601955f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:37 +0100 Subject: mlxsw: spectrum_router: Determine offload status using generic function The previous patch introduced a generic function to determine whether a route should be offloaded or not. Make use of it here. In the future we're going to add more conditions to this test (e.g., whether TOS is non-zero), so it makes sense to centralize it instead of open coding it in a few places. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index acabb35cda4a..c452bd3440a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1696,7 +1696,7 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, * with provided ECMP size. Otherwise, setup trap and pass * traffic to kernel. */ - if (fib_entry->nh_group->adj_index_valid) { + if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; adjacency_index = fib_entry->nh_group->adj_index; ecmp_size = fib_entry->nh_group->ecmp_size; -- cgit v1.2.3 From 70ad35067c99dd6fb1fefbabf11dd53806f1978c Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:38 +0100 Subject: mlxsw: spectrum_router: Use trap action only for some route types The device can have one of three actions associated with a route: 1) Remote - packets continue to the adjacency table 2) Local - packets continue to the neighbour table 3) Trap - packets continue to the CPU The first two actions can also trap packets to the CPU, but they do so using a different trap ID, which has a lower traffic class and less allotted bandwidth. We currently use the third action for both RTN_{LOCAL,BROADCAST} routes and RTN_UNICAST routes not pointing to the switch ports. However, packets that merely need to be forwarded by the switch are likely not control packets and can be therefore scheduled towards the CPU using a lower traffic class. Achieve the above by assigning the third action only to local and broadcast routes and have any other route use either of the first two actions, based on whether the route is gatewayed or not. This will also allow us to refresh routes using the local action and have them trap packets when their RIF is no longer valid following a NH_DEL event. One side effect of this patch is that we no longer give special treatment to multipath routes using both switch and non-switch ports towards their nexthops. If at least one of the nexthops can be resolved, then the device will forward the packets instead of trapping them. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 42 +++++++--------------- 1 file changed, 13 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c452bd3440a0..136a50cffa85 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1623,7 +1623,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: return !!nh_group->adj_index_valid; case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: - return true; + return !!nh_group->nh_rif; default: return false; } @@ -1718,16 +1718,25 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, enum mlxsw_reg_ralue_op op) { struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif; + enum mlxsw_reg_ralue_trap_action trap_action; char ralue_pl[MLXSW_REG_RALUE_LEN]; u32 *p_dip = (u32 *) fib_entry->key.addr; struct mlxsw_sp_vr *vr = fib_entry->vr; + u16 trap_id = 0; + u16 rif = 0; + + if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; + rif = r->rif; + } else { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; + trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; + } mlxsw_reg_ralue_pack4(ralue_pl, (enum mlxsw_reg_ralxx_protocol) vr->proto, op, vr->id, fib_entry->key.prefix_len, *p_dip); - mlxsw_reg_ralue_act_local_pack(ralue_pl, - MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0, - r->rif); + mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } @@ -1798,8 +1807,6 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { struct fib_info *fi = fen_info->fi; - struct mlxsw_sp_rif *r = NULL; - int nhsel; if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) { fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; @@ -1807,29 +1814,6 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, } if (fen_info->type != RTN_UNICAST) return -EINVAL; - - for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { - const struct fib_nh *nh = &fi->fib_nh[nhsel]; - - if (!nh->nh_dev) - continue; - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev); - if (!r) { - /* In case router interface is not found for - * at least one of the nexthops, that means - * the nexthop points to some device unrelated - * to us. Set trap and pass the packets for - * this prefix to kernel. - */ - break; - } - } - - if (!r) { - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; - return 0; - } - if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; else -- cgit v1.2.3 From 982acb97560c8118c2109504a22b0d78a580547d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:39 +0100 Subject: ipv4: fib: Notify about nexthop status changes When a multipath route is hit the kernel doesn't consider nexthops that are DEAD or LINKDOWN when IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN is set. Devices that offload multipath routes need to be made aware of nexthop status changes. Otherwise, the device will keep forwarding packets to non-functional nexthops. Add the FIB_EVENT_NH_{ADD,DEL} events to the fib notification chain, which notify capable devices when they should add or delete a nexthop from their tables. Cc: Roopa Prabhu Cc: David Ahern Cc: Andy Gospodarek Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Reviewed-by: Andy Gospodarek Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 7 +++++++ net/ipv4/fib_semantics.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 57c2a863d0b2..45a184eaff2b 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -214,11 +214,18 @@ struct fib_entry_notifier_info { u32 nlflags; }; +struct fib_nh_notifier_info { + struct fib_notifier_info info; /* must be first */ + struct fib_nh *fib_nh; +}; + enum fib_event_type { FIB_EVENT_ENTRY_ADD, FIB_EVENT_ENTRY_DEL, FIB_EVENT_RULE_ADD, FIB_EVENT_RULE_DEL, + FIB_EVENT_NH_ADD, + FIB_EVENT_NH_DEL, }; int register_fib_notifier(struct notifier_block *nb, diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 6306a67880e8..317026a39cfa 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1355,6 +1355,36 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local) return ret; } +static int call_fib_nh_notifiers(struct fib_nh *fib_nh, + enum fib_event_type event_type) +{ + struct in_device *in_dev = __in_dev_get_rtnl(fib_nh->nh_dev); + struct fib_nh_notifier_info info = { + .fib_nh = fib_nh, + }; + + switch (event_type) { + case FIB_EVENT_NH_ADD: + if (fib_nh->nh_flags & RTNH_F_DEAD) + break; + if (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && + fib_nh->nh_flags & RTNH_F_LINKDOWN) + break; + return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type, + &info.info); + case FIB_EVENT_NH_DEL: + if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && + fib_nh->nh_flags & RTNH_F_LINKDOWN) || + (fib_nh->nh_flags & RTNH_F_DEAD)) + return call_fib_notifiers(dev_net(fib_nh->nh_dev), + event_type, &info.info); + default: + break; + } + + return NOTIFY_DONE; +} + /* Event force Flags Description * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host @@ -1396,6 +1426,8 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) nexthop_nh->nh_flags |= RTNH_F_LINKDOWN; break; } + call_fib_nh_notifiers(nexthop_nh, + FIB_EVENT_NH_DEL); dead++; } #ifdef CONFIG_IP_ROUTE_MULTIPATH @@ -1550,6 +1582,7 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags) continue; alive++; nexthop_nh->nh_flags &= ~nh_flags; + call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD); } endfor_nexthops(fi) if (alive > 0) { -- cgit v1.2.3 From ad178c8eefdd94c946e3b808d8c772541a18b97d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:40 +0100 Subject: mlxsw: spectrum_router: Reflect nexthop status changes When a packet hits a multipath route in the device's routing table, a hash is computed over its headers, which is then used to select the appropriate nexthop from the device's adjacency table. There are situations in which the kernel removes a nexthop from a multipath route (e.g., no carrier) and the device should do the same. Upon the reception of NH_{ADD,DEL} events, add or remove a nexthop from the device's adjacency table and refresh all the routes using the nexthop group. If all the nexthops of a multipath route are invalid, then any packet hitting the route would be trapped to the CPU for forwarding. If all the nexthops are DEAD, then the kernel would remove the route entirely. On the other hand, if all the nexthops are merely LINKDOWN, then the kernel would keep the route and forward any incoming packet using a different route. 
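On the driver side, the new FIB_EVENT_NH_{ADD,DEL} notifications arrive through the same FIB notifier chain as route events; a skeletal consumer, heavily simplified relative to the mlxsw handler shown further below (which defers the work to process context and takes a reference on the fib_info), might look like this, with foo_fib_event being a hypothetical name:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <net/ip_fib.h>

static int foo_fib_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct fib_nh_notifier_info *fnh_info;

	switch (event) {
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		/* ptr is the embedded fib_notifier_info, which is the first
		 * member of fib_nh_notifier_info
		 */
		fnh_info = container_of(ptr, struct fib_nh_notifier_info,
					info);
		/* reflect fnh_info->fib_nh into, or out of, the HW tables */
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}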
While the last case might sound like a problem, it's expected that a routing daemon running in user space would remove such a route from the FIB as it's dumped with the DEAD flag set. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 59 +++++++++++++++++++++- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 136a50cffa85..1c68b4021524 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1180,6 +1180,14 @@ static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_ht_params); } +static struct mlxsw_sp_nexthop * +mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_key key) +{ + return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key, + mlxsw_sp_nexthop_ht_params); +} + static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, u32 adj_index, u16 ecmp_size, @@ -1417,7 +1425,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, u8 nud_state, dead; int err; - if (!nh->nh_grp->gateway) + if (!nh->nh_grp->gateway || nh->neigh_entry) return 0; /* Take a reference of neigh here ensuring that neigh would @@ -1527,6 +1535,39 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } +static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, + unsigned long event, struct fib_nh *fib_nh) +{ + struct mlxsw_sp_nexthop_key key; + struct mlxsw_sp_nexthop *nh; + struct mlxsw_sp_rif *r; + + if (mlxsw_sp->router.aborted) + return; + + key.fib_nh = fib_nh; + nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key); + if (WARN_ON_ONCE(!nh)) + return; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev); + if (!r) + return; + + switch (event) { + case FIB_EVENT_NH_ADD: + nh->r = r; + mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + break; + case FIB_EVENT_NH_DEL: + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + nh->r = NULL; + break; + } + + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); +} + static struct mlxsw_sp_nexthop_group * mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { @@ -2085,7 +2126,10 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_fib_event_work { struct work_struct work; - struct fib_entry_notifier_info fen_info; + union { + struct fib_entry_notifier_info fen_info; + struct fib_nh_notifier_info fnh_info; + }; struct mlxsw_sp *mlxsw_sp; unsigned long event; }; @@ -2114,6 +2158,12 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) case FIB_EVENT_RULE_DEL: mlxsw_sp_router_fib4_abort(mlxsw_sp); break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event, + fib_work->fnh_info.fib_nh); + fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); + break; } rtnl_unlock(); kfree(fib_work); @@ -2147,6 +2197,11 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, */ fib_info_hold(fib_work->fen_info.fi); break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info)); + fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + break; } mlxsw_core_schedule_work(&fib_work->work); -- cgit v1.2.3 From 9665b74562ab95bcf1b0684d599bc6779c433fca 
Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 11:16:42 +0100 Subject: mlxsw: spectrum_router: Flush resources when RIF is deleted When the last IP address is removed from a netdev, its RIF is deleted. However, if user didn't first remove neighbours and nexthops using this interface, then they would still be present in the device's tables. Therefore, whenever a RIF is deleted, make sure all the neighbours and nexthops (adjacency entries) using it are removed from the relevant tables as well. The action associated with any route using this RIF would be refreshed, most likely to trap. If the kernel decides to remove the route (f.e., because all the nexthops are now DEAD), then an event would be sent, causing the route to be removed from the device. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 + .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 86 +++++++++++++++++++++- 3 files changed, 93 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c2a675125801..8de5ed526904 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3473,6 +3473,8 @@ mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) if (!r) return NULL; + INIT_LIST_HEAD(&r->nexthop_list); + INIT_LIST_HEAD(&r->neigh_list); ether_addr_copy(r->addr, l3_dev->dev_addr); r->mtu = l3_dev->mtu; r->ref_count = 1; @@ -3541,6 +3543,8 @@ static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid = f->fid; u16 rif = r->rif; + mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); + mlxsw_sp->rifs[rif] = NULL; f->r = NULL; @@ -3770,6 +3774,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f = r->f; u16 rif = r->rif; + mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); + mlxsw_sp->rifs[rif] = NULL; f->r = NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index eb743fe35c91..145897c5a779 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -108,6 +108,8 @@ struct mlxsw_sp_fid { }; struct mlxsw_sp_rif { + struct list_head nexthop_list; + struct list_head neigh_list; struct net_device *dev; unsigned int ref_count; struct mlxsw_sp_fid *f; @@ -602,6 +604,8 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr); +void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r); int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1c68b4021524..a080c95fed6b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -609,6 +609,7 @@ struct mlxsw_sp_neigh_key { }; struct mlxsw_sp_neigh_entry { + struct list_head rif_list_node; struct rhash_head ht_node; struct mlxsw_sp_neigh_key key; u16 rif; @@ -685,6 +686,8 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct 
neighbour *n) if (err) goto err_neigh_entry_insert; + list_add(&neigh_entry->rif_list_node, &r->neigh_list); + return neigh_entry; err_neigh_entry_insert: @@ -696,6 +699,7 @@ static void mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry) { + list_del(&neigh_entry->rif_list_node); mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); mlxsw_sp_neigh_entry_free(neigh_entry); } @@ -1089,12 +1093,34 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router.neigh_ht); } +static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_rif *r) +{ + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + + mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, + r->rif, r->addr); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + +static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; + + mlxsw_sp_neigh_rif_flush(mlxsw_sp, r); + list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list, + rif_list_node) + mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); +} + struct mlxsw_sp_nexthop_key { struct fib_nh *fib_nh; }; struct mlxsw_sp_nexthop { struct list_head neigh_list_node; /* member of neigh entry list */ + struct list_head rif_list_node; struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group * this belongs to */ @@ -1416,6 +1442,25 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, } } +static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh, + struct mlxsw_sp_rif *r) +{ + if (nh->r) + return; + + nh->r = r; + list_add(&nh->rif_list_node, &r->nexthop_list); +} + +static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh) +{ + if (!nh->r) + return; + + list_del(&nh->rif_list_node); + nh->r = NULL; +} + static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { @@ -1515,7 +1560,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); if (!r) return 0; - nh->r = r; + mlxsw_sp_nexthop_rif_init(nh, r); err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); if (err) @@ -1532,6 +1577,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } @@ -1556,18 +1602,30 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, switch (event) { case FIB_EVENT_NH_ADD: - nh->r = r; + mlxsw_sp_nexthop_rif_init(nh, r); mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); break; case FIB_EVENT_NH_DEL: mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); - nh->r = NULL; + mlxsw_sp_nexthop_rif_fini(nh); break; } mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); } +static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + struct mlxsw_sp_nexthop *nh, *tmp; + + list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) { + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + } +} + static struct mlxsw_sp_nexthop_group * mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { @@ -2082,6 +2140,28 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n"); } +static int mlxsw_sp_router_rif_disable(struct mlxsw_sp 
*mlxsw_sp, u16 rif) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + int err; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (WARN_ON_ONCE(err)) + return err; + + mlxsw_reg_ritr_enable_set(ritr_pl, false); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif); + mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r); + mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r); +} + static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { char rgcr_pl[MLXSW_REG_RGCR_LEN]; -- cgit v1.2.3 From df6dd79be8d85ee098433bca23678fab508db541 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 8 Feb 2017 14:36:49 +0100 Subject: mlxsw: spectrum_router: Don't reflect LINKDOWN nexthops The kernel resolves the nexthops for a given route using FIB_LOOKUP_IGNORE_LINKSTATE which means a notification can be sent for a route with one of its nexthops being LINKDOWN. In case IGNORE_ROUTES_WITH_LINKDOWN is set for the nexthop netdev, then we shouldn't reflect the nexthop to the device's table. Once the nexthop netdev's carrier goes up we'll be notified using NH_ADD and reflect it to the device. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index a080c95fed6b..71ff02f7e29a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -1548,6 +1549,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, struct fib_nh *fib_nh) { struct net_device *dev = fib_nh->nh_dev; + struct in_device *in_dev; struct mlxsw_sp_rif *r; int err; @@ -1557,6 +1559,11 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; + in_dev = __in_dev_get_rtnl(dev); + if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && + fib_nh->nh_flags & RTNH_F_LINKDOWN) + return 0; + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); if (!r) return 0; @@ -1660,8 +1667,10 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) err_nexthop_group_insert: err_nexthop_init: - for (i--; i >= 0; i--) + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + } kfree(nh_grp); return ERR_PTR(err); } -- cgit v1.2.3 From 960fdfdeb9e85a67bed136bc945c541ba61c2bdd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Feb 2017 14:52:30 +0100 Subject: xfrm: input: constify xfrm_input_afinfo Nothing writes to these structures (the module owner was not used). While at it, size xfrm_input_afinfo[] by the highest existing xfrm family (INET6), not AF_MAX. 
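Illustrative sketch of the registration pattern this change moves to, using invented demo_* names rather than the real xfrm symbols (the diff below is the authoritative change; the fragment only restates the pattern in plain, non-diff form and assumes a kernel build context):

    #include <linux/bug.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/skbuff.h>
    #include <linux/socket.h>
    #include <linux/spinlock.h>

    struct demo_input_afinfo {
            unsigned int family;
            int (*callback)(struct sk_buff *skb, u8 protocol, int err);
    };

    static DEFINE_SPINLOCK(demo_input_lock);
    /* sized by the highest family that can register (AF_INET6), not AF_MAX */
    static const struct demo_input_afinfo __rcu *demo_input_table[AF_INET6 + 1];

    int demo_input_register(const struct demo_input_afinfo *afinfo)
    {
            int err = 0;

            if (WARN_ON(afinfo->family >= ARRAY_SIZE(demo_input_table)))
                    return -EAFNOSUPPORT;

            spin_lock_bh(&demo_input_lock);
            if (rcu_access_pointer(demo_input_table[afinfo->family]))
                    err = -EEXIST;          /* slot already taken */
            else
                    rcu_assign_pointer(demo_input_table[afinfo->family], afinfo);
            spin_unlock_bh(&demo_input_lock);
            return err;
    }

Because the table only ever holds pointers to const structures, readers simply rcu_dereference() the slot under rcu_read_lock(), as the reworked xfrm_input_get_afinfo() in the diff does.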
Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 5 ++--- net/ipv4/xfrm4_protocol.c | 3 +-- net/ipv6/xfrm6_protocol.c | 3 +-- net/xfrm/xfrm_input.c | 31 +++++++++++-------------------- 4 files changed, 15 insertions(+), 27 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d9a81dcef53e..da0e4dd653e2 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -349,13 +349,12 @@ struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family); struct xfrm_input_afinfo { unsigned int family; - struct module *owner; int (*callback)(struct sk_buff *skb, u8 protocol, int err); }; -int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo); -int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo); +int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); +int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); void xfrm_state_delete_tunnel(struct xfrm_state *x); diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c index dccefa9d84cf..8dd0e6ab8606 100644 --- a/net/ipv4/xfrm4_protocol.c +++ b/net/ipv4/xfrm4_protocol.c @@ -188,9 +188,8 @@ static const struct net_protocol ipcomp4_protocol = { .netns_ok = 1, }; -static struct xfrm_input_afinfo xfrm4_input_afinfo = { +static const struct xfrm_input_afinfo xfrm4_input_afinfo = { .family = AF_INET, - .owner = THIS_MODULE, .callback = xfrm4_rcv_cb, }; diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c index 54d13f8dbbae..b2dc8ce49378 100644 --- a/net/ipv6/xfrm6_protocol.c +++ b/net/ipv6/xfrm6_protocol.c @@ -162,9 +162,8 @@ static const struct inet6_protocol ipcomp6_protocol = { .flags = INET6_PROTO_NOPOLICY, }; -static struct xfrm_input_afinfo xfrm6_input_afinfo = { +static const struct xfrm_input_afinfo xfrm6_input_afinfo = { .family = AF_INET6, - .owner = THIS_MODULE, .callback = xfrm6_rcv_cb, }; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 3213fe8027be..8722294c6e59 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -19,19 +19,18 @@ static struct kmem_cache *secpath_cachep __read_mostly; static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); -static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO]; +static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1]; static struct gro_cells gro_cells; static struct net_device xfrm_napi_dev; -int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo) +int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) { int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) + if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo))) return -EAFNOSUPPORT; + spin_lock_bh(&xfrm_input_afinfo_lock); if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL)) err = -EEXIST; @@ -42,14 +41,10 @@ int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_input_register_afinfo); -int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo) +int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo) { int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) - return -EAFNOSUPPORT; spin_lock_bh(&xfrm_input_afinfo_lock); if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) { if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo)) @@ -63,12 +58,13 @@ int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_input_unregister_afinfo); -static 
struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) +static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) { - struct xfrm_input_afinfo *afinfo; + const struct xfrm_input_afinfo *afinfo; - if (unlikely(family >= NPROTO)) + if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo))) return NULL; + rcu_read_lock(); afinfo = rcu_dereference(xfrm_input_afinfo[family]); if (unlikely(!afinfo)) @@ -76,22 +72,17 @@ static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) return afinfo; } -static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo) -{ - rcu_read_unlock(); -} - static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol, int err) { int ret; - struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family); + const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family); if (!afinfo) return -EAFNOSUPPORT; ret = afinfo->callback(skb, protocol, err); - xfrm_input_put_afinfo(afinfo); + rcu_read_unlock(); return ret; } -- cgit v1.2.3 From f5e2bb4f5b2252e8f170f59bece2cf9a2efc8e6a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Feb 2017 15:00:14 +0100 Subject: xfrm: policy: xfrm_get_tos cannot fail The comment makes it look like get_tos() is used to validate something, but it turns out the comment was about xfrm_find_bundle() which got removed years ago. xfrm_get_tos will return either the tos (ipv4) or 0 (ipv6). Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 99ad1af2927f..6fffa2fac607 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1538,19 +1538,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, } -/* Check that the bundle accepts the flow and its components are - * still valid. - */ - -static inline int xfrm_get_tos(const struct flowi *fl, int family) +static int xfrm_get_tos(const struct flowi *fl, int family) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); - int tos; - - if (!afinfo) - return -EINVAL; + struct xfrm_policy_afinfo *afinfo; + int tos = 0; - tos = afinfo->get_tos(fl); + afinfo = xfrm_policy_get_afinfo(family); + tos = afinfo ? afinfo->get_tos(fl) : 0; xfrm_policy_put_afinfo(afinfo); @@ -1705,9 +1699,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, xfrm_flowi_addr_get(fl, &saddr, &daddr, family); tos = xfrm_get_tos(fl, family); - err = tos; - if (tos < 0) - goto put_states; dst_hold(dst); -- cgit v1.2.3 From 2b61997aa0c68ae033d066ac2d9905ada81b761a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Feb 2017 15:00:15 +0100 Subject: xfrm: policy: xfrm_policy_unregister_afinfo can return void Nothing checks the return value. Also, the errors returned on unregister are impossible (we only support INET and INET6, so no way xfrm_policy_afinfo[afinfo->family] can be anything other than 'afinfo' itself). 
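A hedged sketch of the resulting shape, again with invented demo_* names (the diff below is the real change): once the unreachable error paths are gone and no caller inspects the result, the unregister helper has nothing left to report and can return void:

    #include <linux/bug.h>
    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/socket.h>
    #include <linux/spinlock.h>

    struct demo_pol_afinfo {
            unsigned short family;
    };

    static DEFINE_SPINLOCK(demo_pol_lock);
    static const struct demo_pol_afinfo __rcu *demo_pol_table[AF_INET6 + 1];

    /* was: int demo_pol_unregister(...), returning errors nobody checked */
    void demo_pol_unregister(const struct demo_pol_afinfo *afinfo)
    {
            if (WARN_ON(afinfo->family >= ARRAY_SIZE(demo_pol_table)))
                    return;

            spin_lock(&demo_pol_lock);
            if (rcu_access_pointer(demo_pol_table[afinfo->family]) == afinfo)
                    RCU_INIT_POINTER(demo_pol_table[afinfo->family], NULL);
            spin_unlock(&demo_pol_lock);

            synchronize_rcu();      /* wait out readers still holding the pointer */
    }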
Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 2 +- net/xfrm/xfrm_policy.c | 35 +++++++++++++---------------------- 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index da0e4dd653e2..39037b1cce7a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -304,7 +304,7 @@ struct xfrm_policy_afinfo { }; int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); -int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); +void xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c); void km_state_notify(struct xfrm_state *x, const struct km_event *c); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6fffa2fac607..794148f76ae2 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2883,34 +2883,25 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_policy_register_afinfo); -int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) +void xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) { - int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; + struct dst_ops *dst_ops = afinfo->dst_ops; + if (unlikely(afinfo->family >= NPROTO)) - return -EAFNOSUPPORT; - spin_lock(&xfrm_policy_afinfo_lock); - if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { - if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) - err = -EINVAL; - else - RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family], + return; + + if (likely(xfrm_policy_afinfo[afinfo->family] != afinfo)) { + RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family], NULL); } - spin_unlock(&xfrm_policy_afinfo_lock); - if (!err) { - struct dst_ops *dst_ops = afinfo->dst_ops; - synchronize_rcu(); + synchronize_rcu(); - dst_ops->kmem_cachep = NULL; - dst_ops->check = NULL; - dst_ops->negative_advice = NULL; - dst_ops->link_failure = NULL; - afinfo->garbage_collect = NULL; - } - return err; + dst_ops->kmem_cachep = NULL; + dst_ops->check = NULL; + dst_ops->negative_advice = NULL; + dst_ops->link_failure = NULL; + afinfo->garbage_collect = NULL; } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); -- cgit v1.2.3 From 3d7d25a68ea5153d9d0d01c8c83acf644eab9704 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Feb 2017 15:00:16 +0100 Subject: xfrm: policy: remove garbage_collect callback Just call xfrm_garbage_collect_deferred() directly. This gets rid of a write to afinfo in register/unregister and allows to constify afinfo later on. 
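Sketch of the general simplification with made-up names (the real helper being exported by the diff below is xfrm_garbage_collect_deferred()): when every backend wires the same function into an ops callback, the pointer can be dropped and the helper called, and exported, directly:

    #include <linux/export.h>

    struct net;
    struct dst_ops;

    /* stand-in for the one helper that every address family used anyway */
    void demo_gc_deferred(struct net *net)
    {
            /* the deferred flush would be scheduled here */
    }
    EXPORT_SYMBOL(demo_gc_deferred);

    struct demo_gc_afinfo {
            /* void (*garbage_collect)(struct net *net);  <- removed: the IPv4
             * and IPv6 backends both pointed it at the same helper */
            struct dst_ops *dst_ops;
    };

    void demo_backend_gc(struct net *net)
    {
            demo_gc_deferred(net);          /* direct call, no indirection */
    }

Dropping the callback also drops the write to the ops structure at register/unregister time, which is what the later constification depends on.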
Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 2 +- net/ipv4/xfrm4_policy.c | 2 +- net/ipv6/xfrm6_policy.c | 2 +- net/xfrm/xfrm_policy.c | 6 ++---- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 39037b1cce7a..a22368edd105 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -282,7 +282,6 @@ struct xfrm_dst; struct xfrm_policy_afinfo { unsigned short family; struct dst_ops *dst_ops; - void (*garbage_collect)(struct net *net); struct dst_entry *(*dst_lookup)(struct net *net, int tos, int oif, const xfrm_address_t *saddr, @@ -1169,6 +1168,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk) } void xfrm_garbage_collect(struct net *net); +void xfrm_garbage_collect_deferred(struct net *net); #else diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 6a7ff6957535..77ca91d7b79c 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -219,7 +219,7 @@ static inline int xfrm4_garbage_collect(struct dst_ops *ops) { struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); - xfrm4_policy_afinfo.garbage_collect(net); + xfrm_garbage_collect_deferred(net); return (dst_entries_get_slow(ops) > ops->gc_thresh * 2); } diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index e0f71c01d728..c273f22b12af 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -220,7 +220,7 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops) { struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); - xfrm6_policy_afinfo.garbage_collect(net); + xfrm_garbage_collect_deferred(net); return dst_entries_get_fast(ops) > ops->gc_thresh * 2; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 794148f76ae2..80695a157b6e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2733,10 +2733,11 @@ void xfrm_garbage_collect(struct net *net) } EXPORT_SYMBOL(xfrm_garbage_collect); -static void xfrm_garbage_collect_deferred(struct net *net) +void xfrm_garbage_collect_deferred(struct net *net) { flow_cache_flush_deferred(net); } +EXPORT_SYMBOL(xfrm_garbage_collect_deferred); static void xfrm_init_pmtu(struct dst_entry *dst) { @@ -2873,8 +2874,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->link_failure = xfrm_link_failure; if (likely(dst_ops->neigh_lookup == NULL)) dst_ops->neigh_lookup = xfrm_neigh_lookup; - if (likely(afinfo->garbage_collect == NULL)) - afinfo->garbage_collect = xfrm_garbage_collect_deferred; rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); } spin_unlock(&xfrm_policy_afinfo_lock); @@ -2901,7 +2900,6 @@ void xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->check = NULL; dst_ops->negative_advice = NULL; dst_ops->link_failure = NULL; - afinfo->garbage_collect = NULL; } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); -- cgit v1.2.3 From a2817d8b279bc8fe62da76e6019eb9ff9d4e319c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Feb 2017 15:00:17 +0100 Subject: xfrm: policy: remove family field Only needed it to register the policy backend at init time. 
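Another hedged sketch with invented demo_* names, showing the backend side: with the family passed to the register call (the core then indexes its table with that argument, much like the input-layer sketch a few patches up), the ops structure no longer carries any identity of its own:

    #include <linux/socket.h>

    struct demo_af_ops {
            /* unsigned short family;      <- field removed */
            int (*get_tos)(const void *fl);
    };

    /* core side, declared here only for the sketch */
    int demo_af_register(const struct demo_af_ops *ops, int family);

    static int demo_v4_get_tos(const void *fl)
    {
            return 0;                       /* IPv4 would extract the ToS here */
    }

    static const struct demo_af_ops demo_v4_ops = {
            /* .family = AF_INET,          <- no longer stored in the struct */
            .get_tos = demo_v4_get_tos,
    };

    int demo_v4_register(void)
    {
            /* the family index is supplied only where it is needed */
            return demo_af_register(&demo_v4_ops, AF_INET);
    }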
Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 5 ++--- net/ipv4/xfrm4_policy.c | 5 +---- net/ipv6/xfrm6_policy.c | 5 +---- net/xfrm/xfrm_policy.c | 34 +++++++++++++++++----------------- 4 files changed, 21 insertions(+), 28 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a22368edd105..6e061309adca 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -280,7 +280,6 @@ struct net_device; struct xfrm_type; struct xfrm_dst; struct xfrm_policy_afinfo { - unsigned short family; struct dst_ops *dst_ops; struct dst_entry *(*dst_lookup)(struct net *net, int tos, int oif, @@ -302,8 +301,8 @@ struct xfrm_policy_afinfo { struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig); }; -int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); -void xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); +int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family); +void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo); void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c); void km_state_notify(struct xfrm_state *x, const struct km_event *c); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 77ca91d7b79c..25613539766f 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -17,8 +17,6 @@ #include #include -static struct xfrm_policy_afinfo xfrm4_policy_afinfo; - static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, int tos, int oif, const xfrm_address_t *saddr, @@ -272,7 +270,6 @@ static struct dst_ops xfrm4_dst_ops_template = { }; static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { - .family = AF_INET, .dst_ops = &xfrm4_dst_ops_template, .dst_lookup = xfrm4_dst_lookup, .get_saddr = xfrm4_get_saddr, @@ -376,7 +373,7 @@ static struct pernet_operations __net_initdata xfrm4_net_ops = { static void __init xfrm4_policy_init(void) { - xfrm_policy_register_afinfo(&xfrm4_policy_afinfo); + xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET); } void __init xfrm4_init(void) diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index c273f22b12af..3217e85334e3 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -25,8 +25,6 @@ #include #endif -static struct xfrm_policy_afinfo xfrm6_policy_afinfo; - static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr) @@ -292,7 +290,6 @@ static struct dst_ops xfrm6_dst_ops_template = { }; static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { - .family = AF_INET6, .dst_ops = &xfrm6_dst_ops_template, .dst_lookup = xfrm6_dst_lookup, .get_saddr = xfrm6_get_saddr, @@ -305,7 +302,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { static int __init xfrm6_policy_init(void) { - return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo); + return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo, AF_INET6); } static void xfrm6_policy_fini(void) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 80695a157b6e..7698785876d7 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -45,7 +45,7 @@ struct xfrm_flo { }; static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); -static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] +static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[AF_INET6 + 1] __read_mostly; static struct kmem_cache *xfrm_dst_cache __read_mostly; @@ -103,11 +103,11 @@ bool 
xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl return false; } -static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) +static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) { - struct xfrm_policy_afinfo *afinfo; + const struct xfrm_policy_afinfo *afinfo; - if (unlikely(family >= NPROTO)) + if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo))) return NULL; rcu_read_lock(); afinfo = rcu_dereference(xfrm_policy_afinfo[family]); @@ -2848,15 +2848,15 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, return dst->path->ops->neigh_lookup(dst, skb, daddr); } -int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) +int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family) { int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) + + if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo))) return -EAFNOSUPPORT; + spin_lock(&xfrm_policy_afinfo_lock); - if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) + if (unlikely(xfrm_policy_afinfo[family] != NULL)) err = -EEXIST; else { struct dst_ops *dst_ops = afinfo->dst_ops; @@ -2874,7 +2874,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->link_failure = xfrm_link_failure; if (likely(dst_ops->neigh_lookup == NULL)) dst_ops->neigh_lookup = xfrm_neigh_lookup; - rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); + rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo); } spin_unlock(&xfrm_policy_afinfo_lock); @@ -2882,16 +2882,16 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_policy_register_afinfo); -void xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) +void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) { struct dst_ops *dst_ops = afinfo->dst_ops; + int i; - if (unlikely(afinfo->family >= NPROTO)) - return; - - if (likely(xfrm_policy_afinfo[afinfo->family] != afinfo)) { - RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family], - NULL); + for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) { + if (xfrm_policy_afinfo[i] != afinfo) + continue; + RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL); + break; } synchronize_rcu(); -- cgit v1.2.3 From bdba9fe01e1bcb942c95a1a332b27ef829c87df4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Feb 2017 15:00:18 +0100 Subject: xfrm: policy: remove xfrm_policy_put_afinfo Alternative is to keep it an make the (unused) afinfo arg const to avoid the compiler warnings once the afinfo structs get constified. 
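A small sketch of the locking contract that makes this possible (demo_* names invented, and simplified here so that the caller always drops the read lock): the get() helper enters the RCU read section and the caller leaves it, so a put() wrapper whose only job was rcu_read_unlock() adds nothing:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>

    struct demo_get_ops {
            int (*get_tos)(const void *fl);
    };

    extern const struct demo_get_ops __rcu *demo_get_table[2];

    /* enters the RCU read section; the caller unlocks in every case */
    static const struct demo_get_ops *demo_get_ops_rcu(unsigned int family)
    {
            rcu_read_lock();
            if (family >= ARRAY_SIZE(demo_get_table))
                    return NULL;
            return rcu_dereference(demo_get_table[family]);
    }

    int demo_get_tos(const void *fl, unsigned int family)
    {
            const struct demo_get_ops *ops = demo_get_ops_rcu(family);
            int tos = ops ? ops->get_tos(fl) : 0;

            rcu_read_unlock();              /* was: demo_put_ops(ops) */
            return tos;
    }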
Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7698785876d7..80ef0dcf0e29 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -116,11 +116,6 @@ static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short fa return afinfo; } -static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) -{ - rcu_read_unlock(); -} - static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, @@ -136,7 +131,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return dst; } @@ -1436,7 +1431,7 @@ xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, if (unlikely(afinfo == NULL)) return -EINVAL; err = afinfo->get_saddr(net, oif, local, remote); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } @@ -1546,7 +1541,7 @@ static int xfrm_get_tos(const struct flowi *fl, int family) afinfo = xfrm_policy_get_afinfo(family); tos = afinfo ? afinfo->get_tos(fl) : 0; - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return tos; } @@ -1632,7 +1627,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) } else xdst = ERR_PTR(-ENOBUFS); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return xdst; } @@ -1649,7 +1644,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, err = afinfo->init_path(path, dst, nfheader_len); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } @@ -1666,7 +1661,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, err = afinfo->fill_dst(xdst, dev, fl); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } @@ -2215,7 +2210,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, } else { ret = afinfo->blackhole_route(net, dst_orig); } - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return ret; } @@ -2465,7 +2460,7 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, afinfo->decode_session(skb, fl, reverse); err = security_xfrm_decode_session(skb, &fl->flowi_secid); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(__xfrm_decode_session); -- cgit v1.2.3 From 37b103830ec3e52a761bb647eb78da22a1fe4e09 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Feb 2017 15:00:19 +0100 Subject: xfrm: policy: make policy backend const Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- net/ipv4/xfrm4_policy.c | 2 +- net/ipv6/xfrm6_policy.c | 2 +- net/xfrm/xfrm_policy.c | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 25613539766f..71b4ecc195c7 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -269,7 +269,7 @@ static struct dst_ops xfrm4_dst_ops_template = { .gc_thresh = INT_MAX, }; -static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { +static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = { .dst_ops = &xfrm4_dst_ops_template, .dst_lookup = xfrm4_dst_lookup, .get_saddr = xfrm4_get_saddr, diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 3217e85334e3..79651bc71bf0 100644 --- a/net/ipv6/xfrm6_policy.c +++ 
b/net/ipv6/xfrm6_policy.c @@ -289,7 +289,7 @@ static struct dst_ops xfrm6_dst_ops_template = { .gc_thresh = INT_MAX, }; -static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { +static const struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .dst_ops = &xfrm6_dst_ops_template, .dst_lookup = xfrm6_dst_lookup, .get_saddr = xfrm6_get_saddr, diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 80ef0dcf0e29..04ed1a1ae019 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -45,7 +45,7 @@ struct xfrm_flo { }; static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); -static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[AF_INET6 + 1] +static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1] __read_mostly; static struct kmem_cache *xfrm_dst_cache __read_mostly; @@ -122,7 +122,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, const xfrm_address_t *daddr, int family) { - struct xfrm_policy_afinfo *afinfo; + const struct xfrm_policy_afinfo *afinfo; struct dst_entry *dst; afinfo = xfrm_policy_get_afinfo(family); @@ -1426,7 +1426,7 @@ xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, xfrm_address_t *remote, unsigned short family) { int err; - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); if (unlikely(afinfo == NULL)) return -EINVAL; @@ -1535,7 +1535,7 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, static int xfrm_get_tos(const struct flowi *fl, int family) { - struct xfrm_policy_afinfo *afinfo; + const struct xfrm_policy_afinfo *afinfo; int tos = 0; afinfo = xfrm_policy_get_afinfo(family); @@ -1598,7 +1598,7 @@ static const struct flow_cache_ops xfrm_bundle_fc_ops = { static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); struct dst_ops *dst_ops; struct xfrm_dst *xdst; @@ -1635,7 +1635,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len) { - struct xfrm_policy_afinfo *afinfo = + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(dst->ops->family); int err; @@ -1652,7 +1652,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl) { - struct xfrm_policy_afinfo *afinfo = + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(xdst->u.dst.ops->family); int err; @@ -2201,7 +2201,7 @@ error: static struct dst_entry *make_blackhole(struct net *net, u16 family, struct dst_entry *dst_orig) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); struct dst_entry *ret; if (!afinfo) { @@ -2452,7 +2452,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned int family, int reverse) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); int err; if (unlikely(afinfo == NULL)) -- cgit v1.2.3 From 8585989d146c61dd073d2135c5bb11d0f979d576 Mon Sep 17 00:00:00 2001 From: Luca Coelho 
Date: Wed, 8 Feb 2017 15:00:34 +0200 Subject: cfg80211: fix NAN bands definition The nl80211_nan_dual_band_conf enumeration doesn't make much sense. The default value is assigned to a bit, which makes it weird if the default bit and other bits are set at the same time. To improve this, get rid of NL80211_NAN_BAND_DEFAULT and add a wiphy configuration to let the drivers define which bands are supported. This is exposed to the userspace, which then can make a decision on which band(s) to use. Additionally, rename all "dual_band" elements to "bands", to make things clearer. Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 18 ++++++++++---- include/uapi/linux/nl80211.h | 57 ++++++++++++++++++++------------------------ net/mac80211/cfg.c | 4 ++-- net/mac80211/trace.h | 16 ++++++------- net/wireless/core.c | 3 ++- net/wireless/nl80211.c | 35 ++++++++++++++++++++------- net/wireless/trace.h | 16 ++++++------- 7 files changed, 86 insertions(+), 63 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index a2c18b53e053..c92dc03c8528 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5,7 +5,7 @@ * * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2015-2016 Intel Deutschland GmbH + * Copyright 2015-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -2416,11 +2416,13 @@ struct cfg80211_qos_map { * This struct defines NAN configuration parameters * * @master_pref: master preference (1 - 255) - * @dual: dual band operation mode, see &enum nl80211_nan_dual_band_conf + * @bands: operating bands, a bitmap of &enum nl80211_band values. + * For instance, for NL80211_BAND_2GHZ, bit 0 would be set + * (i.e. BIT(NL80211_BAND_2GHZ)). */ struct cfg80211_nan_conf { u8 master_pref; - u8 dual; + u8 bands; }; /** @@ -2428,11 +2430,11 @@ struct cfg80211_nan_conf { * configuration * * @CFG80211_NAN_CONF_CHANGED_PREF: master preference - * @CFG80211_NAN_CONF_CHANGED_DUAL: dual band operation + * @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands */ enum cfg80211_nan_conf_changes { CFG80211_NAN_CONF_CHANGED_PREF = BIT(0), - CFG80211_NAN_CONF_CHANGED_DUAL = BIT(1), + CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1), }; /** @@ -3596,6 +3598,10 @@ struct wiphy_iftype_ext_capab { * attribute indices defined in &enum nl80211_bss_select_attr. * * @cookie_counter: unique generic cookie counter, used to identify objects. + * @nan_supported_bands: bands supported by the device in NAN mode, a + * bitmap of &enum nl80211_band values. For instance, for + * NL80211_BAND_2GHZ, bit 0 would be set + * (i.e. BIT(NL80211_BAND_2GHZ)). */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -3727,6 +3733,8 @@ struct wiphy { u64 cookie_counter; + u8 nan_supported_bands; + char priv[0] __aligned(NETDEV_ALIGN); }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index cd547b864595..5ed257c4cd4e 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -10,7 +10,7 @@ * Copyright 2008, 2009 Luis R. 
Rodriguez * Copyright 2008 Jouni Malinen * Copyright 2008 Colin McCabe - * Copyright 2015 Intel Deutschland GmbH + * Copyright 2015-2017 Intel Deutschland GmbH * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -854,12 +854,15 @@ * cfg80211_scan_done(). * * @NL80211_CMD_START_NAN: Start NAN operation, identified by its - * %NL80211_ATTR_WDEV interface. This interface must have been previously - * created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the - * NAN interface will create or join a cluster. This command must have a - * valid %NL80211_ATTR_NAN_MASTER_PREF attribute and optional - * %NL80211_ATTR_NAN_DUAL attributes. - * After this command NAN functions can be added. + * %NL80211_ATTR_WDEV interface. This interface must have been + * previously created with %NL80211_CMD_NEW_INTERFACE. After it + * has been started, the NAN interface will create or join a + * cluster. This command must have a valid + * %NL80211_ATTR_NAN_MASTER_PREF attribute and optional + * %NL80211_ATTR_BANDS attributes. If %NL80211_ATTR_BANDS is + * omitted or set to 0, it means don't-care and the device will + * decide what to use. After this command NAN functions can be + * added. * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by * its %NL80211_ATTR_WDEV interface. * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined @@ -880,10 +883,14 @@ * This command is also used as a notification sent when a NAN function is * terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID * and %NL80211_ATTR_COOKIE attributes. - * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN - * must be operational (%NL80211_CMD_START_NAN was executed). - * It must contain at least one of the following attributes: - * %NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL. + * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN + * configuration. NAN must be operational (%NL80211_CMD_START_NAN + * was executed). It must contain at least one of the following + * attributes: %NL80211_ATTR_NAN_MASTER_PREF, + * %NL80211_ATTR_BANDS. If %NL80211_ATTR_BANDS is omitted, the + * current configuration is not changed. If it is present but + * set to zero, the configuration is changed to don't-care + * (i.e. the device can decide what to do). * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported. * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and * %NL80211_ATTR_COOKIE. @@ -1963,10 +1970,13 @@ enum nl80211_commands { * %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0. * Also, values 1 and 255 are reserved for certification purposes and * should not be used during a normal device operation. - * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see - * &enum nl80211_nan_dual_band_conf). This attribute is used with - * %NL80211_CMD_START_NAN and optionally with - * %NL80211_CMD_CHANGE_NAN_CONFIG. + * @NL80211_ATTR_BANDS: operating bands configuration. This is a u32 + * bitmask of BIT(NL80211_BAND_*) as described in %enum + * nl80211_band. For instance, for NL80211_BAND_2GHZ, bit 0 + * would be set. This attribute is used with + * %NL80211_CMD_START_NAN and %NL80211_CMD_CHANGE_NAN_CONFIG, and + * it is optional. If no bands are set, it means don't-care and + * the device will decide what to use. * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. 
See * &enum nl80211_nan_func_attributes for description of this nested * attribute. @@ -2397,7 +2407,7 @@ enum nl80211_attrs { NL80211_ATTR_MESH_PEER_AID, NL80211_ATTR_NAN_MASTER_PREF, - NL80211_ATTR_NAN_DUAL, + NL80211_ATTR_BANDS, NL80211_ATTR_NAN_FUNC, NL80211_ATTR_NAN_MATCH, @@ -5070,21 +5080,6 @@ enum nl80211_bss_select_attr { NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1 }; -/** - * enum nl80211_nan_dual_band_conf - NAN dual band configuration - * - * Defines the NAN dual band mode of operation - * - * @NL80211_NAN_BAND_DEFAULT: device default mode - * @NL80211_NAN_BAND_2GHZ: 2.4GHz mode - * @NL80211_NAN_BAND_5GHZ: 5GHz mode - */ -enum nl80211_nan_dual_band_conf { - NL80211_NAN_BAND_DEFAULT = 1 << 0, - NL80211_NAN_BAND_2GHZ = 1 << 1, - NL80211_NAN_BAND_5GHZ = 1 << 2, -}; - /** * enum nl80211_nan_function_type - NAN function type * diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index a0be2f6cd121..ac879bb17870 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -208,8 +208,8 @@ static int ieee80211_nan_change_conf(struct wiphy *wiphy, if (changes & CFG80211_NAN_CONF_CHANGED_PREF) new_conf.master_pref = conf->master_pref; - if (changes & CFG80211_NAN_CONF_CHANGED_DUAL) - new_conf.dual = conf->dual; + if (changes & CFG80211_NAN_CONF_CHANGED_BANDS) + new_conf.bands = conf->bands; ret = drv_nan_change_conf(sdata->local, sdata, &new_conf, changes); if (!ret) diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index f78d9f4f8711..0d645bc148d0 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -1736,21 +1736,21 @@ TRACE_EVENT(drv_start_nan, LOCAL_ENTRY VIF_ENTRY __field(u8, master_pref) - __field(u8, dual) + __field(u8, bands) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->master_pref = conf->master_pref; - __entry->dual = conf->dual; + __entry->bands = conf->bands; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT - ", master preference: %u, dual: %d", + ", master preference: %u, bands: 0x%0x", LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref, - __entry->dual + __entry->bands ) ); @@ -1787,7 +1787,7 @@ TRACE_EVENT(drv_nan_change_conf, LOCAL_ENTRY VIF_ENTRY __field(u8, master_pref) - __field(u8, dual) + __field(u8, bands) __field(u32, changes) ), @@ -1795,15 +1795,15 @@ TRACE_EVENT(drv_nan_change_conf, LOCAL_ASSIGN; VIF_ASSIGN; __entry->master_pref = conf->master_pref; - __entry->dual = conf->dual; + __entry->bands = conf->bands; __entry->changes = changes; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT - ", master preference: %u, dual: %d, changes: 0x%x", + ", master preference: %u, bands: 0x%0x, changes: 0x%x", LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref, - __entry->dual, __entry->changes + __entry->bands, __entry->changes ) ); diff --git a/net/wireless/core.c b/net/wireless/core.c index 903fc419217a..e55e05bc4805 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -626,7 +626,8 @@ int wiphy_register(struct wiphy *wiphy) if (WARN_ON((wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN)) && (!rdev->ops->start_nan || !rdev->ops->stop_nan || - !rdev->ops->add_nan_func || !rdev->ops->del_nan_func))) + !rdev->ops->add_nan_func || !rdev->ops->del_nan_func || + !(wiphy->nan_supported_bands & BIT(NL80211_BAND_2GHZ))))) return -EINVAL; #ifndef CONFIG_WIRELESS_WDS diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 9d738f75bd4e..b5f755b3ac5d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -398,7 +398,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { }, [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { 
.len = ETH_ALEN }, [NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 }, - [NL80211_ATTR_NAN_DUAL] = { .type = NLA_U8 }, + [NL80211_ATTR_BANDS] = { .type = NLA_U32 }, [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED }, [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY, .len = FILS_MAX_KEK_LEN }, @@ -1886,6 +1886,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, } } + if (nla_put_u32(msg, NL80211_ATTR_BANDS, + rdev->wiphy.nan_supported_bands)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@ -10777,15 +10781,22 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) return -EINVAL; - if (!info->attrs[NL80211_ATTR_NAN_DUAL]) - return -EINVAL; - conf.master_pref = nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]); if (!conf.master_pref) return -EINVAL; - conf.dual = nla_get_u8(info->attrs[NL80211_ATTR_NAN_DUAL]); + if (info->attrs[NL80211_ATTR_BANDS]) { + u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]); + + if (bands & ~(u32)wdev->wiphy->nan_supported_bands) + return -EOPNOTSUPP; + + if (bands && !(bands & BIT(NL80211_BAND_2GHZ))) + return -EINVAL; + + conf.bands = bands; + } err = rdev_start_nan(rdev, wdev, &conf); if (err) @@ -11150,9 +11161,17 @@ static int nl80211_nan_change_config(struct sk_buff *skb, changed |= CFG80211_NAN_CONF_CHANGED_PREF; } - if (info->attrs[NL80211_ATTR_NAN_DUAL]) { - conf.dual = nla_get_u8(info->attrs[NL80211_ATTR_NAN_DUAL]); - changed |= CFG80211_NAN_CONF_CHANGED_DUAL; + if (info->attrs[NL80211_ATTR_BANDS]) { + u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]); + + if (bands & ~(u32)wdev->wiphy->nan_supported_bands) + return -EOPNOTSUPP; + + if (bands && !(bands & BIT(NL80211_BAND_2GHZ))) + return -EINVAL; + + conf.bands = bands; + changed |= CFG80211_NAN_CONF_CHANGED_BANDS; } if (!changed) diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 2419c390f150..776e80cef9b4 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1915,18 +1915,18 @@ TRACE_EVENT(rdev_start_nan, WIPHY_ENTRY WDEV_ENTRY __field(u8, master_pref) - __field(u8, dual); + __field(u8, bands); ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->master_pref = conf->master_pref; - __entry->dual = conf->dual; + __entry->bands = conf->bands; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT - ", master preference: %u, dual: %d", + ", master preference: %u, bands: 0x%0x", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref, - __entry->dual) + __entry->bands) ); TRACE_EVENT(rdev_nan_change_conf, @@ -1937,20 +1937,20 @@ TRACE_EVENT(rdev_nan_change_conf, WIPHY_ENTRY WDEV_ENTRY __field(u8, master_pref) - __field(u8, dual); + __field(u8, bands); __field(u32, changes); ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->master_pref = conf->master_pref; - __entry->dual = conf->dual; + __entry->bands = conf->bands; __entry->changes = changes; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT - ", master preference: %u, dual: %d, changes: %x", + ", master preference: %u, bands: 0x%0x, changes: %x", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref, - __entry->dual, __entry->changes) + __entry->bands, __entry->changes) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_nan, -- cgit v1.2.3 From f181d6a3bcc35633facf5f3925699021c13492c5 Mon Sep 17 00:00:00 2001 From: Koen Vandeputte Date: Wed, 8 Feb 2017 15:32:05 +0100 Subject: mac80211: fix CSA in IBSS mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the missing IBSS capability flag 
during capability init as it needs to be inserted into the generated beacon in order for CSA to work. Fixes: cd7760e62c2ac ("mac80211: add support for CSA in IBSS mode") Signed-off-by: Piotr Gawlowicz Signed-off-by: Mikołaj Chwalisz Tested-by: Koen Vandeputte Signed-off-by: Johannes Berg --- net/mac80211/ibss.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index a31d30713d08..98999d3d5262 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -487,14 +487,14 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, struct beacon_data *presp, *old_presp; struct cfg80211_bss *cbss; const struct cfg80211_bss_ies *ies; - u16 capability = 0; + u16 capability = WLAN_CAPABILITY_IBSS; u64 tsf; int ret = 0; sdata_assert_lock(sdata); if (ifibss->privacy) - capability = WLAN_CAPABILITY_PRIVACY; + capability |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, -- cgit v1.2.3 From a0ee3541483767e82bf26b8269693eec25c01cba Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Wed, 8 Feb 2017 16:50:40 +0000 Subject: sfc: process RX event inner checksum flags Add support for RX checksum offload of encapsulated packets. This essentially just means paying attention to the inner checksum flags in the RX event, and if *either* checksum flag indicates a fail then don't tell the kernel that checksum offload was successful. Also, count these checksum errors and export the counts to ethtool -S. Test the most common "good" case of RX events with a single bitmask instead of a series of ifs. Move the more specific error checking in to a separate function for clarity, and don't use unlikely() there since we know at least one of the bits is bad. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/bitfield.h | 3 + drivers/net/ethernet/sfc/ef10.c | 123 ++++++++++++++++++++++++++++++---- drivers/net/ethernet/sfc/ethtool.c | 5 ++ drivers/net/ethernet/sfc/net_driver.h | 19 ++++-- 4 files changed, 131 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h index 17d83f37fbf2..41ad07d45144 100644 --- a/drivers/net/ethernet/sfc/bitfield.h +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -433,6 +433,9 @@ typedef union efx_oword { (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ } while (0) +#define EFX_AND_QWORD(qword, from, mask) \ + (qword).u64[0] = (from).u64[0] & (mask).u64[0] + #define EFX_OR_OWORD(oword, from, mask) \ do { \ (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index dec0c8083ff3..fd1008daf1e3 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3154,13 +3154,103 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; } +static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, + unsigned int n_packets, + unsigned int rx_encap_hdr, + unsigned int rx_l3_class, + unsigned int rx_l4_class, + const efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { + if (!efx->loopback_selftest) + channel->n_rx_eth_crc_err += n_packets; + return EFX_RX_PKT_DISCARD; + } + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && + rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) + netdev_WARN(efx->net_dev, + "invalid class for RX_IPCKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + *(rx_encap_hdr ? + &channel->n_rx_outer_ip_hdr_chksum_err : + &channel->n_rx_ip_hdr_chksum_err) += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && + ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || + (rx_l4_class != ESE_DZ_L4_CLASS_TCP && + rx_l4_class != ESE_DZ_L4_CLASS_UDP)))) + netdev_WARN(efx->net_dev, + "invalid class for RX_TCPUDP_CKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + *(rx_encap_hdr ? 
+ &channel->n_rx_outer_tcp_udp_chksum_err : + &channel->n_rx_tcp_udp_chksum_err) += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { + if (unlikely(!rx_encap_hdr)) + netdev_WARN(efx->net_dev, + "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) + netdev_WARN(efx->net_dev, + "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + channel->n_rx_inner_ip_hdr_chksum_err += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { + if (unlikely(!rx_encap_hdr)) + netdev_WARN(efx->net_dev, + "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || + (rx_l4_class != ESE_DZ_L4_CLASS_TCP && + rx_l4_class != ESE_DZ_L4_CLASS_UDP))) + netdev_WARN(efx->net_dev, + "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + channel->n_rx_inner_tcp_udp_chksum_err += n_packets; + return 0; + } + + WARN_ON(1); /* No error bits were recognised */ + return 0; +} + static int efx_ef10_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) { - unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; + unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; + unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; unsigned int n_descs, n_packets, i; struct efx_nic *efx = channel->efx; + struct efx_ef10_nic_data *nic_data = efx->nic_data; struct efx_rx_queue *rx_queue; + efx_qword_t errors; bool rx_cont; u16 flags = 0; @@ -3171,8 +3261,14 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); + rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); + rx_encap_hdr = + nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 
+ EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : + ESE_EZ_ENCAP_HDR_NONE; if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" @@ -3232,17 +3328,20 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, n_packets = 1; } - if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) - flags |= EFX_RX_PKT_DISCARD; - - if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { - channel->n_rx_ip_hdr_chksum_err += n_packets; - } else if (unlikely(EFX_QWORD_FIELD(*event, - ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { - channel->n_rx_tcp_udp_chksum_err += n_packets; - } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || - rx_l4_class == ESE_DZ_L4_CLASS_UDP) { - flags |= EFX_RX_PKT_CSUMMED; + EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, + ESF_DZ_RX_IPCKSUM_ERR, 1, + ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, + ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, + ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); + EFX_AND_QWORD(errors, *event, errors); + if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { + flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, + rx_l3_class, rx_l4_class, + rx_encap_hdr, event); + } else { + if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || + rx_l4_class == ESE_DZ_L4_CLASS_UDP) + flags |= EFX_RX_PKT_CSUMMED; } if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index adddf70780ad..3747b5644110 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -77,6 +77,11 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events), diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 73810d2d630c..3b1511092781 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -469,13 +469,18 @@ struct efx_channel { u32 *rps_flow_id; #endif - unsigned n_rx_tobe_disc; - unsigned n_rx_ip_hdr_chksum_err; - unsigned n_rx_tcp_udp_chksum_err; - unsigned n_rx_mcast_mismatch; - unsigned n_rx_frm_trunc; - unsigned n_rx_overlength; - unsigned n_skbuff_leaks; + unsigned int n_rx_tobe_disc; + unsigned int n_rx_ip_hdr_chksum_err; + unsigned int n_rx_tcp_udp_chksum_err; + unsigned int n_rx_outer_ip_hdr_chksum_err; + unsigned int n_rx_outer_tcp_udp_chksum_err; + unsigned int n_rx_inner_ip_hdr_chksum_err; + unsigned int n_rx_inner_tcp_udp_chksum_err; + unsigned int n_rx_eth_crc_err; + unsigned int n_rx_mcast_mismatch; + unsigned int n_rx_frm_trunc; + unsigned int n_rx_overlength; + unsigned int n_skbuff_leaks; unsigned int n_rx_nodesc_trunc; unsigned int n_rx_merge_events; unsigned int n_rx_merge_packets; -- cgit v1.2.3 From da50ae2eae96e00e0188b24014efc10161e60925 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Wed, 8 Feb 2017 16:51:02 +0000 Subject: sfc: set csum_level for encapsulated packets Set the csum_level for encapsulated packets where the encapsulation type, l3 class and l4 class are sets that need it. 
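As a generic, hedged illustration of what the driver-specific diff below achieves (the DEMO_* flag names are made up; skb->ip_summed, skb->csum_level and CHECKSUM_UNNECESSARY are the real stack interface): when the hardware has validated the inner checksum of an encapsulated frame as well as the outer one, the driver reports one extra checksum level so the stack knows the inner L4 checksum is already covered:

    #include <linux/skbuff.h>

    #define DEMO_RX_OUTER_CSUM_OK   0x01    /* hypothetical per-packet RX flags */
    #define DEMO_RX_INNER_CSUM_OK   0x02

    void demo_rx_set_csum(struct sk_buff *skb, unsigned int flags)
    {
            skb_checksum_none_assert(skb);

            if (flags & DEMO_RX_OUTER_CSUM_OK) {
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                    /* csum_level counts checksums verified beyond the outermost
                     * one, so a tunnelled packet with a good inner checksum
                     * reports level 1 */
                    skb->csum_level = !!(flags & DEMO_RX_INNER_CSUM_OK);
            }
    }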
Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 23 ++++++++++++++++++++--- drivers/net/ethernet/sfc/net_driver.h | 1 + drivers/net/ethernet/sfc/rx.c | 5 ++++- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index fd1008daf1e3..c738b113b0ac 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3339,9 +3339,26 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, rx_l3_class, rx_l4_class, rx_encap_hdr, event); } else { - if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || - rx_l4_class == ESE_DZ_L4_CLASS_UDP) - flags |= EFX_RX_PKT_CSUMMED; + bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP || + rx_l4_class == ESE_DZ_L4_CLASS_UDP; + + switch (rx_encap_hdr) { + case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ + flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ + if (tcpudp) + flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ + break; + case ESE_EZ_ENCAP_HDR_GRE: + case ESE_EZ_ENCAP_HDR_NONE: + if (tcpudp) + flags |= EFX_RX_PKT_CSUMMED; + break; + default: + netdev_WARN(efx->net_dev, + "unknown encapsulation type: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + } } if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 3b1511092781..e64e933c5bb3 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -307,6 +307,7 @@ struct efx_rx_buffer { #define EFX_RX_PKT_DISCARD 0x0004 #define EFX_RX_PKT_TCP 0x0040 #define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */ +#define EFX_RX_PKT_CSUM_LEVEL 0x0200 /** * struct efx_rx_page_state - Page-based rx buffer state diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 31587f4066ab..42443f434569 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -434,6 +434,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, PKT_HASH_TYPE_L3); skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); for (;;) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, @@ -621,8 +622,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, /* Set the SKB flags */ skb_checksum_none_assert(skb); - if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) + if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) { skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); + } efx_rx_skb_attach_timestamp(channel, skb); -- cgit v1.2.3 From 8a531400623c52b09bc3b718e9a6142eb60a11a7 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Wed, 8 Feb 2017 16:51:18 +0000 Subject: sfc: harden driver against MC resets during initial probe This is mainly to prepare for a future overlay networking patch that could cause an MC reset at probe time if the UDP tunnel port list is set immediately upon driver load. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 99 +++++++++++++++++++++++++++--------------- 1 file changed, 64 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index c28cd9daf949..baab0a2a9eff 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3136,6 +3136,51 @@ static int efx_pci_probe_main(struct efx_nic *efx) return rc; } +static int efx_pci_probe_post_io(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + int rc = efx_pci_probe_main(efx); + + if (rc) + return rc; + + if (efx->type->sriov_init) { + rc = efx->type->sriov_init(efx); + if (rc) + netif_err(efx, probe, efx->net_dev, + "SR-IOV can't be enabled rc %d\n", rc); + } + + /* Determine netdevice features */ + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_RXCSUM); + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) + net_dev->features |= NETIF_F_TSO6; + /* Check whether device supports TSO */ + if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) + net_dev->features &= ~NETIF_F_ALL_TSO; + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + NETIF_F_RXCSUM); + + net_dev->hw_features = net_dev->features & ~efx->fixed_features; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). + */ + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->features |= efx->fixed_features; + + rc = efx_register_netdev(efx); + if (!rc) + return 0; + + efx_pci_remove_main(efx); + return rc; +} + /* NIC initialisation * * This is called at module load (or hotplug insertion, @@ -3178,42 +3223,28 @@ static int efx_pci_probe(struct pci_dev *pci_dev, if (rc) goto fail2; - rc = efx_pci_probe_main(efx); + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On failure, retry once immediately. + * If we aborted probe due to a scheduled reset, dismiss it. + */ + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On another failure, retry once more + * after a 50-305ms delay. + */ + unsigned char r; + + get_random_bytes(&r, 1); + msleep((unsigned int)r + 50); + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + } + } if (rc) goto fail3; - net_dev->features |= (efx->type->offload_features | NETIF_F_SG | - NETIF_F_TSO | NETIF_F_RXCSUM); - if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) - net_dev->features |= NETIF_F_TSO6; - /* Check whether device supports TSO */ - if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) - net_dev->features &= ~NETIF_F_ALL_TSO; - /* Mask for features that also apply to VLAN devices */ - net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | - NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | - NETIF_F_RXCSUM); - - net_dev->hw_features = net_dev->features & ~efx->fixed_features; - - /* Disable VLAN filtering by default. It may be enforced if - * the feature is fixed (i.e. VLAN filters are required to - * receive VLAN tagged packets due to vPort restrictions). 
- */ - net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; - net_dev->features |= efx->fixed_features; - - rc = efx_register_netdev(efx); - if (rc) - goto fail4; - - if (efx->type->sriov_init) { - rc = efx->type->sriov_init(efx); - if (rc) - netif_err(efx, probe, efx->net_dev, - "SR-IOV can't be enabled rc %d\n", rc); - } - netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); /* Try to create MTDs, but allow this to fail */ @@ -3232,8 +3263,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev, return 0; - fail4: - efx_pci_remove_main(efx); fail3: efx_fini_io(efx); fail2: -- cgit v1.2.3 From 0ca2b46dbb645c1aa23eda6f5ac49a7be31ac87d Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Wed, 8 Feb 2017 16:51:33 +0000 Subject: sfc: call mcdi_reboot_detected() when MC reboots during an MCDI command This function wasn't being called in this particular case when the MC reboots. This caused resource reallocations to not be handled properly and often ended up disabling the interface. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 24b271b9c260..fae647dde0ee 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -716,8 +716,11 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, if (cmd == MC_CMD_REBOOT && rc == -EIO) { /* Don't reset if MC_CMD_REBOOT returns EIO */ } else if (rc == -EIO || rc == -EINTR) { - netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", - -rc); + netif_err(efx, hw, efx->net_dev, "MC reboot detected\n"); + netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n", + cmd, -rc); + if (efx->type->mcdi_reboot_detected) + efx->type->mcdi_reboot_detected(efx); efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); } else if (proxy_handle && (rc == -EPROTO) && efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, -- cgit v1.2.3 From d4e85477cc3f1b2ca445cdf14b285238ea0d5516 Mon Sep 17 00:00:00 2001 From: Matthew Slattery Date: Wed, 8 Feb 2017 16:51:50 +0000 Subject: sfc: update mcdi_pcol definitions for MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi_pcol.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 35cc3d4fa5f6..47ced8a898ca 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -11913,6 +11913,27 @@ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 +/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4 +/* UDP port (the standard ports are named below but any port may be used) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2 +/* enum: the IANA allocated UDP port for VXLAN */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 +/* enum: the IANA allocated UDP port for Geneve */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16 +/* tunnel encapsulation protocol (only those named below are supported) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 +/* enum: VXLAN */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 +/* enum: Geneve */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 + /***********************************/ /* MC_CMD_RX_BALANCING -- cgit v1.2.3 From e5fbd977641c92a3a2b559bb5ebb425458e3efe8 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Wed, 8 Feb 2017 16:52:10 +0000 Subject: sfc: configure UDP tunnel offload ports Implement ndo_udp_tunnel_{add,del} to update the NIC's list of VXLAN and GENEVE UDP ports. Also reset the port list to empty on driver load and on driver unload, with appropriate flag set on the unload case. These port numbers are used for RX inner checksum offload, and in future will also be used for TX inner checksum offload and encapsulated TSO. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 289 ++++++++++++++++++++++++++++++++++ drivers/net/ethernet/sfc/efx.c | 72 +++++++++ drivers/net/ethernet/sfc/mcdi.c | 8 +- drivers/net/ethernet/sfc/mcdi.h | 1 + drivers/net/ethernet/sfc/net_driver.h | 19 +++ drivers/net/ethernet/sfc/nic.h | 7 + drivers/net/ethernet/sfc/siena.c | 2 + 7 files changed, 397 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index c738b113b0ac..6bba2d206d82 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -132,6 +132,7 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid); static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, struct efx_ef10_filter_vlan *vlan); static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid); +static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) { @@ -624,6 +625,8 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail2; + mutex_init(&nic_data->udp_tunnels_lock); + /* Reset (most) configuration for this function */ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); if (rc) @@ -712,6 +715,14 @@ fail5: fail4: device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); fail3: + efx_mcdi_detach(efx); + + mutex_lock(&nic_data->udp_tunnels_lock); + memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); + (void)efx_ef10_set_udp_tnl_ports(efx, true); + mutex_unlock(&nic_data->udp_tunnels_lock); + mutex_destroy(&nic_data->udp_tunnels_lock); + efx_mcdi_fini(efx); fail2: efx_nic_free_buffer(efx, &nic_data->mcdi_buf); @@ -981,6 +992,15 @@ static void efx_ef10_remove(struct efx_nic *efx) device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); + efx_mcdi_detach(efx); + + memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); + mutex_lock(&nic_data->udp_tunnels_lock); + (void)efx_ef10_set_udp_tnl_ports(efx, true); + mutex_unlock(&nic_data->udp_tunnels_lock); + + mutex_destroy(&nic_data->udp_tunnels_lock); + efx_mcdi_fini(efx); efx_nic_free_buffer(efx, &nic_data->mcdi_buf); kfree(nic_data); @@ -6066,6 +6086,271 @@ static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) return efx_ef10_del_vlan(efx, vid); } +/* We rely on the MCDI wiping out our TX rings if it made any changes to the + * ports table, ensuring that any TSO descriptors that were made on a now- + * removed tunnel port will be blown away and won't break things when we try + * to transmit them using the new ports table. 
+ */ +static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); + bool will_reset = false; + size_t num_entries = 0; + size_t inlen, outlen; + size_t i; + int rc; + efx_dword_t flags_and_num_entries; + + WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); + + nic_data->udp_tunnels_dirty = false; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { + netif_device_attach(efx->net_dev); + return 0; + } + + BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + if (nic_data->udp_tunnels[i].count && + nic_data->udp_tunnels[i].port) { + efx_dword_t entry; + + EFX_POPULATE_DWORD_2(entry, + TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, + ntohs(nic_data->udp_tunnels[i].port), + TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, + nic_data->udp_tunnels[i].type); + *_MCDI_ARRAY_DWORD(inbuf, + SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, + num_entries++) = entry; + } + } + + BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != + EFX_WORD_1_LBN); + BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != + EFX_WORD_1_WIDTH); + EFX_POPULATE_DWORD_2(flags_and_num_entries, + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, + !!unloading, + EFX_WORD_1, num_entries); + *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = + flags_and_num_entries; + + inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, + inbuf, inlen, outbuf, sizeof(outbuf), &outlen); + if (rc == -EIO) { + /* Most likely the MC rebooted due to another function also + * setting its tunnel port list. Mark the tunnel port list as + * dirty, so it will be pushed upon coming up from the reboot. + */ + nic_data->udp_tunnels_dirty = true; + return 0; + } + + if (rc) { + /* expected not available on unprivileged functions */ + if (rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "Unable to set UDP tunnel ports; rc=%d.\n", rc); + } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & + (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { + netif_info(efx, drv, efx->net_dev, + "Rebooting MC due to UDP tunnel port list change\n"); + will_reset = true; + if (unloading) + /* Delay for the MC reset to complete. This will make + * unloading other functions a bit smoother. This is a + * race, but the other unload will work whichever way + * it goes, this just avoids an unnecessary error + * message. + */ + msleep(100); + } + if (!will_reset && !unloading) { + /* The caller will have detached, relying on the MC reset to + * trigger a re-attach. Since there won't be an MC reset, we + * have to do the attach ourselves. + */ + netif_device_attach(efx->net_dev); + } + + return rc; +} + +static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc = 0; + + mutex_lock(&nic_data->udp_tunnels_lock); + if (nic_data->udp_tunnels_dirty) { + /* Make sure all TX are stopped while we modify the table, else + * we might race against an efx_features_check(). 
+ */ + efx_device_detach_sync(efx); + rc = efx_ef10_set_udp_tnl_ports(efx, false); + } + mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + +static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, + __be16 port) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t i; + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + if (!nic_data->udp_tunnels[i].count) + continue; + if (nic_data->udp_tunnels[i].port == port) + return &nic_data->udp_tunnels[i]; + } + return NULL; +} + +static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, + struct efx_udp_tunnel tnl) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *match; + char typebuf[8]; + size_t i; + int rc; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) + return 0; + + efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", + typebuf, ntohs(tnl.port)); + + mutex_lock(&nic_data->udp_tunnels_lock); + /* Make sure all TX are stopped while we add to the table, else we + * might race against an efx_features_check(). + */ + efx_device_detach_sync(efx); + + match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); + if (match != NULL) { + if (match->type == tnl.type) { + netif_dbg(efx, drv, efx->net_dev, + "Referencing existing tunnel entry\n"); + match->count++; + /* No need to cause an MCDI update */ + rc = 0; + goto unlock_out; + } + efx_get_udp_tunnel_type_name(match->type, + typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, + "UDP port %d is already in use by %s\n", + ntohs(tnl.port), typebuf); + rc = -EEXIST; + goto unlock_out; + } + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) + if (!nic_data->udp_tunnels[i].count) { + nic_data->udp_tunnels[i] = tnl; + nic_data->udp_tunnels[i].count = 1; + rc = efx_ef10_set_udp_tnl_ports(efx, false); + goto unlock_out; + } + + netif_dbg(efx, drv, efx->net_dev, + "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", + typebuf, ntohs(tnl.port)); + + rc = -ENOMEM; + +unlock_out: + mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + +/* Called under the TX lock with the TX queue running, hence no-one can be + * in the middle of updating the UDP tunnels table. However, they could + * have tried and failed the MCDI, in which case they'll have set the dirty + * flag before dropping their locks. + */ +static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) + return false; + + if (nic_data->udp_tunnels_dirty) + /* SW table may not match HW state, so just assume we can't + * use any UDP tunnel offloads. 
+ */ + return false; + + return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; +} + +static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, + struct efx_udp_tunnel tnl) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *match; + char typebuf[8]; + int rc; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) + return 0; + + efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", + typebuf, ntohs(tnl.port)); + + mutex_lock(&nic_data->udp_tunnels_lock); + /* Make sure all TX are stopped while we remove from the table, else we + * might race against an efx_features_check(). + */ + efx_device_detach_sync(efx); + + match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); + if (match != NULL) { + if (match->type == tnl.type) { + if (--match->count) { + /* Port is still in use, so nothing to do */ + netif_dbg(efx, drv, efx->net_dev, + "UDP tunnel port %d remains active\n", + ntohs(tnl.port)); + rc = 0; + goto out_unlock; + } + rc = efx_ef10_set_udp_tnl_ports(efx, false); + goto out_unlock; + } + efx_get_udp_tunnel_type_name(match->type, + typebuf, sizeof(typebuf)); + netif_warn(efx, drv, efx->net_dev, + "UDP port %d is actually in use by %s, not removing\n", + ntohs(tnl.port), typebuf); + } + rc = -ENOENT; + +out_unlock: + mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + #define EF10_OFFLOAD_FEATURES \ (NETIF_F_IP_CSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | \ @@ -6269,6 +6554,10 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, + .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, + .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, + .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, + .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, #ifdef CONFIG_SFC_SRIOV .sriov_configure = efx_ef10_sriov_configure, .sriov_init = efx_ef10_sriov_init, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index baab0a2a9eff..cb8e2c3f806a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -23,12 +23,15 @@ #include #include #include "net_driver.h" +#include +#include #include "efx.h" #include "nic.h" #include "selftest.h" #include "sriov.h" #include "mcdi.h" +#include "mcdi_pcol.h" #include "workarounds.h" /************************************************************************** @@ -88,6 +91,21 @@ const char *const efx_reset_type_names[] = { [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", }; +/* UDP tunnel type names */ +static const char *const efx_udp_tunnel_type_names[] = { + [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan", + [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve", +}; + +void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen) +{ + if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) && + efx_udp_tunnel_type_names[type] != NULL) + snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]); + else + snprintf(buf, buflen, "type %d", type); +} + /* Reset workqueue. If any NIC has a hardware failure then a reset will be * queued onto this work queue. This is not a per-nic work queue, because * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 
@@ -2336,6 +2354,52 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi return -EOPNOTSUPP; } +static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in) +{ + switch (in) { + case UDP_TUNNEL_TYPE_VXLAN: + return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; + case UDP_TUNNEL_TYPE_GENEVE: + return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; + default: + return -1; + } +} + +static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = netdev_priv(dev); + struct efx_udp_tunnel tnl; + int efx_tunnel_type; + + efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); + if (efx_tunnel_type < 0) + return; + + tnl.type = (u16)efx_tunnel_type; + tnl.port = ti->port; + + if (efx->type->udp_tnl_add_port) + (void)efx->type->udp_tnl_add_port(efx, tnl); +} + +static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = netdev_priv(dev); + struct efx_udp_tunnel tnl; + int efx_tunnel_type; + + efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); + if (efx_tunnel_type < 0) + return; + + tnl.type = (u16)efx_tunnel_type; + tnl.port = ti->port; + + if (efx->type->udp_tnl_add_port) + (void)efx->type->udp_tnl_del_port(efx, tnl); +} + static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, @@ -2366,6 +2430,8 @@ static const struct net_device_ops efx_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif + .ndo_udp_tunnel_add = efx_udp_tunnel_add, + .ndo_udp_tunnel_del = efx_udp_tunnel_del, }; static void efx_update_name(struct efx_nic *efx) @@ -2605,6 +2671,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) efx_start_all(efx); + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + return 0; fail: @@ -3261,6 +3330,9 @@ static int efx_pci_probe(struct pci_dev *pci_dev, "PCIE error reporting unavailable (%d).\n", rc); + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + return 0; fail3: diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index fae647dde0ee..b9422450deb8 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -128,7 +128,7 @@ fail: return rc; } -void efx_mcdi_fini(struct efx_nic *efx) +void efx_mcdi_detach(struct efx_nic *efx) { if (!efx->mcdi) return; @@ -137,6 +137,12 @@ void efx_mcdi_fini(struct efx_nic *efx) /* Relinquish the device (back to the BMC, if this is a LOM) */ efx_mcdi_drv_attach(efx, false, NULL); +} + +void efx_mcdi_fini(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; #ifdef CONFIG_SFC_MCDI_LOGGING free_page((unsigned long)efx->mcdi->iface.logging_buffer); diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 4472107ca8c1..154ef41d1927 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -142,6 +142,7 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) #endif int efx_mcdi_init(struct efx_nic *efx); +void efx_mcdi_detach(struct efx_nic *efx); void efx_mcdi_fini(struct efx_nic *efx); int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index e64e933c5bb3..5cb8112b1cf3 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -554,6 +554,8 @@ extern const unsigned int efx_reset_type_max; #define RESET_TYPE(type) \ 
STRING_TABLE_LOOKUP(type, efx_reset_type) +void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen); + enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, @@ -993,6 +995,15 @@ struct efx_mtd_partition { char name[IFNAMSIZ + 20]; }; +struct efx_udp_tunnel { + u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */ + __be16 port; + /* Count of repeated adds of the same port. Used only inside the list, + * not in request arguments. + */ + u16 count; +}; + /** * struct efx_nic_type - Efx device type definition * @mem_bar: Get the memory BAR @@ -1113,6 +1124,10 @@ struct efx_mtd_partition { * @set_mac_address: Set the MAC address of the device * @tso_versions: Returns mask of firmware-assisted TSO versions supported. * If %NULL, then device does not support any TSO version. + * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required. + * @udp_tnl_add_port: Add a UDP tunnel port + * @udp_tnl_has_port: Check if a port has been added as UDP tunnel + * @udp_tnl_del_port: Remove a UDP tunnel port * @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1272,6 +1287,10 @@ struct efx_nic_type { int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr); int (*set_mac_address)(struct efx_nic *efx); u32 (*tso_versions)(struct efx_nic *efx); + int (*udp_tnl_push_ports)(struct efx_nic *efx); + int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); + bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); + int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); int revision; unsigned int txd_ptr_tbl_base; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 85cf131288b7..7b916aa21bde 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -369,6 +369,10 @@ enum { * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock. * @vlan_lock: Lock to serialize access to vlan_list. + * @udp_tunnels: UDP tunnel port numbers and types. + * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing + * @udp_tunnels to hardware and thus the push must be re-done. + * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty. 
*/ struct efx_ef10_nic_data { struct efx_buffer mcdi_buf; @@ -405,6 +409,9 @@ struct efx_ef10_nic_data { u8 vport_mac[ETH_ALEN]; struct list_head vlan_list; struct mutex vlan_lock; + struct efx_udp_tunnel udp_tunnels[16]; + bool udp_tunnels_dirty; + struct mutex udp_tunnels_lock; }; int efx_init_sriov(void); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index af7cd8565a41..b1d36df71ecb 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -326,6 +326,7 @@ fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: + efx_mcdi_detach(efx); efx_mcdi_fini(efx); fail1: kfree(efx->nic_data); @@ -450,6 +451,7 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_reset(efx, RESET_TYPE_ALL); + efx_mcdi_detach(efx); efx_mcdi_fini(efx); /* Tear down the private nic state */ -- cgit v1.2.3 From 9faf1c0fd5a943e498dbc0c86ff42e965b347d08 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 9 Feb 2017 01:18:15 +0800 Subject: sctp: drop unnecessary __packed from some stream reconf structures commit 85c727b59483 ("sctp: drop __packed from almost all SCTP structures") has removed __packed from almost all SCTP structures. But there still are three structures where it should be dropped. This patch is to remove it from some stream reconf structures. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 2408c6877ca0..d74fca3f3141 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -721,7 +721,7 @@ struct sctp_infox { struct sctp_reconf_chunk { sctp_chunkhdr_t chunk_hdr; __u8 params[0]; -} __packed; +}; struct sctp_strreset_outreq { sctp_paramhdr_t param_hdr; @@ -729,12 +729,12 @@ struct sctp_strreset_outreq { __u32 response_seq; __u32 send_reset_at_tsn; __u16 list_of_streams[0]; -} __packed; +}; struct sctp_strreset_inreq { sctp_paramhdr_t param_hdr; __u32 request_seq; __u16 list_of_streams[0]; -} __packed; +}; #endif /* __LINUX_SCTP_H__ */ -- cgit v1.2.3 From 119aecbae57e6f10b85b7cd5e070eef006dbcd74 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 9 Feb 2017 01:18:16 +0800 Subject: sctp: streams should be recovered when it fails to send request. Now when sending stream reset request, it closes the streams to block further xmit of data until this request is completed, then calls sctp_send_reconf to send the chunk. But if sctp_send_reconf returns err, and it doesn't recover the streams' states back, which means the request chunk would not be queued and sent, so the asoc will get stuck, streams are closed and no packet is even queued. This patch is to fix it by recovering the streams' states when it fails to send the request, it is also to fix a return value. Fixes: 7f9d68ac944e ("sctp: implement sender-side procedures for SSN Reset Request Parameter") Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sctp/stream.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 13d5e07dcd7d..6a686e330c57 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -136,8 +136,10 @@ int sctp_send_reset_streams(struct sctp_association *asoc, goto out; chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); - if (!chunk) + if (!chunk) { + retval = -ENOMEM; goto out; + } if (out) { if (str_nums) @@ -149,7 +151,6 @@ int sctp_send_reset_streams(struct sctp_association *asoc, stream->out[i].state = SCTP_STREAM_CLOSED; } - asoc->strreset_outstanding = out + in; asoc->strreset_chunk = chunk; sctp_chunk_hold(asoc->strreset_chunk); @@ -157,8 +158,22 @@ int sctp_send_reset_streams(struct sctp_association *asoc, if (retval) { sctp_chunk_put(asoc->strreset_chunk); asoc->strreset_chunk = NULL; + if (!out) + goto out; + + if (str_nums) + for (i = 0; i < str_nums; i++) + stream->out[str_list[i]].state = + SCTP_STREAM_OPEN; + else + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + + goto out; } + asoc->strreset_outstanding = out + in; + out: return retval; } -- cgit v1.2.3 From c56480a1e90261842f54f3a5a9ebc12d827f0c3e Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 9 Feb 2017 01:18:17 +0800 Subject: sctp: add support for generating stream reconf ssn/tsn reset request chunk This patch is to define SSN/TSN Reset Request Parameter described in rfc6525 section 4.3. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 5 +++++ include/net/sctp/sm.h | 2 ++ net/sctp/sm_make_chunk.c | 29 +++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index d74fca3f3141..71c0d41d9a59 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -737,4 +737,9 @@ struct sctp_strreset_inreq { __u16 list_of_streams[0]; }; +struct sctp_strreset_tsnreq { + sctp_paramhdr_t param_hdr; + __u32 request_seq; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 430ed139fbbb..ac37c1782e23 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -265,6 +265,8 @@ struct sctp_chunk *sctp_make_strreset_req( const struct sctp_association *asoc, __u16 stream_num, __u16 *stream_list, bool out, bool in); +struct sctp_chunk *sctp_make_strreset_tsnreq( + const struct sctp_association *asoc); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index c7d3249f88ec..749842aead33 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3658,3 +3658,32 @@ struct sctp_chunk *sctp_make_strreset_req( return retval; } + +/* RE-CONFIG 4.3 (SSN/TSN RESET ALL) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 15 | Parameter Length = 8 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Request Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_strreset_tsnreq( + const struct sctp_association *asoc) +{ + struct sctp_strreset_tsnreq tsnreq; + __u16 length = sizeof(tsnreq); + struct sctp_chunk *retval; + + retval = sctp_make_reconf(asoc, length); + if (!retval) + return NULL; + + tsnreq.param_hdr.type = 
SCTP_PARAM_RESET_TSN_REQUEST; + tsnreq.param_hdr.length = htons(length); + tsnreq.request_seq = htonl(asoc->strreset_outseq); + + sctp_addto_chunk(retval, sizeof(tsnreq), &tsnreq); + + return retval; +} -- cgit v1.2.3 From a92ce1a42dde1caaee4afae67531e3e7acecf6e4 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 9 Feb 2017 01:18:18 +0800 Subject: sctp: implement sender-side procedures for SSN/TSN Reset Request Parameter This patch is to implement Sender-Side Procedures for the SSN/TSN Reset Request Parameter descibed in rfc6525 section 5.1.4. It is also to add sockopt SCTP_RESET_ASSOC in rfc6525 section 6.3.3 for users. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 1 + include/uapi/linux/sctp.h | 1 + net/sctp/socket.c | 29 +++++++++++++++++++++++++++++ net/sctp/stream.c | 40 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 71 insertions(+) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 480b65a24aff..b60ca14068d8 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -198,6 +198,7 @@ int sctp_offload_init(void); */ int sctp_send_reset_streams(struct sctp_association *asoc, struct sctp_reset_streams *params); +int sctp_send_reset_assoc(struct sctp_association *asoc); /* * Module global variables diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 03c27cefffb1..c0bd8c3d565a 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -117,6 +117,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_PR_ASSOC_STATUS 115 #define SCTP_ENABLE_STREAM_RESET 118 #define SCTP_RESET_STREAMS 119 +#define SCTP_RESET_ASSOC 120 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a8b4252fe084..45a7c417eb7f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3818,6 +3818,32 @@ out: return retval; } +static int sctp_setsockopt_reset_assoc(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_association *asoc; + sctp_assoc_t associd; + int retval = -EINVAL; + + if (optlen != sizeof(associd)) + goto out; + + if (copy_from_user(&associd, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, associd); + if (!asoc) + goto out; + + retval = sctp_send_reset_assoc(asoc); + +out: + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -3990,6 +4016,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_RESET_STREAMS: retval = sctp_setsockopt_reset_streams(sk, optval, optlen); break; + case SCTP_RESET_ASSOC: + retval = sctp_setsockopt_reset_assoc(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 6a686e330c57..53e49fc2f0a3 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -177,3 +177,43 @@ int sctp_send_reset_streams(struct sctp_association *asoc, out: return retval; } + +int sctp_send_reset_assoc(struct sctp_association *asoc) +{ + struct sctp_chunk *chunk = NULL; + int retval; + __u16 i; + + if (!asoc->peer.reconf_capable || + !(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ)) + return -ENOPROTOOPT; + + if (asoc->strreset_outstanding) + return -EINPROGRESS; + + chunk = sctp_make_strreset_tsnreq(asoc); + if (!chunk) + return -ENOMEM; + + /* Block further xmit of data until this request is completed */ + for (i = 0; i < asoc->stream->outcnt; i++) + 
asoc->stream->out[i].state = SCTP_STREAM_CLOSED; + + asoc->strreset_chunk = chunk; + sctp_chunk_hold(asoc->strreset_chunk); + + retval = sctp_send_reconf(asoc, chunk); + if (retval) { + sctp_chunk_put(asoc->strreset_chunk); + asoc->strreset_chunk = NULL; + + for (i = 0; i < asoc->stream->outcnt; i++) + asoc->stream->out[i].state = SCTP_STREAM_OPEN; + + return retval; + } + + asoc->strreset_outstanding = 1; + + return 0; +} -- cgit v1.2.3 From 78098117f8bfad4f2104c3f7b6b69071af95a246 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 9 Feb 2017 01:18:19 +0800 Subject: sctp: add support for generating stream reconf add incoming/outgoing streams request chunk This patch is to define Add Incoming/Outgoing Streams Request Parameter described in rfc6525 section 4.5 and 4.6. They can be in one same chunk trunk as rfc6525 section 3.1-7 describes, so make them in one function. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/linux/sctp.h | 7 +++++++ include/net/sctp/sm.h | 3 +++ net/sctp/sm_make_chunk.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 71c0d41d9a59..b055788de0cf 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -742,4 +742,11 @@ struct sctp_strreset_tsnreq { __u32 request_seq; }; +struct sctp_strreset_addstrm { + sctp_paramhdr_t param_hdr; + __u32 request_seq; + __u16 number_of_streams; + __u16 reserved; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index ac37c1782e23..3675fde3a26e 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -267,6 +267,9 @@ struct sctp_chunk *sctp_make_strreset_req( bool out, bool in); struct sctp_chunk *sctp_make_strreset_tsnreq( const struct sctp_association *asoc); +struct sctp_chunk *sctp_make_strreset_addstrm( + const struct sctp_association *asoc, + __u16 out, __u16 in); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 749842aead33..7f8dbf2c6cee 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3687,3 +3687,49 @@ struct sctp_chunk *sctp_make_strreset_tsnreq( return retval; } + +/* RE-CONFIG 4.5/4.6 (ADD STREAM) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 17 | Parameter Length = 12 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Request Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Number of new streams | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_strreset_addstrm( + const struct sctp_association *asoc, + __u16 out, __u16 in) +{ + struct sctp_strreset_addstrm addstrm; + __u16 size = sizeof(addstrm); + struct sctp_chunk *retval; + + retval = sctp_make_reconf(asoc, (!!out + !!in) * size); + if (!retval) + return NULL; + + if (out) { + addstrm.param_hdr.type = SCTP_PARAM_RESET_ADD_OUT_STREAMS; + addstrm.param_hdr.length = htons(size); + addstrm.number_of_streams = htons(out); + addstrm.request_seq = htonl(asoc->strreset_outseq); + addstrm.reserved = 0; + + sctp_addto_chunk(retval, size, &addstrm); + } + + if (in) { + addstrm.param_hdr.type = SCTP_PARAM_RESET_ADD_IN_STREAMS; + 
addstrm.param_hdr.length = htons(size); + addstrm.number_of_streams = htons(in); + addstrm.request_seq = htonl(asoc->strreset_outseq + !!out); + addstrm.reserved = 0; + + sctp_addto_chunk(retval, size, &addstrm); + } + + return retval; +} -- cgit v1.2.3 From 242bd2d519d7194633e309286ba7ba29a1ad63e8 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 9 Feb 2017 01:18:20 +0800 Subject: sctp: implement sender-side procedures for Add Incoming/Outgoing Streams Request Parameter This patch is to implement Sender-Side Procedures for the Add Outgoing and Incoming Streams Request Parameter described in rfc6525 section 5.1.5-5.1.6. It is also to add sockopt SCTP_ADD_STREAMS in rfc6525 section 6.3.4 for users. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 2 ++ include/uapi/linux/sctp.h | 7 +++++ net/sctp/socket.c | 29 ++++++++++++++++++ net/sctp/stream.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 115 insertions(+) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index b60ca14068d8..6dfc5536a3e6 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -199,6 +199,8 @@ int sctp_offload_init(void); int sctp_send_reset_streams(struct sctp_association *asoc, struct sctp_reset_streams *params); int sctp_send_reset_assoc(struct sctp_association *asoc); +int sctp_send_add_streams(struct sctp_association *asoc, + struct sctp_add_streams *params); /* * Module global variables diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index c0bd8c3d565a..a91a9cccbae6 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -118,6 +118,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_ENABLE_STREAM_RESET 118 #define SCTP_RESET_STREAMS 119 #define SCTP_RESET_ASSOC 120 +#define SCTP_ADD_STREAMS 121 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -1027,4 +1028,10 @@ struct sctp_reset_streams { uint16_t srs_stream_list[]; /* list if srs_num_streams is not 0 */ }; +struct sctp_add_streams { + sctp_assoc_t sas_assoc_id; + uint16_t sas_instrms; + uint16_t sas_outstrms; +}; + #endif /* _UAPI_SCTP_H */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 45a7c417eb7f..75f35cea4371 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3844,6 +3844,32 @@ out: return retval; } +static int sctp_setsockopt_add_streams(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_association *asoc; + struct sctp_add_streams params; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(¶ms, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.sas_assoc_id); + if (!asoc) + goto out; + + retval = sctp_send_add_streams(asoc, ¶ms); + +out: + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -4019,6 +4045,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_RESET_ASSOC: retval = sctp_setsockopt_reset_assoc(sk, optval, optlen); break; + case SCTP_ADD_STREAMS: + retval = sctp_setsockopt_add_streams(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 53e49fc2f0a3..eb02490245ba 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -217,3 +217,80 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) return 0; } + +int sctp_send_add_streams(struct sctp_association *asoc, + struct 
sctp_add_streams *params) +{ + struct sctp_stream *stream = asoc->stream; + struct sctp_chunk *chunk = NULL; + int retval = -ENOMEM; + __u32 outcnt, incnt; + __u16 out, in; + + if (!asoc->peer.reconf_capable || + !(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) { + retval = -ENOPROTOOPT; + goto out; + } + + if (asoc->strreset_outstanding) { + retval = -EINPROGRESS; + goto out; + } + + out = params->sas_outstrms; + in = params->sas_instrms; + outcnt = stream->outcnt + out; + incnt = stream->incnt + in; + if (outcnt > SCTP_MAX_STREAM || incnt > SCTP_MAX_STREAM || + (!out && !in)) { + retval = -EINVAL; + goto out; + } + + if (out) { + struct sctp_stream_out *streamout; + + streamout = krealloc(stream->out, outcnt * sizeof(*streamout), + GFP_KERNEL); + if (!streamout) + goto out; + + memset(streamout + stream->outcnt, 0, out * sizeof(*streamout)); + stream->out = streamout; + } + + if (in) { + struct sctp_stream_in *streamin; + + streamin = krealloc(stream->in, incnt * sizeof(*streamin), + GFP_KERNEL); + if (!streamin) + goto out; + + memset(streamin + stream->incnt, 0, in * sizeof(*streamin)); + stream->in = streamin; + } + + chunk = sctp_make_strreset_addstrm(asoc, out, in); + if (!chunk) + goto out; + + asoc->strreset_chunk = chunk; + sctp_chunk_hold(asoc->strreset_chunk); + + retval = sctp_send_reconf(asoc, chunk); + if (retval) { + sctp_chunk_put(asoc->strreset_chunk); + asoc->strreset_chunk = NULL; + goto out; + } + + stream->incnt = incnt; + stream->outcnt = outcnt; + + asoc->strreset_outstanding = !!out + !!in; + +out: + return retval; +} -- cgit v1.2.3 From 15c2e102412d95af4fe88e5b28b3eba124545bbf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Feb 2017 22:24:19 +0100 Subject: ARM: orion: remove unused wnr854t_switch_plat_data The other instances of this structure got removed along with the MDIO device change, but this one was left behind and needs to be removed as well: arch/arm/mach-orion5x/wnr854t-setup.c:109:44: error: 'wnr854t_switch_plat_data' defined but not used [-Werror=unused-variable] static struct dsa_platform_data __initdata wnr854t_switch_plat_data = { Fixes: 575e93f7b5e6 ("ARM: orion: Register DSA switch as a MDIO device") Signed-off-by: Arnd Bergmann Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- arch/arm/mach-orion5x/wnr854t-setup.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c index ac3376fc269f..d162d4c7f85d 100644 --- a/arch/arm/mach-orion5x/wnr854t-setup.c +++ b/arch/arm/mach-orion5x/wnr854t-setup.c @@ -106,11 +106,6 @@ static struct dsa_chip_data wnr854t_switch_chip_data = { .port_names[7] = "lan2", }; -static struct dsa_platform_data __initdata wnr854t_switch_plat_data = { - .nr_chips = 1, - .chip = &wnr854t_switch_chip_data, -}; - static void __init wnr854t_init(void) { /* -- cgit v1.2.3 From c4e7beea21921733026b6a1bca0652c883d84680 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Wed, 8 Feb 2017 15:49:27 -0600 Subject: net: qcom/emac: add ethtool support for reading hardware registers Implement the get_regs_len and get_regs ethtool methods. The driver returns the values of selected hardware registers. The make the register offsets known to emac_ethtool, the the register offset macros are all combined into one header file. They were inexplicably and arbitrarily split between two files. Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 40 ++++++++ drivers/net/ethernet/qualcomm/emac/emac-mac.c | 52 ----------- drivers/net/ethernet/qualcomm/emac/emac.h | 108 ++++++++++++++++------ 3 files changed, 119 insertions(+), 81 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index ca616fd93312..758cd648d666 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -170,6 +170,43 @@ static int emac_set_pauseparam(struct net_device *netdev, return 0; } +/* Selected registers that might want to track during runtime. */ +static const u16 emac_regs[] = { + EMAC_DMA_MAS_CTRL, + EMAC_MAC_CTRL, + EMAC_TXQ_CTRL_0, + EMAC_RXQ_CTRL_0, + EMAC_DMA_CTRL, + EMAC_INT_MASK, + EMAC_AXI_MAST_CTRL, + EMAC_CORE_HW_VERSION, + EMAC_MISC_CTRL, +}; + +/* Every time emac_regs[] above is changed, increase this version number. */ +#define EMAC_REGS_VERSION 0 + +#define EMAC_MAX_REG_SIZE ARRAY_SIZE(emac_regs) + +static void emac_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *buff) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + u32 *val = buff; + unsigned int i; + + regs->version = EMAC_REGS_VERSION; + regs->len = EMAC_MAX_REG_SIZE * sizeof(u32); + + for (i = 0; i < EMAC_MAX_REG_SIZE; i++) + val[i] = readl(adpt->base + emac_regs[i]); +} + +static int emac_get_regs_len(struct net_device *netdev) +{ + return EMAC_MAX_REG_SIZE * sizeof(32); +} + static const struct ethtool_ops emac_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, @@ -189,6 +226,9 @@ static const struct ethtool_ops emac_ethtool_ops = { .nway_reset = emac_nway_reset, .get_link = ethtool_op_get_link, + + .get_regs_len = emac_get_regs_len, + .get_regs = emac_get_regs, }; void emac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 4b3e0144ad5e..cc065ffbe4b5 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -25,58 +25,6 @@ #include "emac.h" #include "emac-sgmii.h" -/* EMAC base register offsets */ -#define EMAC_MAC_CTRL 0x001480 -#define EMAC_WOL_CTRL0 0x0014a0 -#define EMAC_RSS_KEY0 0x0014b0 -#define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0 -#define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4 -#define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8 -#define EMAC_INTER_SRAM_PART9 0x001534 -#define EMAC_DESC_CTRL_0 0x001540 -#define EMAC_DESC_CTRL_1 0x001544 -#define EMAC_DESC_CTRL_2 0x001550 -#define EMAC_DESC_CTRL_10 0x001554 -#define EMAC_DESC_CTRL_12 0x001558 -#define EMAC_DESC_CTRL_13 0x00155c -#define EMAC_DESC_CTRL_3 0x001560 -#define EMAC_DESC_CTRL_4 0x001564 -#define EMAC_DESC_CTRL_5 0x001568 -#define EMAC_DESC_CTRL_14 0x00156c -#define EMAC_DESC_CTRL_15 0x001570 -#define EMAC_DESC_CTRL_16 0x001574 -#define EMAC_DESC_CTRL_6 0x001578 -#define EMAC_DESC_CTRL_8 0x001580 -#define EMAC_DESC_CTRL_9 0x001584 -#define EMAC_DESC_CTRL_11 0x001588 -#define EMAC_TXQ_CTRL_0 0x001590 -#define EMAC_TXQ_CTRL_1 0x001594 -#define EMAC_TXQ_CTRL_2 0x001598 -#define EMAC_RXQ_CTRL_0 0x0015a0 -#define EMAC_RXQ_CTRL_1 0x0015a4 -#define EMAC_RXQ_CTRL_2 0x0015a8 -#define EMAC_RXQ_CTRL_3 0x0015ac -#define EMAC_BASE_CPU_NUMBER 0x0015b8 -#define EMAC_DMA_CTRL 0x0015c0 -#define EMAC_MAILBOX_0 0x0015e0 -#define EMAC_MAILBOX_5 0x0015e4 -#define EMAC_MAILBOX_6 0x0015e8 -#define 
EMAC_MAILBOX_13 0x0015ec -#define EMAC_MAILBOX_2 0x0015f4 -#define EMAC_MAILBOX_3 0x0015f8 -#define EMAC_MAILBOX_11 0x00160c -#define EMAC_AXI_MAST_CTRL 0x001610 -#define EMAC_MAILBOX_12 0x001614 -#define EMAC_MAILBOX_9 0x001618 -#define EMAC_MAILBOX_10 0x00161c -#define EMAC_ATHR_HEADER_CTRL 0x001620 -#define EMAC_CLK_GATE_CTRL 0x001814 -#define EMAC_MISC_CTRL 0x001990 -#define EMAC_MAILBOX_7 0x0019e0 -#define EMAC_MAILBOX_8 0x0019e4 -#define EMAC_MAILBOX_15 0x001bd4 -#define EMAC_MAILBOX_16 0x001bd8 - /* EMAC_MAC_CTRL */ #define SINGLE_PAUSE_MODE 0x10000000 #define DEBUG_MODE 0x08000000 diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index e77fb6966cbb..8ee4ec6aef2e 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -22,35 +22,85 @@ #include "emac-sgmii.h" /* EMAC base register offsets */ -#define EMAC_DMA_MAS_CTRL 0x001400 -#define EMAC_IRQ_MOD_TIM_INIT 0x001408 -#define EMAC_BLK_IDLE_STS 0x00140c -#define EMAC_PHY_LINK_DELAY 0x00141c -#define EMAC_SYS_ALIV_CTRL 0x001434 -#define EMAC_MAC_IPGIFG_CTRL 0x001484 -#define EMAC_MAC_STA_ADDR0 0x001488 -#define EMAC_MAC_STA_ADDR1 0x00148c -#define EMAC_HASH_TAB_REG0 0x001490 -#define EMAC_HASH_TAB_REG1 0x001494 -#define EMAC_MAC_HALF_DPLX_CTRL 0x001498 -#define EMAC_MAX_FRAM_LEN_CTRL 0x00149c -#define EMAC_INT_STATUS 0x001600 -#define EMAC_INT_MASK 0x001604 -#define EMAC_RXMAC_STATC_REG0 0x001700 -#define EMAC_RXMAC_STATC_REG22 0x001758 -#define EMAC_TXMAC_STATC_REG0 0x001760 -#define EMAC_TXMAC_STATC_REG24 0x0017c0 -#define EMAC_CORE_HW_VERSION 0x001974 -#define EMAC_IDT_TABLE0 0x001b00 -#define EMAC_RXMAC_STATC_REG23 0x001bc8 -#define EMAC_RXMAC_STATC_REG24 0x001bcc -#define EMAC_TXMAC_STATC_REG25 0x001bd0 -#define EMAC_INT1_MASK 0x001bf0 -#define EMAC_INT1_STATUS 0x001bf4 -#define EMAC_INT2_MASK 0x001bf8 -#define EMAC_INT2_STATUS 0x001bfc -#define EMAC_INT3_MASK 0x001c00 -#define EMAC_INT3_STATUS 0x001c04 +#define EMAC_DMA_MAS_CTRL 0x1400 +#define EMAC_IRQ_MOD_TIM_INIT 0x1408 +#define EMAC_BLK_IDLE_STS 0x140c +#define EMAC_PHY_LINK_DELAY 0x141c +#define EMAC_SYS_ALIV_CTRL 0x1434 +#define EMAC_MAC_CTRL 0x1480 +#define EMAC_MAC_IPGIFG_CTRL 0x1484 +#define EMAC_MAC_STA_ADDR0 0x1488 +#define EMAC_MAC_STA_ADDR1 0x148c +#define EMAC_HASH_TAB_REG0 0x1490 +#define EMAC_HASH_TAB_REG1 0x1494 +#define EMAC_MAC_HALF_DPLX_CTRL 0x1498 +#define EMAC_MAX_FRAM_LEN_CTRL 0x149c +#define EMAC_WOL_CTRL0 0x14a0 +#define EMAC_RSS_KEY0 0x14b0 +#define EMAC_H1TPD_BASE_ADDR_LO 0x14e0 +#define EMAC_H2TPD_BASE_ADDR_LO 0x14e4 +#define EMAC_H3TPD_BASE_ADDR_LO 0x14e8 +#define EMAC_INTER_SRAM_PART9 0x1534 +#define EMAC_DESC_CTRL_0 0x1540 +#define EMAC_DESC_CTRL_1 0x1544 +#define EMAC_DESC_CTRL_2 0x1550 +#define EMAC_DESC_CTRL_10 0x1554 +#define EMAC_DESC_CTRL_12 0x1558 +#define EMAC_DESC_CTRL_13 0x155c +#define EMAC_DESC_CTRL_3 0x1560 +#define EMAC_DESC_CTRL_4 0x1564 +#define EMAC_DESC_CTRL_5 0x1568 +#define EMAC_DESC_CTRL_14 0x156c +#define EMAC_DESC_CTRL_15 0x1570 +#define EMAC_DESC_CTRL_16 0x1574 +#define EMAC_DESC_CTRL_6 0x1578 +#define EMAC_DESC_CTRL_8 0x1580 +#define EMAC_DESC_CTRL_9 0x1584 +#define EMAC_DESC_CTRL_11 0x1588 +#define EMAC_TXQ_CTRL_0 0x1590 +#define EMAC_TXQ_CTRL_1 0x1594 +#define EMAC_TXQ_CTRL_2 0x1598 +#define EMAC_RXQ_CTRL_0 0x15a0 +#define EMAC_RXQ_CTRL_1 0x15a4 +#define EMAC_RXQ_CTRL_2 0x15a8 +#define EMAC_RXQ_CTRL_3 0x15ac +#define EMAC_BASE_CPU_NUMBER 0x15b8 +#define EMAC_DMA_CTRL 0x15c0 +#define EMAC_MAILBOX_0 0x15e0 +#define 
EMAC_MAILBOX_5 0x15e4 +#define EMAC_MAILBOX_6 0x15e8 +#define EMAC_MAILBOX_13 0x15ec +#define EMAC_MAILBOX_2 0x15f4 +#define EMAC_MAILBOX_3 0x15f8 +#define EMAC_INT_STATUS 0x1600 +#define EMAC_INT_MASK 0x1604 +#define EMAC_MAILBOX_11 0x160c +#define EMAC_AXI_MAST_CTRL 0x1610 +#define EMAC_MAILBOX_12 0x1614 +#define EMAC_MAILBOX_9 0x1618 +#define EMAC_MAILBOX_10 0x161c +#define EMAC_ATHR_HEADER_CTRL 0x1620 +#define EMAC_RXMAC_STATC_REG0 0x1700 +#define EMAC_RXMAC_STATC_REG22 0x1758 +#define EMAC_TXMAC_STATC_REG0 0x1760 +#define EMAC_TXMAC_STATC_REG24 0x17c0 +#define EMAC_CLK_GATE_CTRL 0x1814 +#define EMAC_CORE_HW_VERSION 0x1974 +#define EMAC_MISC_CTRL 0x1990 +#define EMAC_MAILBOX_7 0x19e0 +#define EMAC_MAILBOX_8 0x19e4 +#define EMAC_IDT_TABLE0 0x1b00 +#define EMAC_RXMAC_STATC_REG23 0x1bc8 +#define EMAC_RXMAC_STATC_REG24 0x1bcc +#define EMAC_TXMAC_STATC_REG25 0x1bd0 +#define EMAC_MAILBOX_15 0x1bd4 +#define EMAC_MAILBOX_16 0x1bd8 +#define EMAC_INT1_MASK 0x1bf0 +#define EMAC_INT1_STATUS 0x1bf4 +#define EMAC_INT2_MASK 0x1bf8 +#define EMAC_INT2_STATUS 0x1bfc +#define EMAC_INT3_MASK 0x1c00 +#define EMAC_INT3_STATUS 0x1c04 /* EMAC_DMA_MAS_CTRL */ #define DEV_ID_NUM_BMSK 0x7f000000 -- cgit v1.2.3 From 038b9404d4e2db4fbc03d5d2203abafc6e188528 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Wed, 8 Feb 2017 15:49:28 -0600 Subject: net: qcom/emac: add ethtool support for setting ring parameters Implement the set_ringparam method, which allows the user to specify the size of the TX and RX descriptor rings. The values are constrained to the limits of the hardware. Since the driver does not use separate queues for mini or jumbo frames, attempts to set those values are rejected. If the interface is already running when the setting is changed, then the interface is reset. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 24 +++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index 758cd648d666..0d9945fb79be 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -145,6 +145,29 @@ static void emac_get_ringparam(struct net_device *netdev, ring->tx_pending = adpt->tx_desc_cnt; } +static int emac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + /* We don't have separate queues/rings for small/large frames, so + * reject any attempt to specify those values separately. 
+ */ + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + adpt->tx_desc_cnt = + clamp_val(ring->tx_pending, EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS); + + adpt->rx_desc_cnt = + clamp_val(ring->rx_pending, EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS); + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; +} + static void emac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { @@ -219,6 +242,7 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_ethtool_stats = emac_get_ethtool_stats, .get_ringparam = emac_get_ringparam, + .set_ringparam = emac_set_ringparam, .get_pauseparam = emac_get_pauseparam, .set_pauseparam = emac_set_pauseparam, -- cgit v1.2.3 From 50f008e583d11b3133f23f29b05a0fcac2af3b40 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 8 Feb 2017 14:40:04 -0800 Subject: net: dsa: Fix duplicate object rule While adding switch.o to the list of DSA object files, we essentially duplicated the previous obj-y line and just added switch.o, remove the duplicate. Fixes: f515f192ab4f ("net: dsa: add switch notifier") Signed-off-by: Florian Fainelli Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 72912982de3d..31d343796251 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -1,6 +1,5 @@ # the core obj-$(CONFIG_NET_DSA) += dsa_core.o -dsa_core-y += dsa.o slave.o dsa2.o dsa_core-y += dsa.o slave.o dsa2.o switch.o # tagging formats -- cgit v1.2.3 From c0e4dadb3494bd0bf4fdeac341509deac8f4b47c Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Feb 2017 00:00:43 +0100 Subject: net: dsa: mv88e6xxx: Move forward declaration to where it is needed Move it out from the middle for the #defines to just before it is needed. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 8a21800374f3..d6b335cd8c09 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -662,8 +662,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAGS_PVT | \ MV88E6XXX_FLAGS_SERDES) -struct mv88e6xxx_ops; - #define MV88E6XXX_FLAGS_FAMILY_6390 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ @@ -673,6 +671,8 @@ struct mv88e6xxx_ops; MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) +struct mv88e6xxx_ops; + struct mv88e6xxx_info { enum mv88e6xxx_family family; u16 prod_num; -- cgit v1.2.3 From ca0291798227a6da0f3ba6c2e3a43d94d5dcf591 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan Date: Wed, 8 Feb 2017 16:43:07 -0800 Subject: enic: add devcmds for vxlan offload This patch adds devcmds needed for vxlan offload. Implement 3 new devcmd overlay_offload_ctrl: enable/disable offload overlay_offload_cfg: update offload udp port number get_supported_feature_ver: get hw supported offload version. Each version has different bitmap for csum_ok/encap Signed-off-by: Govindarajulu Varadarajan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cisco/enic/vnic_dev.c | 34 ++++++++++++++++++ drivers/net/ethernet/cisco/enic/vnic_dev.h | 5 +++ drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 51 +++++++++++++++++++++++++++ drivers/net/ethernet/cisco/enic/vnic_enet.h | 1 + 4 files changed, 91 insertions(+) diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 8f27df3207bc..1841ad45d215 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -1247,3 +1247,37 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, return ret; } + +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config) +{ + u64 a0 = overlay; + u64 a1 = config; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait); +} + +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, + u16 vxlan_udp_port_number) +{ + u64 a1 = vxlan_udp_port_number; + u64 a0 = overlay; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait); +} + +int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, + u64 *supported_versions) +{ + u64 a0 = feature; + int wait = 1000; + u64 a1 = 0; + int ret; + + ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); + if (!ret) + *supported_versions = a0; + + return ret; +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 54156c484424..9d43d6bb9907 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -179,5 +179,10 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter *data); int vnic_devcmd_init(struct vnic_dev *vdev); +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config); +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, + u16 vxlan_udp_port_number); +int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, + u64 *supported_versions); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index 2a812880b884..d83880b0d468 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -406,6 +406,31 @@ enum vnic_devcmd_cmd { * in: (u32) a0=Queue Pair number */ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63), + + /* Use this devcmd for agreeing on the highest common version supported + * by both driver and fw for features who need such a facility. 
+ * in: (u64) a0 = feature (driver requests for the supported versions + * on this feature) + * out: (u64) a0 = bitmap of all supported versions for that feature + */ + CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69), + + /* Control (Enable/Disable) overlay offloads on the given vnic + * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE + * a0 = OVERLAY_FEATURE_VXLAN : VxLAN + * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or + * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or + * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2 + */ + CMD_OVERLAY_OFFLOAD_CTRL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72), + + /* Configuration of overlay offloads feature on a given vNIC + * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE + * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN + * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN + * in: (u16) a2 = unsigned short int port information + */ + CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73), }; /* CMD_ENABLE2 flags */ @@ -657,4 +682,30 @@ struct devcmd2_result { #define DEVCMD2_RING_SIZE 32 #define DEVCMD2_DESC_SIZE 128 +enum overlay_feature_t { + OVERLAY_FEATURE_NVGRE = 1, + OVERLAY_FEATURE_VXLAN, + OVERLAY_FEATURE_MAX, +}; + +enum overlay_ofld_cmd { + OVERLAY_OFFLOAD_ENABLE, + OVERLAY_OFFLOAD_DISABLE, + OVERLAY_OFFLOAD_ENABLE_P2, + OVERLAY_OFFLOAD_MAX, +}; + +#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0 + +/* Use this enum to get the supported versions for each of these features + * If you need to use the devcmd_get_supported_feature_version(), add + * the new feature into this enum and install function handler in devcmd.c + */ +enum vic_feature_t { + VIC_FEATURE_VXLAN, + VIC_FEATURE_RDMA, + VIC_FEATURE_VXLAN_PATCH, + VIC_FEATURE_MAX, +}; + #endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h index 75aced2de869..7d6fbb5635a4 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_enet.h +++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h @@ -48,6 +48,7 @@ struct vnic_enet_config { #define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ #define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */ #define VENETF_LOOP 0x800 /* Loopback enabled */ +#define VENETF_VXLAN 0x10000 /* VxLAN offload */ #define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ #define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ -- cgit v1.2.3 From 257e738238e8ee3960957097be4bf6be6c66a3c4 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan Date: Wed, 8 Feb 2017 16:43:08 -0800 Subject: enic: add udp_tunnel ndo for vxlan offload Defines enic_udp_tunnel_add/del for configuring vxlan tunnel offload. enic supports offload of only one ipv4/udp port. There are two modes that fw supports for vxlan offload. mode 0: fcoe bit is set for encapsulated packet. fcoe_fc_crc_ok is set if checksum of csum is ok. This bit is or of ip_csum_ok and tcp_udp_csum_ok mode 2: BIT(0) in rss_hash is set if it is encapsulated packet. BIT(1) is set if outer_ip_csum_ok/ BIT(2) is set if outer_tcp_csum_ok tcp_udp_csum_ok/ipv4_csum_ok is set if inner csum is OK. Signed-off-by: Govindarajulu Varadarajan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cisco/enic/enic.h | 6 ++ drivers/net/ethernet/cisco/enic/enic_main.c | 156 +++++++++++++++++++++++++++- 2 files changed, 159 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 9023c858715d..2b23f46b34d3 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -135,6 +135,11 @@ struct enic_rfs_flw_tbl { struct timer_list rfs_may_expire; }; +struct vxlan_offload { + u16 vxlan_udp_port_number; + u8 patch_level; +}; + /* Per-instance private data structure */ struct enic { struct net_device *netdev; @@ -175,6 +180,7 @@ struct enic { /* receive queue cache line section */ ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; unsigned int rq_count; + struct vxlan_offload vxlan; u64 rq_truncated_pkts; u64 rq_bad_fcs; struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX]; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c009f6ddabf7..7e56bf95cfc7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -45,6 +45,7 @@ #endif #include #include +#include #include "cq_enet_desc.h" #include "vnic_dev.h" @@ -176,6 +177,92 @@ static void enic_unset_affinity_hint(struct enic *enic) irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); } +static void enic_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct enic *enic = netdev_priv(netdev); + __be16 port = ti->port; + int err; + + spin_lock_bh(&enic->devcmd_lock); + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) { + netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported"); + goto error; + } + + if (ti->sa_family != AF_INET) { + netdev_info(netdev, "vxlan: only IPv4 offload supported"); + goto error; + } + + if (enic->vxlan.vxlan_udp_port_number) { + if (ntohs(port) == enic->vxlan.vxlan_udp_port_number) + netdev_warn(netdev, "vxlan: udp port already offloaded"); + else + netdev_info(netdev, "vxlan: offload supported for only one UDP port"); + + goto error; + } + + err = vnic_dev_overlay_offload_cfg(enic->vdev, + OVERLAY_CFG_VXLAN_PORT_UPDATE, + ntohs(port)); + if (err) + goto error; + + err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, + enic->vxlan.patch_level); + if (err) + goto error; + + enic->vxlan.vxlan_udp_port_number = ntohs(port); + + netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ", + (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family); + + goto unlock; + +error: + netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d", + ntohs(port), ti->sa_family, ti->type); +unlock: + spin_unlock_bh(&enic->devcmd_lock); +} + +static void enic_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct enic *enic = netdev_priv(netdev); + int err; + + spin_lock_bh(&enic->devcmd_lock); + + if ((ti->sa_family != AF_INET) || + ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number)) || + (ti->type != UDP_TUNNEL_TYPE_VXLAN)) { + netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded", + ntohs(ti->port), ti->sa_family, ti->type); + goto unlock; + } + + err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, + OVERLAY_OFFLOAD_DISABLE); + if (err) { + netdev_err(netdev, "vxlan: del offload udp port: %d failed", + ntohs(ti->port)); + goto unlock; + } + + enic->vxlan.vxlan_udp_port_number = 0; + + netdev_info(netdev, "vxlan: 
del offload udp port %d, family %d\n", + ntohs(ti->port), ti->sa_family); + +unlock: + spin_unlock_bh(&enic->devcmd_lock); +} + int enic_is_dynamic(struct enic *enic) { return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; @@ -1113,6 +1200,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, u8 packet_error; u16 q_number, completed_index, bytes_written, vlan_tci, checksum; u32 rss_hash; + bool outer_csum_ok = true, encap = false; if (skipped) return; @@ -1161,7 +1249,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, skb_put(skb, bytes_written); skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, q_number); - if (netdev->features & NETIF_F_RXHASH) { + if ((netdev->features & NETIF_F_RXHASH) && rss_hash && + (type == 3)) { switch (rss_type) { case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4: case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: @@ -1175,15 +1264,39 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, break; } } + if (enic->vxlan.vxlan_udp_port_number) { + switch (enic->vxlan.patch_level) { + case 0: + if (fcoe) { + encap = true; + outer_csum_ok = fcoe_fc_crc_ok; + } + break; + case 2: + if ((type == 7) && + (rss_hash & BIT(0))) { + encap = true; + outer_csum_ok = (rss_hash & BIT(1)) && + (rss_hash & BIT(2)); + } + break; + } + } /* Hardware does not provide whole packet checksum. It only * provides pseudo checksum. Since hw validates the packet * checksum but not provide us the checksum value. use * CHECSUM_UNNECESSARY. + * + * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is + * inner csum_ok. outer_csum_ok is set by hw when outer udp + * csum is correct or is zero. */ - if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok && - ipv4_csum_ok) + if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && + tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = encap; + } if (vlan_stripped) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); @@ -2285,6 +2398,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif + .ndo_udp_tunnel_add = enic_udp_tunnel_add, + .ndo_udp_tunnel_del = enic_udp_tunnel_del, }; static const struct net_device_ops enic_netdev_ops = { @@ -2308,6 +2423,8 @@ static const struct net_device_ops enic_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif + .ndo_udp_tunnel_add = enic_udp_tunnel_add, + .ndo_udp_tunnel_del = enic_udp_tunnel_del, }; static void enic_dev_deinit(struct enic *enic) @@ -2683,6 +2800,39 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_RXHASH; if (ENIC_SETTING(enic, RXCSUM)) netdev->hw_features |= NETIF_F_RXCSUM; + if (ENIC_SETTING(enic, VXLAN)) { + u64 patch_level; + + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_HW_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_features |= netdev->hw_enc_features; + /* get bit mask from hw about supported offload bit level + * BIT(0) = fw supports patch_level 0 + * fcoe bit = encap + * fcoe_fc_crc_ok = outer csum ok + * BIT(1) = always set by fw + * BIT(2) = fw supports patch_level 2 + * BIT(0) in rss_hash = encap + * BIT(1,2) in rss_hash = outer_ip_csum_ok/ + * outer_tcp_csum_ok + * used in enic_rq_indicate_buf + */ + err = vnic_dev_get_supported_feature_ver(enic->vdev, + VIC_FEATURE_VXLAN, + &patch_level); + if (err) + patch_level = 0; + /* mask bits 
that are supported by driver + */ + patch_level &= BIT_ULL(0) | BIT_ULL(2); + patch_level = fls(patch_level); + patch_level = patch_level ? patch_level - 1 : 0; + enic->vxlan.patch_level = patch_level; + } netdev->features |= netdev->hw_features; netdev->vlan_features |= netdev->features; -- cgit v1.2.3 From 9c744d10870c5d6f36264be39bf5de87447f1052 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan Date: Wed, 8 Feb 2017 16:43:09 -0800 Subject: enic: add vxlan offload on tx path Define ndo_features_check. Hw supports offload only for ipv4 inner and ipv4 outer pkt. Code refactor for setting inner tcp pseudo csum. Signed-off-by: Govindarajulu Varadarajan Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/enic_main.c | 126 +++++++++++++++++++++++++--- 1 file changed, 114 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 7e56bf95cfc7..4b87beeabce1 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -263,6 +263,48 @@ unlock: spin_unlock_bh(&enic->devcmd_lock); } +static netdev_features_t enic_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb); + struct enic *enic = netdev_priv(dev); + struct udphdr *udph; + u16 port = 0; + u16 proto; + + if (!skb->encapsulation) + return features; + + features = vxlan_features_check(skb, features); + + /* hardware only supports IPv4 vxlan tunnel */ + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + goto out; + + /* hardware does not support offload of ipv6 inner pkt */ + if (eth->h_proto != ntohs(ETH_P_IP)) + goto out; + + proto = ip_hdr(skb)->protocol; + + if (proto == IPPROTO_UDP) { + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); + } + + /* HW supports offload of only one UDP port. Remove CSUM and GSO MASK + * for other UDP port tunnels + */ + if (port != enic->vxlan.vxlan_udp_port_number) + goto out; + + return features; + +out: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + int enic_is_dynamic(struct enic *enic) { return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; @@ -591,20 +633,19 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, return err; } -static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, - struct sk_buff *skb, unsigned int mss, - int vlan_tag_insert, unsigned int vlan_tag, - int loopback) +static void enic_preload_tcp_csum_encap(struct sk_buff *skb) { - unsigned int frag_len_left = skb_headlen(skb); - unsigned int len_left = skb->len - frag_len_left; - unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int eop = (len_left == 0); - unsigned int len; - dma_addr_t dma_addr; - unsigned int offset = 0; - skb_frag_t *frag; + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + inner_ip_hdr(skb)->check = 0; + inner_tcp_hdr(skb)->check = + ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, 0, + IPPROTO_TCP, 0); + } +} +static void enic_preload_tcp_csum(struct sk_buff *skb) +{ /* Preload TCP csum field with IP pseudo hdr calculated * with IP length set to zero. HW will later add in length * to each TCP segment resulting from the TSO. 
@@ -618,6 +659,30 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } +} + +static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, unsigned int mss, + int vlan_tag_insert, unsigned int vlan_tag, + int loopback) +{ + unsigned int frag_len_left = skb_headlen(skb); + unsigned int len_left = skb->len - frag_len_left; + int eop = (len_left == 0); + unsigned int offset = 0; + unsigned int hdr_len; + dma_addr_t dma_addr; + unsigned int len; + skb_frag_t *frag; + + if (skb->encapsulation) { + hdr_len = skb_inner_transport_header(skb) - skb->data; + hdr_len += inner_tcp_hdrlen(skb); + enic_preload_tcp_csum_encap(skb); + } else { + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + enic_preload_tcp_csum(skb); + } /* Queue WQ_ENET_MAX_DESC_LEN length descriptors * for the main skb fragment @@ -666,6 +731,38 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, return 0; } +static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, + int vlan_tag_insert, + unsigned int vlan_tag, int loopback) +{ + unsigned int head_len = skb_headlen(skb); + unsigned int len_left = skb->len - head_len; + /* Hardware will overwrite the checksum fields, calculating from + * scratch and ignoring the value placed by software. + * Offload mode = 00 + * mss[2], mss[1], mss[0] bits are set + */ + unsigned int mss_or_csum = 7; + int eop = (len_left == 0); + dma_addr_t dma_addr; + int err = 0; + + dma_addr = pci_map_single(enic->pdev, skb->data, head_len, + PCI_DMA_TODEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) + return -ENOMEM; + + enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0, + vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop, + loopback); + if (!eop) + err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + + return err; +} + static inline void enic_queue_wq_skb(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb) { @@ -688,6 +785,9 @@ static inline void enic_queue_wq_skb(struct enic *enic, err = enic_queue_wq_skb_tso(enic, wq, skb, mss, vlan_tag_insert, vlan_tag, loopback); + else if (skb->encapsulation) + err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert, + vlan_tag, loopback); else if (skb->ip_summed == CHECKSUM_PARTIAL) err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); @@ -2400,6 +2500,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { #endif .ndo_udp_tunnel_add = enic_udp_tunnel_add, .ndo_udp_tunnel_del = enic_udp_tunnel_del, + .ndo_features_check = enic_features_check, }; static const struct net_device_ops enic_netdev_ops = { @@ -2425,6 +2526,7 @@ static const struct net_device_ops enic_netdev_ops = { #endif .ndo_udp_tunnel_add = enic_udp_tunnel_add, .ndo_udp_tunnel_del = enic_udp_tunnel_del, + .ndo_features_check = enic_features_check, }; static void enic_dev_deinit(struct enic *enic) -- cgit v1.2.3 From fdeea0ad87fd05e5fcb5e7b6643eabc34f29405a Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:27 +0200 Subject: net/ena: remove ntuple filter support from device feature list Remove NETIF_F_NTUPLE from netdev->features. The ENA device driver does not support ntuple filtering. Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index aca95b397393..d8cd9cac6cf0 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2720,7 +2720,6 @@ static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, netdev->features = dev_features | NETIF_F_SG | - NETIF_F_NTUPLE | NETIF_F_RXHASH | NETIF_F_HIGHDMA; -- cgit v1.2.3 From 6a1ce2fb67161f249b372a87d1d9899d8b087c8e Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:28 +0200 Subject: net/ena: fix queues number calculation The ENA driver tries to open a queue per vCPU. To determine how many vCPUs the instance has, it uses num_possible_cpus() while it should have used num_online_cpus() instead. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d8cd9cac6cf0..effe686dbcdb 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2658,7 +2658,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, io_sq_num = get_feat_ctx->max_queues.max_sq_num; } - io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES); + io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); io_queue_num = min_t(int, io_queue_num, io_sq_num); io_queue_num = min_t(int, io_queue_num, get_feat_ctx->max_queues.max_cq_num); -- cgit v1.2.3 From 6e2de20ddc4b606a9b6b170a6bb21ee4dc4ad93a Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:29 +0200 Subject: net/ena: fix ethtool RSS flow configuration ena_flow_data_to_flow_hash and ena_flow_hash_to_flow_type treat the ena_flow_hash_to_flow_type enum as power-of-two values. Change the values of ena_admin_flow_hash_fields to be power-of-two values. This bug affects the ethtool set/get rxnfc. ethtool will report wrong hash field values on get and will configure wrong hash fields on set. Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index a46e749bf226..e1594d6d6789 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -631,22 +631,22 @@ enum ena_admin_flow_hash_proto { /* RSS flow hash fields */ enum ena_admin_flow_hash_fields { /* Ethernet Dest Addr */ - ENA_ADMIN_RSS_L2_DA = 0, + ENA_ADMIN_RSS_L2_DA = BIT(0), /* Ethernet Src Addr */ - ENA_ADMIN_RSS_L2_SA = 1, + ENA_ADMIN_RSS_L2_SA = BIT(1), /* ipv4/6 Dest Addr */ - ENA_ADMIN_RSS_L3_DA = 2, + ENA_ADMIN_RSS_L3_DA = BIT(2), /* ipv4/6 Src Addr */ - ENA_ADMIN_RSS_L3_SA = 5, + ENA_ADMIN_RSS_L3_SA = BIT(3), /* tcp/udp Dest Port */ - ENA_ADMIN_RSS_L4_DP = 6, + ENA_ADMIN_RSS_L4_DP = BIT(4), /* tcp/udp Src Port */ - ENA_ADMIN_RSS_L4_SP = 7, + ENA_ADMIN_RSS_L4_SP = BIT(5), }; struct ena_admin_proto_input { -- cgit v1.2.3 From 422e21e7619bb8751aa1cd32a9b671b1baaf3d18 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:30 +0200 Subject: net/ena: fix RSS default hash configuration ENA default hash configures the IPv4_frag hash twice instead of configuring non-IP packets. The bug caused the hash for IPv4 fragmented packets to be calculated based on the L2 source and destination addresses instead of the L3 source and destination addresses. IPv4 packets can reach the wrong Rx queue. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 3066d9c99984..46aad3a4f6e3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2184,7 +2184,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; - hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { -- cgit v1.2.3 From 22b331c9e0a345126708af60f7d00d38b53db70b Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:31 +0200 Subject: net/ena: fix NULL dereference when removing the driver after device reset failed If for some reason the device stops responding, and the device reset fails to recover the device, the mmio register read data structure will not be reinitialized. On driver removal, the driver will also try to reset the device, but this time the mmio data structure will be NULL. To solve this issue, perform the device reset in the remove function only if the device is running. 
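In outline, the change gates the reset in ena_remove() on the adapter's running flag and clears that flag when a reset attempt fails. A condensed sketch of the two hunks in the diff below, not the complete patch:

	/* ena_remove(): only reset hardware that is still initialized */
	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		ena_com_dev_reset(ena_dev);

	/* failed-reset error path: record that the device is no longer running */
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
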
Crash log 54.240382] BUG: unable to handle kernel NULL pointer dereference at (null) [ 54.244186] IP: [] ena_com_reg_bar_read32+0x8a/0x180 [ena_drv] [ 54.244186] PGD 0 [ 54.244186] Oops: 0002 [#1] SMP [ 54.244186] Modules linked in: ena_drv(OE-) snd_hda_codec_generic kvm_intel kvm crct10dif_pclmul ppdev crc32_pclmul ghash_clmulni_intel aesni_intel snd_hda_intel aes_x86_64 snd_hda_controller lrw gf128mul cirrus glue_helper ablk_helper ttm snd_hda_codec drm_kms_helper cryptd snd_hwdep drm snd_pcm pvpanic snd_timer syscopyarea sysfillrect snd parport_pc sysimgblt serio_raw soundcore i2c_piix4 mac_hid lp parport psmouse floppy [ 54.244186] CPU: 5 PID: 1841 Comm: rmmod Tainted: G OE 3.16.0-031600-generic #201408031935 [ 54.244186] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 [ 54.244186] task: ffff880135852880 ti: ffff8800bb640000 task.ti: ffff8800bb640000 [ 54.244186] RIP: 0010:[] [] ena_com_reg_bar_read32+0x8a/0x180 [ena_drv] [ 54.244186] RSP: 0018:ffff8800bb643d50 EFLAGS: 00010083 [ 54.244186] RAX: 000000000000deb0 RBX: 0000000000030d40 RCX: 0000000000000003 [ 54.244186] RDX: 0000000000000202 RSI: 0000000000000058 RDI: ffffc90000775104 [ 54.244186] RBP: ffff8800bb643d88 R08: 0000000000000000 R09: cf00000000000000 [ 54.244186] R10: 0000000fffffffe0 R11: 0000000000000001 R12: 0000000000000000 [ 54.244186] R13: ffffc90000765000 R14: ffffc90000775104 R15: 00007fca1fa98090 [ 54.244186] FS: 00007fca1f1bd740(0000) GS:ffff88013fd40000(0000) knlGS:0000000000000000 [ 54.244186] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 54.244186] CR2: 0000000000000000 CR3: 00000000b9cf6000 CR4: 00000000001406e0 [ 54.244186] Stack: [ 54.244186] 0000000000000202 0000005800000286 ffffc90000765000 ffffc90000765000 [ 54.244186] ffff880135f6b000 ffff8800b9360000 00007fca1fa98090 ffff8800bb643db8 [ 54.244186] ffffffffc0680b3d ffff8800b93608c0 ffffc90000765000 ffff880135f6b000 [ 54.244186] Call Trace: [ 54.244186] [] ena_com_dev_reset+0x1d/0x1b0 [ena_drv] [ 54.244186] [] ena_remove+0xa7/0x130 [ena_drv] [ 54.244186] [] pci_device_remove+0x46/0xc0 [ 54.244186] [] __device_release_driver+0x7f/0xf0 [ 54.244186] [] driver_detach+0xc8/0xd0 [ 54.244186] [] bus_remove_driver+0x59/0xd0 [ 54.244186] [] driver_unregister+0x2e/0x60 [ 54.244186] [] ? show_refcnt+0x40/0x40 [ 54.244186] [] pci_unregister_driver+0x23/0xa0 [ 54.244186] [] ena_cleanup+0x10/0xed1 [ena_drv] [ 54.244186] [] SyS_delete_module+0x157/0x1e0 [ 54.244186] [] ? do_notify_resume+0xc7/0xd0 [ 54.244186] [] system_call_fastpath+0x1a/0x1f [ 54.244186] Code: c3 4d 8d b5 04 01 01 00 4c 89 f7 e8 e1 5a 11 c1 48 89 45 c8 41 0f b7 85 00 01 01 00 8d 48 01 66 2d 52 21 66 41 89 8d 00 01 01 00 <66> 41 89 04 24 0f b7 45 d4 89 45 d0 89 c1 41 0f b7 85 00 01 01 [ 54.244186] RIP [] ena_com_reg_bar_read32+0x8a/0x180 [ena_drv] [ 54.244186] RSP [ 54.244186] CR2: 0000000000000000 [ 54.244186] ---[ end trace 18dd9889b6497810 ]--- Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index effe686dbcdb..d1aa7b63f797 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2507,6 +2507,8 @@ err_device_destroy: err: rtnl_unlock(); + clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + dev_err(&pdev->dev, "Reset attempt failed. 
Can not reset the device\n"); } @@ -3115,7 +3117,9 @@ static void ena_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->resume_io_task); - ena_com_dev_reset(ena_dev); + /* Reset the device only if the device is running. */ + if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) + ena_com_dev_reset(ena_dev); ena_free_mgmnt_irq(adapter); -- cgit v1.2.3 From d81db24056132fe8b83e2fba337e9ea76675e68d Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:32 +0200 Subject: net/ena: refactor ena_get_stats64 to be atomic context safe ndo_get_stat64() can be called from atomic context, but the current implementation sends an admin command to retrieve the statistics from the device. This admin command can sleep. This patch re-factors the implementation of ena_get_stats64() to use the {rx,tx}bytes/count from the driver's inner counters, and to obtain the rx drop counter from the asynchronous keep alive (heart bit) event. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 8 ++++ drivers/net/ethernet/amazon/ena/ena_netdev.c | 48 ++++++++++++++++-------- drivers/net/ethernet/amazon/ena/ena_netdev.h | 1 + 3 files changed, 42 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index e1594d6d6789..5b6509d59716 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -873,6 +873,14 @@ struct ena_admin_aenq_link_change_desc { u32 flags; }; +struct ena_admin_aenq_keep_alive_desc { + struct ena_admin_aenq_common_desc aenq_common_desc; + + u32 rx_drops_low; + + u32 rx_drops_high; +}; + struct ena_admin_ena_mmio_req_read_less_resp { u16 req_id; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d1aa7b63f797..54493e13dcaf 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2169,28 +2169,46 @@ static void ena_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ena_adapter *adapter = netdev_priv(netdev); - struct ena_admin_basic_stats ena_stats; - int rc; + struct ena_ring *rx_ring, *tx_ring; + unsigned int start; + u64 rx_drops; + int i; if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) return; - rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats); - if (rc) - return; + for (i = 0; i < adapter->num_queues; i++) { + u64 bytes, packets; + + tx_ring = &adapter->tx_ring[i]; - stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) | - ena_stats.tx_bytes_low; - stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) | - ena_stats.rx_bytes_low; + do { + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); + packets = tx_ring->tx_stats.cnt; + bytes = tx_ring->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); - stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) | - ena_stats.rx_pkts_low; - stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) | - ena_stats.tx_pkts_low; + stats->tx_packets += packets; + stats->tx_bytes += bytes; + + rx_ring = &adapter->rx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + packets = rx_ring->rx_stats.cnt; + bytes = rx_ring->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + do { + start = u64_stats_fetch_begin_irq(&adapter->syncp); + 
rx_drops = adapter->dev_stats.rx_drops; + } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); - stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) | - ena_stats.rx_drops_low; + stats->rx_dropped = rx_drops; stats->multicast = 0; stats->collisions = 0; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 69d7e9ed5bc8..f0ddc117d976 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -241,6 +241,7 @@ struct ena_stats_dev { u64 interface_up; u64 interface_down; u64 admin_q_pause; + u64 rx_drops; }; enum ena_flags_t { -- cgit v1.2.3 From 3f6159dbfc24c5e61fb5deb9b69e0abb934609bb Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:33 +0200 Subject: net/ena: fix potential access to freed memory during device reset If the ena driver detects that the device is not behave as expected, it tries to reset the device. The reset flow calls ena_down, which will frees all the resources the driver allocates and then it will reset the device. This flow can cause memory corruption if the device is still writes to the driver's memory space. To overcome this potential race, move the reset before the device resources are freed. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 56 +++++++++++++++++++++------- 1 file changed, 43 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 54493e13dcaf..8ca1ba3344d2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -80,14 +80,18 @@ static void ena_tx_timeout(struct net_device *dev) { struct ena_adapter *adapter = netdev_priv(dev); + /* Change the state of the device to trigger reset + * Check that we are not in the middle or a trigger already + */ + + if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + u64_stats_update_begin(&adapter->syncp); adapter->dev_stats.tx_timeout++; u64_stats_update_end(&adapter->syncp); netif_err(adapter, tx_err, dev, "Transmit time out\n"); - - /* Change the state of the device to trigger reset */ - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu) @@ -1109,7 +1113,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget) tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; - if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { + if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || + test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { napi_complete_done(napi, 0); return 0; } @@ -1698,12 +1703,22 @@ static void ena_down(struct ena_adapter *adapter) adapter->dev_stats.interface_down++; u64_stats_update_end(&adapter->syncp); - /* After this point the napi handler won't enable the tx queue */ - ena_napi_disable_all(adapter); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); + /* After this point the napi handler won't enable the tx queue */ + ena_napi_disable_all(adapter); + /* After destroy the queue there won't be any new interrupts */ + + if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { + int rc; + + rc = ena_com_dev_reset(adapter->ena_dev); + if (rc) + dev_err(&adapter->pdev->dev, "Device reset failed\n"); + } + ena_destroy_all_io_queues(adapter); ena_disable_io_intr_sync(adapter); @@ -2065,6 +2080,14 @@ static void 
ena_netpoll(struct net_device *netdev) struct ena_adapter *adapter = netdev_priv(netdev); int i; + /* Dont schedule NAPI if the driver is in the middle of reset + * or netdev is down. + */ + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) || + test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + for (i = 0; i < adapter->num_queues; i++) napi_schedule(&adapter->ena_napi[i].napi); } @@ -2449,6 +2472,14 @@ static void ena_fw_reset_device(struct work_struct *work) bool dev_up, wd_state; int rc; + if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + dev_err(&pdev->dev, + "device reset schedule while reset bit is off\n"); + return; + } + + netif_carrier_off(netdev); + del_timer_sync(&adapter->timer_service); rtnl_lock(); @@ -2462,12 +2493,6 @@ static void ena_fw_reset_device(struct work_struct *work) */ ena_close(netdev); - rc = ena_com_dev_reset(ena_dev); - if (rc) { - dev_err(&pdev->dev, "Device reset failed\n"); - goto err; - } - ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); @@ -2480,6 +2505,8 @@ static void ena_fw_reset_device(struct work_struct *work) ena_com_mmio_reg_read_request_destroy(ena_dev); + clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + /* Finish with the destroy part. Start the init part */ rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); @@ -2545,6 +2572,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) return; + if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + budget = ENA_MONITORED_TX_QUEUES; for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { @@ -2644,7 +2674,7 @@ static void ena_timer_service(unsigned long data) if (host_info) ena_update_host_info(host_info, adapter->netdev); - if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { netif_err(adapter, drv, adapter->netdev, "Trigger reset is on\n"); ena_dump_stats_to_dmesg(adapter); -- cgit v1.2.3 From b1669c9f5ac9d50651889e22e4a82f1b34af32d3 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:34 +0200 Subject: net/ena: use napi_complete_done() return value Do not unamsk interrupts if we are in busy poll mode. Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 44 ++++++++++++++++++---------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 8ca1ba3344d2..d467a7914dd0 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1122,26 +1122,40 @@ static int ena_io_poll(struct napi_struct *napi, int budget) tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); - if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { - napi_complete_done(napi, rx_work_done); + /* If the device is about to reset or down, avoid unmask + * the interrupt and return 0 so NAPI won't reschedule + */ + if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || + test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { + napi_complete_done(napi, 0); + ret = 0; + } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { napi_comp_call = 1; - /* Tx and Rx share the same interrupt vector */ - if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) - ena_adjust_intr_moderation(rx_ring, tx_ring); - /* Update intr register: rx intr delay, tx intr delay and - * interrupt unmask + /* Update numa and unmask the interrupt only when schedule + * from the interrupt context (vs from sk_busy_loop) */ - ena_com_update_intr_reg(&intr_reg, - rx_ring->smoothed_interval, - tx_ring->smoothed_interval, - true); + if (napi_complete_done(napi, rx_work_done)) { + /* Tx and Rx share the same interrupt vector */ + if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) + ena_adjust_intr_moderation(rx_ring, tx_ring); + + /* Update intr register: rx intr delay, + * tx intr delay and interrupt unmask + */ + ena_com_update_intr_reg(&intr_reg, + rx_ring->smoothed_interval, + tx_ring->smoothed_interval, + true); + + /* It is a shared MSI-X. + * Tx and Rx CQ have pointer to it. + * So we use one of them to reach the intr reg + */ + ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); + } - /* It is a shared MSI-X. Tx and Rx CQ have pointer to it. - * So we use one of them to reach the intr reg - */ - ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); ena_update_ring_numa_node(tx_ring, rx_ring); -- cgit v1.2.3 From a8496eb81342393e4c8280a5ec27e1872a3fb9fd Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:35 +0200 Subject: net/ena: use READ_ONCE to access completion descriptors Completion descriptors are accessed from the driver and from the device. To avoid reading the old value, use READ_ONCE macro. Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_com.h | 1 + drivers/net/ethernet/amazon/ena/ena_eth_com.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 509d7b8e15ab..c9b33ee5f258 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -33,6 +33,7 @@ #ifndef ENA_COM #define ENA_COM +#include #include #include #include diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 539c536464a5..f999305e1363 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -45,7 +45,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + (head_masked * io_cq->cdesc_entry_size_in_bytes)); - desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; if (desc_phase != expected_phase) @@ -141,7 +141,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, ena_com_cq_inc_head(io_cq); count++; - last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } while (!last); @@ -489,13 +489,13 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) * expected, it mean that the device still didn't update * this completion. */ - cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; if (cdesc_phase != expected_phase) return -EAGAIN; ena_com_cq_inc_head(io_cq); - *req_id = cdesc->req_id; + *req_id = READ_ONCE(cdesc->req_id); return 0; } -- cgit v1.2.3 From 5add6e4a222f2eb437992af6b39c8a9c9a28604a Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:36 +0200 Subject: net/ena: reduce the severity of ena printouts Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 27 +++++++++++++++++---------- drivers/net/ethernet/amazon/ena/ena_netdev.c | 14 +++++++++++--- 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 46aad3a4f6e3..5518b1f2fe42 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -784,7 +784,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, int ret; if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { - pr_info("Feature %d isn't supported\n", feature_id); + pr_debug("Feature %d isn't supported\n", feature_id); return -EPERM; } @@ -1126,7 +1126,13 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, comp, comp_size); if (unlikely(IS_ERR(comp_ctx))) { - pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx)); + if (comp_ctx == ERR_PTR(-ENODEV)) + pr_debug("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + else + pr_err("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + return PTR_ERR(comp_ctx); } @@ -1895,7 +1901,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { - pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU); + pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); return -EPERM; } @@ -1948,8 +1954,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) { - pr_info("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_FUNCTION); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); return -EPERM; } @@ -2112,7 +2118,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { - pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_INPUT); return -EPERM; } @@ -2270,8 +2277,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id( ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { - pr_info("Feature %d isn't supported\n", - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); return -EPERM; } @@ -2542,8 +2549,8 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) if (rc) { if (rc == -EPERM) { - pr_info("Feature %d isn't supported\n", - ENA_ADMIN_INTERRUPT_MODERATION); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { pr_err("Failed to get interrupt moderation admin cmd. 
rc: %d\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d467a7914dd0..50793665707c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -563,6 +563,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter) */ static void ena_free_tx_bufs(struct ena_ring *tx_ring) { + bool print_once = true; u32 i; for (i = 0; i < tx_ring->ring_size; i++) { @@ -574,9 +575,16 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) if (!tx_info->skb) continue; - netdev_notice(tx_ring->netdev, - "free uncompleted tx skb qid %d idx 0x%x\n", - tx_ring->qid, i); + if (print_once) { + netdev_notice(tx_ring->netdev, + "free uncompleted tx skb qid %d idx 0x%x\n", + tx_ring->qid, i); + print_once = false; + } else { + netdev_dbg(tx_ring->netdev, + "free uncompleted tx skb qid %d idx 0x%x\n", + tx_ring->qid, i); + } ena_buf = tx_info->bufs; dma_unmap_single(tx_ring->dev, -- cgit v1.2.3 From 7102a18ac3f323805e3cd8f3dc64907644608c1e Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:37 +0200 Subject: net/ena: change driver's default timeouts The timeouts were too agressive and sometimes cause false alarms. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 4 ++-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 5518b1f2fe42..8029e7c1caf5 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -36,9 +36,9 @@ /*****************************************************************************/ /* Timeout in micro-sec */ -#define ADMIN_CMD_TIMEOUT_US (1000000) +#define ADMIN_CMD_TIMEOUT_US (3000000) -#define ENA_ASYNC_QUEUE_DEPTH 4 +#define ENA_ASYNC_QUEUE_DEPTH 16 #define ENA_ADMIN_QUEUE_DEPTH 32 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index f0ddc117d976..efe0ea113088 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -100,7 +100,7 @@ /* Number of queues to check for missing queues per timer service */ #define ENA_MONITORED_TX_QUEUES 4 /* Max timeout packets before device reset */ -#define MAX_NUM_OF_TIMEOUTED_PACKETS 32 +#define MAX_NUM_OF_TIMEOUTED_PACKETS 128 #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) @@ -116,9 +116,9 @@ #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q)) /* ENA device should send keep alive msg every 1 sec. - * We wait for 3 sec just to be on the safe side. + * We wait for 6 sec just to be on the safe side. */ -#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ) +#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ) #define ENA_MMIO_DISABLE_REG_READ BIT(0) -- cgit v1.2.3 From dd8427a78ffccbacf166a2d5d5eae55b586e1afe Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:38 +0200 Subject: net/ena: change condition for host attribute configuration Move the host info config to be the first admin command that is executed. This change require the driver to remove the 'feature check' from host info configuration flow. The check is removed since the supported features bitmask field is retrieved only after calling ENA_ADMIN_DEVICE_ATTRIBUTES admin command. 
If set host info is not supported an error will be returned by the device. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 8 +++----- drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 +++-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 8029e7c1caf5..08d11cede9c9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2451,11 +2451,9 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) int ret; - if (!ena_com_check_supported_feature_id(ena_dev, - ENA_ADMIN_HOST_ATTR_CONFIG)) { - pr_warn("Set host attribute isn't supported\n"); - return -EPERM; - } + /* Host attribute config is called before ena_com_get_dev_attr_feat + * so ena_com can't check if the feature is supported. + */ memset(&cmd, 0x0, sizeof(cmd)); admin_queue = &ena_dev->admin_queue; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 50793665707c..d8c920be5e91 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2414,6 +2414,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, */ ena_com_set_admin_polling_mode(ena_dev, true); + ena_config_host_info(ena_dev); + /* Get Device Attributes*/ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); if (rc) { @@ -2438,11 +2440,10 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); - ena_config_host_info(ena_dev); - return 0; err_admin_init: + ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_mmio_read_less: ena_com_mmio_reg_read_request_destroy(ena_dev); -- cgit v1.2.3 From 3e5d6897176d38b54124da56183810abbd68c8bc Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Thu, 9 Feb 2017 15:21:39 +0200 Subject: net/ena: update driver version to 1.1.2 Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index efe0ea113088..ed62d8e231a1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -44,7 +44,7 @@ #include "ena_eth_com.h" #define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_MINOR 1 #define DRV_MODULE_VER_SUBMINOR 2 #define DRV_MODULE_NAME "ena" -- cgit v1.2.3 From 5e17da634a21b1200853fe82ba67d6571f2beabe Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:52 -0800 Subject: openvswitch: Fix comments for skb->_nfct Fix comments referring to skb 'nfct' and 'nfctinfo' fields now that they are combined into '_nfct'. Signed-off-by: Jarno Rajahalme Acked-by: Pravin B Shelar Acked-by: Joe Stringer Signed-off-by: David S. Miller --- net/openvswitch/conntrack.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index fbffe0ea4c4f..5de6d12b3a73 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -157,7 +157,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, ovs_ct_get_labels(ct, &key->ct.labels); } -/* Update 'key' based on skb->nfct. 
If 'post_ct' is true, then OVS has +/* Update 'key' based on skb->_nfct. If 'post_ct' is true, then OVS has * previously sent the packet to conntrack via the ct action. If * 'keep_nat_flags' is true, the existing NAT flags retained, else they are * initialized from the connection status. @@ -421,12 +421,12 @@ ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h) /* Find an existing connection which this packet belongs to without * re-attributing statistics or modifying the connection state. This allows an - * skb->nfct lost due to an upcall to be recovered during actions execution. + * skb->_nfct lost due to an upcall to be recovered during actions execution. * * Must be called with rcu_read_lock. * - * On success, populates skb->nfct and skb->nfctinfo, and returns the - * connection. Returns NULL if there is no existing entry. + * On success, populates skb->_nfct and returns the connection. Returns NULL + * if there is no existing entry. */ static struct nf_conn * ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, @@ -464,7 +464,7 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, return ct; } -/* Determine whether skb->nfct is equal to the result of conntrack lookup. */ +/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */ static bool skb_nfct_cached(struct net *net, const struct sw_flow_key *key, const struct ovs_conntrack_info *info, @@ -475,7 +475,7 @@ static bool skb_nfct_cached(struct net *net, ct = nf_ct_get(skb, &ctinfo); /* If no ct, check if we have evidence that an existing conntrack entry - * might be found for this skb. This happens when we lose a skb->nfct + * might be found for this skb. This happens when we lose a skb->_nfct * due to an upcall. If the connection was not confirmed, it is not * cached and needs to be run through conntrack again. */ @@ -699,7 +699,7 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, /* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if * not done already. Update key with new CT state after passing the packet * through conntrack. - * Note that if the packet is deemed invalid by conntrack, skb->nfct will be + * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be * set to NULL and 0 will be returned. */ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, -- cgit v1.2.3 From 9ff464db50e437eef131f719cc2e9902eea9c607 Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:53 -0800 Subject: openvswitch: Use inverted tuple in ovs_ct_find_existing() if NATted. The conntrack lookup for existing connections fails to invert the packet 5-tuple for NATted packets, and therefore fails to find the existing conntrack entry. Conntrack only stores 5-tuples for incoming packets, and there are various situations where a lookup on a packet that has already been transformed by NAT needs to be made. Looking up an existing conntrack entry upon executing packet received from the userspace is one of them. This patch fixes ovs_ct_find_existing() to invert the packet 5-tuple for the conntrack lookup whenever the packet has already been transformed by conntrack from its input form as evidenced by one of the NAT flags being set in the conntrack state metadata. Fixes: 05752523e565 ("openvswitch: Interface with NAT.") Signed-off-by: Jarno Rajahalme Acked-by: Joe Stringer Acked-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- net/openvswitch/conntrack.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 5de6d12b3a73..4df9a5449c95 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -430,7 +430,7 @@ ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h) */ static struct nf_conn * ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, - u8 l3num, struct sk_buff *skb) + u8 l3num, struct sk_buff *skb, bool natted) { struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; @@ -453,6 +453,17 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, return NULL; } + /* Must invert the tuple if skb has been transformed by NAT. */ + if (natted) { + struct nf_conntrack_tuple inverse; + + if (!nf_ct_invert_tuple(&inverse, &tuple, l3proto, l4proto)) { + pr_debug("ovs_ct_find_existing: Inversion failed!\n"); + return NULL; + } + tuple = inverse; + } + /* look for tuple match */ h = nf_conntrack_find_get(net, zone, &tuple); if (!h) @@ -460,6 +471,13 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, ct = nf_ct_tuplehash_to_ctrack(h); + /* Inverted packet tuple matches the reverse direction conntrack tuple, + * select the other tuplehash to get the right 'ctinfo' bits for this + * packet. + */ + if (natted) + h = &ct->tuplehash[!h->tuple.dst.dir]; + nf_ct_set(skb, ct, ovs_ct_get_info(h)); return ct; } @@ -482,7 +500,9 @@ static bool skb_nfct_cached(struct net *net, if (!ct && key->ct.state & OVS_CS_F_TRACKED && !(key->ct.state & OVS_CS_F_INVALID) && key->ct.zone == info->zone.id) - ct = ovs_ct_find_existing(net, &info->zone, info->family, skb); + ct = ovs_ct_find_existing(net, &info->zone, info->family, skb, + !!(key->ct.state + & OVS_CS_F_NAT_MASK)); if (!ct) return false; if (!net_eq(net, read_pnet(&ct->ct_net))) -- cgit v1.2.3 From 193e30967897f3a8b6f9f137ac30571d832c2c5c Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:54 -0800 Subject: openvswitch: Do not trigger events for unconfirmed connections. Receiving change events before the 'new' event for the connection has been received can be confusing. Avoid triggering change events for setting conntrack mark or labels before the conntrack entry has been confirmed. Fixes: 182e3042e15d ("openvswitch: Allow matching on conntrack mark") Fixes: c2ac66735870 ("openvswitch: Allow matching on conntrack label") Signed-off-by: Jarno Rajahalme Acked-by: Joe Stringer Acked-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- net/openvswitch/conntrack.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 4df9a5449c95..a6ff374d57d3 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -245,7 +245,8 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key, new_mark = ct_mark | (ct->mark & ~(mask)); if (ct->mark != new_mark) { ct->mark = new_mark; - nf_conntrack_event_cache(IPCT_MARK, ct); + if (nf_ct_is_confirmed(ct)) + nf_conntrack_event_cache(IPCT_MARK, ct); key->ct.mark = new_mark; } @@ -262,7 +263,6 @@ static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key, enum ip_conntrack_info ctinfo; struct nf_conn_labels *cl; struct nf_conn *ct; - int err; /* The connection could be invalid, in which case set_label is no-op.*/ ct = nf_ct_get(skb, &ctinfo); @@ -277,10 +277,26 @@ static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key, if (!cl || sizeof(cl->bits) < OVS_CT_LABELS_LEN) return -ENOSPC; - err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask, - OVS_CT_LABELS_LEN / sizeof(u32)); - if (err) - return err; + if (nf_ct_is_confirmed(ct)) { + /* Triggers a change event, which makes sense only for + * confirmed connections. + */ + int err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask, + OVS_CT_LABELS_LEN / sizeof(u32)); + if (err) + return err; + } else { + u32 *dst = (u32 *)cl->bits; + const u32 *msk = (const u32 *)mask->ct_labels; + const u32 *lbl = (const u32 *)labels->ct_labels; + int i; + + /* No-one else has access to the non-confirmed entry, copy + * labels over, keeping any bits we are not explicitly setting. + */ + for (i = 0; i < OVS_CT_LABELS_LEN / sizeof(u32); i++) + dst[i] = (dst[i] & ~msk[i]) | (lbl[i] & msk[i]); + } ovs_ct_get_labels(ct, &key->ct.labels); return 0; -- cgit v1.2.3 From cb80d58fae76d8ea93555149b2b16e19b89a1f4f Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:55 -0800 Subject: openvswitch: Unionize ovs_key_ct_label with a u32 array. Make the array of labels in struct ovs_key_ct_label an union, adding a u32 array of the same byte size as the existing u8 array. It is faster to loop through the labels 32 bits at the time, which is also the alignment of netlink attributes. Signed-off-by: Jarno Rajahalme Acked-by: Joe Stringer Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 8 ++++++-- net/openvswitch/conntrack.c | 15 ++++++++------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 375d812fea36..96aee34ef55f 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -446,9 +446,13 @@ struct ovs_key_nd { __u8 nd_tll[ETH_ALEN]; }; -#define OVS_CT_LABELS_LEN 16 +#define OVS_CT_LABELS_LEN_32 4 +#define OVS_CT_LABELS_LEN (OVS_CT_LABELS_LEN_32 * sizeof(__u32)) struct ovs_key_ct_labels { - __u8 ct_labels[OVS_CT_LABELS_LEN]; + union { + __u8 ct_labels[OVS_CT_LABELS_LEN]; + __u32 ct_labels_32[OVS_CT_LABELS_LEN_32]; + }; }; /* OVS_KEY_ATTR_CT_STATE flags */ diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index a6ff374d57d3..f23934ccce20 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -281,20 +281,21 @@ static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key, /* Triggers a change event, which makes sense only for * confirmed connections. 
*/ - int err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask, - OVS_CT_LABELS_LEN / sizeof(u32)); + int err = nf_connlabels_replace(ct, labels->ct_labels_32, + mask->ct_labels_32, + OVS_CT_LABELS_LEN_32); if (err) return err; } else { u32 *dst = (u32 *)cl->bits; - const u32 *msk = (const u32 *)mask->ct_labels; - const u32 *lbl = (const u32 *)labels->ct_labels; + const u32 *msk = mask->ct_labels_32; + const u32 *lbl = labels->ct_labels_32; int i; /* No-one else has access to the non-confirmed entry, copy * labels over, keeping any bits we are not explicitly setting. */ - for (i = 0; i < OVS_CT_LABELS_LEN / sizeof(u32); i++) + for (i = 0; i < OVS_CT_LABELS_LEN_32; i++) dst[i] = (dst[i] & ~msk[i]) | (lbl[i] & msk[i]); } @@ -866,8 +867,8 @@ static bool labels_nonzero(const struct ovs_key_ct_labels *labels) { size_t i; - for (i = 0; i < sizeof(*labels); i++) - if (labels->ct_labels[i]) + for (i = 0; i < OVS_CT_LABELS_LEN_32; i++) + if (labels->ct_labels_32[i]) return true; return false; -- cgit v1.2.3 From b87cec3814ccc7f6afb0a1378ee7e5110d07cdd3 Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:56 -0800 Subject: openvswitch: Simplify labels length logic. Since 23014011ba42 ("netfilter: conntrack: support a fixed size of 128 distinct labels"), the size of conntrack labels extension has fixed to 128 bits, so we do not need to check for labels sizes shorter than 128 at run-time. This patch simplifies labels length logic accordingly, but allows the conntrack labels size to be increased in the future without breaking the build. In the event of conntrack labels increasing in size OVS would still be able to deal with the 128 first label bits. Suggested-by: Joe Stringer Signed-off-by: Jarno Rajahalme Acked-by: Pravin B Shelar Acked-by: Joe Stringer Signed-off-by: David S. Miller --- net/openvswitch/conntrack.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index f23934ccce20..fe2a410ce70a 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -129,22 +129,20 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct) #endif } +/* Guard against conntrack labels max size shrinking below 128 bits. */ +#if NF_CT_LABELS_MAX_SIZE < 16 +#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes +#endif + static void ovs_ct_get_labels(const struct nf_conn *ct, struct ovs_key_ct_labels *labels) { struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL; - if (cl) { - size_t len = sizeof(cl->bits); - - if (len > OVS_CT_LABELS_LEN) - len = OVS_CT_LABELS_LEN; - else if (len < OVS_CT_LABELS_LEN) - memset(labels, 0, OVS_CT_LABELS_LEN); - memcpy(labels, cl->bits, len); - } else { + if (cl) + memcpy(labels, cl->bits, OVS_CT_LABELS_LEN); + else memset(labels, 0, OVS_CT_LABELS_LEN); - } } static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, @@ -274,7 +272,7 @@ static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key, nf_ct_labels_ext_add(ct); cl = nf_ct_labels_find(ct); } - if (!cl || sizeof(cl->bits) < OVS_CT_LABELS_LEN) + if (!cl) return -ENOSPC; if (nf_ct_is_confirmed(ct)) { -- cgit v1.2.3 From 6ffcea79957df43caeaa6d1de5062556a5afc262 Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:57 -0800 Subject: openvswitch: Refactor labels initialization. Refactoring conntrack labels initialization makes changes in later patches easier to review. 
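For readers following the labels changes above, here is a minimal standalone sketch of the masked 32-bit merge these patches converge on (plain C, with uint32_t arrays standing in for the kernel's struct nf_conn_labels bits and struct ovs_key_ct_labels; only the merge rule itself is taken from the diffs, the helper name and constant are illustrative):

#include <stdint.h>

#define CT_LABELS_LEN_32 4	/* 128 bits of conntrack labels, as 32-bit words */

/* Merge new label bits into an existing label set, 32 bits at a time:
 * only the bits selected by 'mask' are overwritten, all other bits are
 * preserved.  This is the rule applied to not-yet-confirmed entries,
 * where no change event needs to be generated.
 */
static void ct_labels_merge(uint32_t dst[CT_LABELS_LEN_32],
			    const uint32_t labels[CT_LABELS_LEN_32],
			    const uint32_t mask[CT_LABELS_LEN_32])
{
	int i;

	for (i = 0; i < CT_LABELS_LEN_32; i++)
		dst[i] = (dst[i] & ~mask[i]) | (labels[i] & mask[i]);
}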
Signed-off-by: Jarno Rajahalme Acked-by: Pravin B Shelar Acked-by: Joe Stringer Signed-off-by: David S. Miller --- net/openvswitch/conntrack.c | 104 ++++++++++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 42 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index fe2a410ce70a..7c5bb98c22c6 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -227,19 +227,12 @@ int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) return 0; } -static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key, +static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key, u32 ct_mark, u32 mask) { #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - enum ip_conntrack_info ctinfo; - struct nf_conn *ct; u32 new_mark; - /* The connection could be invalid, in which case set_mark is no-op. */ - ct = nf_ct_get(skb, &ctinfo); - if (!ct) - return 0; - new_mark = ct_mark | (ct->mark & ~(mask)); if (ct->mark != new_mark) { ct->mark = new_mark; @@ -254,50 +247,66 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key, #endif } -static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key, - const struct ovs_key_ct_labels *labels, - const struct ovs_key_ct_labels *mask) +static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct) { - enum ip_conntrack_info ctinfo; struct nf_conn_labels *cl; - struct nf_conn *ct; - - /* The connection could be invalid, in which case set_label is no-op.*/ - ct = nf_ct_get(skb, &ctinfo); - if (!ct) - return 0; cl = nf_ct_labels_find(ct); if (!cl) { nf_ct_labels_ext_add(ct); cl = nf_ct_labels_find(ct); } + + return cl; +} + +/* Initialize labels for a new, yet to be committed conntrack entry. Note that + * since the new connection is not yet confirmed, and thus no-one else has + * access to it's labels, we simply write them over. Also, we refrain from + * triggering events, as receiving change events before the create event would + * be confusing. + */ +static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, + const struct ovs_key_ct_labels *labels, + const struct ovs_key_ct_labels *mask) +{ + struct nf_conn_labels *cl; + u32 *dst; + int i; + + cl = ovs_ct_get_conn_labels(ct); if (!cl) return -ENOSPC; - if (nf_ct_is_confirmed(ct)) { - /* Triggers a change event, which makes sense only for - * confirmed connections. - */ - int err = nf_connlabels_replace(ct, labels->ct_labels_32, - mask->ct_labels_32, - OVS_CT_LABELS_LEN_32); - if (err) - return err; - } else { - u32 *dst = (u32 *)cl->bits; - const u32 *msk = mask->ct_labels_32; - const u32 *lbl = labels->ct_labels_32; - int i; + dst = (u32 *)cl->bits; + for (i = 0; i < OVS_CT_LABELS_LEN_32; i++) + dst[i] = (dst[i] & ~mask->ct_labels_32[i]) | + (labels->ct_labels_32[i] & mask->ct_labels_32[i]); - /* No-one else has access to the non-confirmed entry, copy - * labels over, keeping any bits we are not explicitly setting. 
- */ - for (i = 0; i < OVS_CT_LABELS_LEN_32; i++) - dst[i] = (dst[i] & ~msk[i]) | (lbl[i] & msk[i]); - } + memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN); + + return 0; +} + +static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key, + const struct ovs_key_ct_labels *labels, + const struct ovs_key_ct_labels *mask) +{ + struct nf_conn_labels *cl; + int err; + + cl = ovs_ct_get_conn_labels(ct); + if (!cl) + return -ENOSPC; + + err = nf_connlabels_replace(ct, labels->ct_labels_32, + mask->ct_labels_32, + OVS_CT_LABELS_LEN_32); + if (err) + return err; + + memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN); - ovs_ct_get_labels(ct, &key->ct.labels); return 0; } @@ -877,25 +886,36 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, const struct ovs_conntrack_info *info, struct sk_buff *skb) { + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; int err; err = __ovs_ct_lookup(net, key, info, skb); if (err) return err; + /* The connection could be invalid, in which case this is a no-op.*/ + ct = nf_ct_get(skb, &ctinfo); + if (!ct) + return 0; + /* Apply changes before confirming the connection so that the initial * conntrack NEW netlink event carries the values given in the CT * action. */ if (info->mark.mask) { - err = ovs_ct_set_mark(skb, key, info->mark.value, + err = ovs_ct_set_mark(ct, key, info->mark.value, info->mark.mask); if (err) return err; } if (labels_nonzero(&info->labels.mask)) { - err = ovs_ct_set_labels(skb, key, &info->labels.value, - &info->labels.mask); + if (!nf_ct_is_confirmed(ct)) + err = ovs_ct_init_labels(ct, key, &info->labels.value, + &info->labels.mask); + else + err = ovs_ct_set_labels(ct, key, &info->labels.value, + &info->labels.mask); if (err) return err; } -- cgit v1.2.3 From 09aa98ad496d6b11a698b258bc64d7f64c55d682 Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:58 -0800 Subject: openvswitch: Inherit master's labels. We avoid calling into nf_conntrack_in() for expected connections, as that would remove the expectation that we want to stick around until we are ready to commit the connection. Instead, we do a lookup in the expectation table directly. However, after a successful expectation lookup we have set the flow key label field from the master connection, whereas nf_conntrack_in() does not do this. This leads to master's labels being inherited after an expectation lookup, but those labels not being inherited after the corresponding conntrack action with a commit flag. This patch resolves the problem by changing the commit code path to also inherit the master's labels to the expected connection. Resolving this conflict in favor of inheriting the labels allows more information be passed from the master connection to related connections, which would otherwise be much harder if the 32 bits in the connmark are not enough. Labels can still be set explicitly, so this change only affects the default values of the labels in presense of a master connection. Fixes: 7f8a436eaa2c ("openvswitch: Add conntrack action") Signed-off-by: Jarno Rajahalme Acked-by: Pravin B Shelar Acked-by: Joe Stringer Signed-off-by: David S. 
Miller --- net/openvswitch/conntrack.c | 45 +++++++++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 7c5bb98c22c6..f989ccf38eab 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -73,6 +73,8 @@ struct ovs_conntrack_info { #endif }; +static bool labels_nonzero(const struct ovs_key_ct_labels *labels); + static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info); static u16 key_to_nfproto(const struct sw_flow_key *key) @@ -270,18 +272,32 @@ static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, const struct ovs_key_ct_labels *labels, const struct ovs_key_ct_labels *mask) { - struct nf_conn_labels *cl; - u32 *dst; - int i; + struct nf_conn_labels *cl, *master_cl; + bool have_mask = labels_nonzero(mask); + + /* Inherit master's labels to the related connection? */ + master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL; + + if (!master_cl && !have_mask) + return 0; /* Nothing to do. */ cl = ovs_ct_get_conn_labels(ct); if (!cl) return -ENOSPC; - dst = (u32 *)cl->bits; - for (i = 0; i < OVS_CT_LABELS_LEN_32; i++) - dst[i] = (dst[i] & ~mask->ct_labels_32[i]) | - (labels->ct_labels_32[i] & mask->ct_labels_32[i]); + /* Inherit the master's labels, if any. */ + if (master_cl) + *cl = *master_cl; + + if (have_mask) { + u32 *dst = (u32 *)cl->bits; + int i; + + for (i = 0; i < OVS_CT_LABELS_LEN_32; i++) + dst[i] = (dst[i] & ~mask->ct_labels_32[i]) | + (labels->ct_labels_32[i] + & mask->ct_labels_32[i]); + } memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN); @@ -909,13 +925,14 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, if (err) return err; } - if (labels_nonzero(&info->labels.mask)) { - if (!nf_ct_is_confirmed(ct)) - err = ovs_ct_init_labels(ct, key, &info->labels.value, - &info->labels.mask); - else - err = ovs_ct_set_labels(ct, key, &info->labels.value, - &info->labels.mask); + if (!nf_ct_is_confirmed(ct)) { + err = ovs_ct_init_labels(ct, key, &info->labels.value, + &info->labels.mask); + if (err) + return err; + } else if (labels_nonzero(&info->labels.mask)) { + err = ovs_ct_set_labels(ct, key, &info->labels.value, + &info->labels.mask); if (err) return err; } -- cgit v1.2.3 From 9dd7f8907c3705dc7a7a375d1c6e30b06e6daffc Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:21:59 -0800 Subject: openvswitch: Add original direction conntrack tuple to sw_flow_key. Add the fields of the conntrack original direction 5-tuple to struct sw_flow_key. The new fields are initially marked as non-existent, and are populated whenever a conntrack action is executed and either finds or generates a conntrack entry. This means that these fields exist for all packets that were not rejected by conntrack as untrackable. The original tuple fields in the sw_flow_key are filled from the original direction tuple of the conntrack entry relating to the current packet, or from the original direction tuple of the master conntrack entry, if the current conntrack entry has a master. Generally, expected connections of connections having an assigned helper (e.g., FTP), have a master conntrack entry. 
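As a rough illustration of the rule just described, the sketch below models the tuple selection with stand-in types ('struct conn' and 'struct tuple' are hypothetical simplifications of struct nf_conn and struct nf_conntrack_tuple; only the "prefer the master's original-direction tuple" logic is shown):

#include <stdint.h>

struct tuple {			/* illustrative 5-tuple, not the kernel struct */
	uint32_t src_ip, dst_ip;
	uint16_t src_port, dst_port;
	uint8_t proto;
};

struct conn {
	struct conn *master;	/* non-NULL for expected/related connections */
	struct tuple tuple[2];	/* [0] = original direction, [1] = reply */
};

/* Return the tuple the flow key's original-direction fields are populated
 * from: the master's original-direction tuple if a master exists (e.g. an
 * FTP data connection expected by the control connection's helper),
 * otherwise this connection's own original-direction tuple.
 */
static const struct tuple *orig_tuple_for_key(const struct conn *ct)
{
	if (ct->master)
		ct = ct->master;
	return &ct->tuple[0];
}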
The main purpose of the new conntrack original tuple fields is to allow matching on them for policy decision purposes, with the premise that the admissibility of tracked connections reply packets (as well as original direction packets), and both direction packets of any related connections may be based on ACL rules applying to the master connection's original direction 5-tuple. This also makes it easier to make policy decisions when the actual packet headers might have been transformed by NAT, as the original direction 5-tuple represents the packet headers before any such transformation. When using the original direction 5-tuple the admissibility of return and/or related packets need not be based on the mere existence of a conntrack entry, allowing separation of admission policy from the established conntrack state. While existence of a conntrack entry is required for admission of the return or related packets, policy changes can render connections that were initially admitted to be rejected or dropped afterwards. If the admission of the return and related packets was based on mere conntrack state (e.g., connection being in an established state), a policy change that would make the connection rejected or dropped would need to find and delete all conntrack entries affected by such a change. When using the original direction 5-tuple matching the affected conntrack entries can be allowed to time out instead, as the established state of the connection would not need to be the basis for packet admission any more. It should be noted that the directionality of related connections may be the same or different than that of the master connection, and neither the original direction 5-tuple nor the conntrack state bits carry this information. If needed, the directionality of the master connection can be stored in master's conntrack mark or labels, which are automatically inherited by the expected related connections. The fact that neither ARP nor ND packets are trackable by conntrack allows mutual exclusion between ARP/ND and the new conntrack original tuple fields. Hence, the IP addresses are overlaid in union with ARP and ND fields. This allows the sw_flow_key to not grow much due to this patch, but it also means that we must be careful to never use the new key fields with ARP or ND packets. ARP is easy to distinguish and keep mutually exclusive based on the ethernet type, but ND being an ICMPv6 protocol requires a bit more attention. Signed-off-by: Jarno Rajahalme Acked-by: Joe Stringer Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 20 +++++++++- net/openvswitch/actions.c | 2 + net/openvswitch/conntrack.c | 86 +++++++++++++++++++++++++++++++++++++--- net/openvswitch/conntrack.h | 10 ++++- net/openvswitch/flow.c | 34 +++++++++++++--- net/openvswitch/flow.h | 49 ++++++++++++++++++----- net/openvswitch/flow_netlink.c | 85 +++++++++++++++++++++++++++++---------- net/openvswitch/flow_netlink.h | 7 +++- 8 files changed, 246 insertions(+), 47 deletions(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 96aee34ef55f..90af8b8e10f8 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2017 Nicira, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -331,6 +331,8 @@ enum ovs_key_attr { OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */ OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */ OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */ + OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, /* struct ovs_key_ct_tuple_ipv4 */ + OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, /* struct ovs_key_ct_tuple_ipv6 */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ @@ -472,6 +474,22 @@ struct ovs_key_ct_labels { #define OVS_CS_F_NAT_MASK (OVS_CS_F_SRC_NAT | OVS_CS_F_DST_NAT) +struct ovs_key_ct_tuple_ipv4 { + __be32 ipv4_src; + __be32 ipv4_dst; + __be16 src_port; + __be16 dst_port; + __u8 ipv4_proto; +}; + +struct ovs_key_ct_tuple_ipv6 { + __be32 ipv6_src[4]; + __be32 ipv6_dst[4]; + __be16 src_port; + __be16 dst_port; + __u8 ipv6_proto; +}; + /** * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index efa9a8858cc6..b1beb2b94ec7 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1074,6 +1074,8 @@ static int execute_masked_set_action(struct sk_buff *skb, case OVS_KEY_ATTR_CT_ZONE: case OVS_KEY_ATTR_CT_MARK: case OVS_KEY_ATTR_CT_LABELS: + case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: + case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: err = -EINVAL; break; } diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index f989ccf38eab..bfd7606c8be1 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -147,6 +147,20 @@ static void ovs_ct_get_labels(const struct nf_conn *ct, memset(labels, 0, OVS_CT_LABELS_LEN); } +static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key, + const struct nf_conntrack_tuple *orig, + u8 icmp_proto) +{ + key->ct.orig_proto = orig->dst.protonum; + if (orig->dst.protonum == icmp_proto) { + key->ct.orig_tp.src = htons(orig->dst.u.icmp.type); + key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code); + } else { + key->ct.orig_tp.src = orig->src.u.all; + key->ct.orig_tp.dst = orig->dst.u.all; + } +} + static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, const struct nf_conntrack_zone *zone, const struct nf_conn *ct) @@ -155,6 +169,35 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, key->ct.zone = zone->id; key->ct.mark = ovs_ct_get_mark(ct); ovs_ct_get_labels(ct, &key->ct.labels); + + if (ct) { + const struct nf_conntrack_tuple *orig; + + /* Use the master if we have one. */ + if (ct->master) + ct = ct->master; + orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; + + /* IP version must match with the master connection. */ + if (key->eth.type == htons(ETH_P_IP) && + nf_ct_l3num(ct) == NFPROTO_IPV4) { + key->ipv4.ct_orig.src = orig->src.u3.ip; + key->ipv4.ct_orig.dst = orig->dst.u3.ip; + __ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP); + return; + } else if (key->eth.type == htons(ETH_P_IPV6) && + !sw_flow_key_is_nd(key) && + nf_ct_l3num(ct) == NFPROTO_IPV6) { + key->ipv6.ct_orig.src = orig->src.u3.in6; + key->ipv6.ct_orig.dst = orig->dst.u3.in6; + __ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP); + return; + } + } + /* Clear 'ct.orig_proto' to mark the non-existence of conntrack + * original direction key fields. + */ + key->ct.orig_proto = 0; } /* Update 'key' based on skb->_nfct. 
If 'post_ct' is true, then OVS has @@ -208,24 +251,55 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) ovs_ct_update_key(skb, NULL, key, false, false); } -int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) +#define IN6_ADDR_INITIALIZER(ADDR) \ + { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \ + (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] } + +int ovs_ct_put_key(const struct sw_flow_key *swkey, + const struct sw_flow_key *output, struct sk_buff *skb) { - if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state)) + if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct.state)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && - nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, key->ct.zone)) + nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct.zone)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && - nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, key->ct.mark)) + nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels), - &key->ct.labels)) + nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels), + &output->ct.labels)) return -EMSGSIZE; + if (swkey->ct.orig_proto) { + if (swkey->eth.type == htons(ETH_P_IP)) { + struct ovs_key_ct_tuple_ipv4 orig = { + output->ipv4.ct_orig.src, + output->ipv4.ct_orig.dst, + output->ct.orig_tp.src, + output->ct.orig_tp.dst, + output->ct.orig_proto, + }; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, + sizeof(orig), &orig)) + return -EMSGSIZE; + } else if (swkey->eth.type == htons(ETH_P_IPV6)) { + struct ovs_key_ct_tuple_ipv6 orig = { + IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src), + IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst), + output->ct.orig_tp.src, + output->ct.orig_tp.dst, + output->ct.orig_proto, + }; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, + sizeof(orig), &orig)) + return -EMSGSIZE; + } + } + return 0; } diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h index 8f6230bd6183..9e92445dc092 100644 --- a/net/openvswitch/conntrack.h +++ b/net/openvswitch/conntrack.h @@ -32,7 +32,8 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *, const struct ovs_conntrack_info *); void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key); -int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb); +int ovs_ct_put_key(const struct sw_flow_key *swkey, + const struct sw_flow_key *output, struct sk_buff *skb); void ovs_ct_free_action(const struct nlattr *a); #define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \ @@ -79,9 +80,14 @@ static inline void ovs_ct_fill_key(const struct sk_buff *skb, key->ct.zone = 0; key->ct.mark = 0; memset(&key->ct.labels, 0, sizeof(key->ct.labels)); + /* Clear 'ct.orig_proto' to mark the non-existence of original + * direction key fields. + */ + key->ct.orig_proto = 0; } -static inline int ovs_ct_put_key(const struct sw_flow_key *key, +static inline int ovs_ct_put_key(const struct sw_flow_key *swkey, + const struct sw_flow_key *output, struct sk_buff *skb) { return 0; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 2c0a00f7f1b7..9d4bb8eb63f2 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -765,7 +765,7 @@ static int key_extract_mac_proto(struct sk_buff *skb) int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, struct sk_buff *skb, struct sw_flow_key *key) { - int res; + int res, err; /* Extract metadata from packet. 
*/ if (tun_info) { @@ -792,7 +792,6 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, key->phy.priority = skb->priority; key->phy.in_port = OVS_CB(skb)->input_vport->port_no; key->phy.skb_mark = skb->mark; - ovs_ct_fill_key(skb, key); key->ovs_flow_hash = 0; res = key_extract_mac_proto(skb); if (res < 0) @@ -800,17 +799,26 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, key->mac_proto = res; key->recirc_id = 0; - return key_extract(skb, key); + err = key_extract(skb, key); + if (!err) + ovs_ct_fill_key(skb, key); /* Must be after key_extract(). */ + return err; } int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr, struct sk_buff *skb, struct sw_flow_key *key, bool log) { + const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; + u64 attrs = 0; int err; + err = parse_flow_nlattrs(attr, a, &attrs, log); + if (err) + return -EINVAL; + /* Extract metadata from netlink attributes. */ - err = ovs_nla_get_flow_metadata(net, attr, key, log); + err = ovs_nla_get_flow_metadata(net, a, attrs, key, log); if (err) return err; @@ -824,5 +832,21 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr, */ skb->protocol = key->eth.type; - return key_extract(skb, key); + err = key_extract(skb, key); + if (err) + return err; + + /* Check that we have conntrack original direction tuple metadata only + * for packets for which it makes sense. Otherwise the key may be + * corrupted due to overlapping key fields. + */ + if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) && + key->eth.type != htons(ETH_P_IP)) + return -EINVAL; + if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) && + (key->eth.type != htons(ETH_P_IPV6) || + sw_flow_key_is_nd(key))) + return -EINVAL; + + return 0; } diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index f61cae7f9030..76e05b25f030 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2014 Nicira, Inc. + * Copyright (c) 2007-2017 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -107,10 +107,16 @@ struct sw_flow_key { __be32 src; /* IP source address. */ __be32 dst; /* IP destination address. */ } addr; - struct { - u8 sha[ETH_ALEN]; /* ARP source hardware address. */ - u8 tha[ETH_ALEN]; /* ARP target hardware address. */ - } arp; + union { + struct { + __be32 src; + __be32 dst; + } ct_orig; /* Conntrack original direction fields. */ + struct { + u8 sha[ETH_ALEN]; /* ARP source hardware address. */ + u8 tha[ETH_ALEN]; /* ARP target hardware address. */ + } arp; + }; } ipv4; struct { struct { @@ -118,23 +124,44 @@ struct sw_flow_key { struct in6_addr dst; /* IPv6 destination address. */ } addr; __be32 label; /* IPv6 flow label. */ - struct { - struct in6_addr target; /* ND target address. */ - u8 sll[ETH_ALEN]; /* ND source link layer address. */ - u8 tll[ETH_ALEN]; /* ND target link layer address. */ - } nd; + union { + struct { + struct in6_addr src; + struct in6_addr dst; + } ct_orig; /* Conntrack original direction fields. */ + struct { + struct in6_addr target; /* ND target address. */ + u8 sll[ETH_ALEN]; /* ND source link layer address. */ + u8 tll[ETH_ALEN]; /* ND target link layer address. */ + } nd; + }; } ipv6; }; struct { /* Connection tracking fields. */ + u8 state; + u8 orig_proto; /* CT orig tuple IP protocol. */ u16 zone; u32 mark; - u8 state; + struct { + __be16 src; /* CT orig tuple tp src port. 
*/ + __be16 dst; /* CT orig tuple tp dst port. */ + } orig_tp; + struct ovs_key_ct_labels labels; } ct; } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ +static inline bool sw_flow_key_is_nd(const struct sw_flow_key *key) +{ + return key->eth.type == htons(ETH_P_IPV6) && + key->ip.proto == NEXTHDR_ICMP && + key->tp.dst == 0 && + (key->tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) || + key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)); +} + struct sw_flow_key_range { unsigned short int start; unsigned short int end; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index c87d359b9b37..989f38f120bb 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -129,7 +129,9 @@ static bool match_validate(const struct sw_flow_match *match, /* The following mask attributes allowed only if they * pass the validation tests. */ mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4) + | (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) | (1 << OVS_KEY_ATTR_IPV6) + | (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) | (1 << OVS_KEY_ATTR_TCP) | (1 << OVS_KEY_ATTR_TCP_FLAGS) | (1 << OVS_KEY_ATTR_UDP) @@ -161,8 +163,10 @@ static bool match_validate(const struct sw_flow_match *match, if (match->key->eth.type == htons(ETH_P_IP)) { key_expected |= 1 << OVS_KEY_ATTR_IPV4; - if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + if (match->mask && match->mask->key.eth.type == htons(0xffff)) { mask_allowed |= 1 << OVS_KEY_ATTR_IPV4; + mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4; + } if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { if (match->key->ip.proto == IPPROTO_UDP) { @@ -196,8 +200,10 @@ static bool match_validate(const struct sw_flow_match *match, if (match->key->eth.type == htons(ETH_P_IPV6)) { key_expected |= 1 << OVS_KEY_ATTR_IPV6; - if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + if (match->mask && match->mask->key.eth.type == htons(0xffff)) { mask_allowed |= 1 << OVS_KEY_ATTR_IPV6; + mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6; + } if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { if (match->key->ip.proto == IPPROTO_UDP) { @@ -230,6 +236,12 @@ static bool match_validate(const struct sw_flow_match *match, htons(NDISC_NEIGHBOUR_SOLICITATION) || match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { key_expected |= 1 << OVS_KEY_ATTR_ND; + /* Original direction conntrack tuple + * uses the same space as the ND fields + * in the key, so both are not allowed + * at the same time. + */ + mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6); if (match->mask && (match->mask->key.tp.src == htons(0xff))) mask_allowed |= 1 << OVS_KEY_ATTR_ND; } @@ -282,7 +294,7 @@ size_t ovs_key_attr_size(void) /* Whenever adding new OVS_KEY_ FIELDS, we should consider * updating this function. 
*/ - BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 26); + BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 28); return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ @@ -295,6 +307,7 @@ size_t ovs_key_attr_size(void) + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */ + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */ + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */ + + nla_total_size(40) /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */ + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */ @@ -355,6 +368,10 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) }, [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) }, [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) }, + [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = { + .len = sizeof(struct ovs_key_ct_tuple_ipv4) }, + [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = { + .len = sizeof(struct ovs_key_ct_tuple_ipv6) }, }; static bool check_attr_len(unsigned int attr_len, unsigned int expected_len) @@ -430,9 +447,8 @@ static int parse_flow_mask_nlattrs(const struct nlattr *attr, return __parse_flow_nlattrs(attr, a, attrsp, log, true); } -static int parse_flow_nlattrs(const struct nlattr *attr, - const struct nlattr *a[], u64 *attrsp, - bool log) +int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[], + u64 *attrsp, bool log) { return __parse_flow_nlattrs(attr, a, attrsp, log, false); } @@ -1082,6 +1098,34 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, sizeof(*cl), is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS); } + if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) { + const struct ovs_key_ct_tuple_ipv4 *ct; + + ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]); + + SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask); + SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask); + SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask); + SW_FLOW_KEY_PUT(match, ct.orig_proto, ct->ipv4_proto, is_mask); + *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4); + } + if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) { + const struct ovs_key_ct_tuple_ipv6 *ct; + + ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]); + + SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src, + sizeof(match->key->ipv6.ct_orig.src), + is_mask); + SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst, + sizeof(match->key->ipv6.ct_orig.dst), + is_mask); + SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask); + SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask); + SW_FLOW_KEY_PUT(match, ct.orig_proto, ct->ipv6_proto, is_mask); + *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6); + } /* For layer 3 packets the Ethernet type is provided * and treated as metadata but no MAC addresses are provided. @@ -1493,9 +1537,12 @@ u32 ovs_nla_get_ufid_flags(const struct nlattr *attr) /** * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key. - * @key: Receives extracted in_port, priority, tun_key and skb_mark. - * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute - * sequence. + * @net: Network namespace. + * @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack + * metadata. + * @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink + * attributes. 
+ * @attrs: Bit mask for the netlink attributes included in @a. * @log: Boolean to allow kernel error logging. Normally true, but when * probing for feature compatibility this should be passed in as false to * suppress unnecessary error logging. @@ -1504,25 +1551,23 @@ u32 ovs_nla_get_ufid_flags(const struct nlattr *attr) * take the same form accepted by flow_from_nlattrs(), but only enough of it to * get the metadata, that is, the parts of the flow key that cannot be * extracted from the packet itself. + * + * This must be called before the packet key fields are filled in 'key'. */ -int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr, - struct sw_flow_key *key, - bool log) +int ovs_nla_get_flow_metadata(struct net *net, + const struct nlattr *a[OVS_KEY_ATTR_MAX + 1], + u64 attrs, struct sw_flow_key *key, bool log) { - const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; struct sw_flow_match match; - u64 attrs = 0; - int err; - - err = parse_flow_nlattrs(attr, a, &attrs, log); - if (err) - return -EINVAL; memset(&match, 0, sizeof(match)); match.key = key; memset(&key->ct, 0, sizeof(key->ct)); + memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig)); + memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig)); + key->phy.in_port = DP_MAX_PORTS; return metadata_from_nlattrs(net, &match, &attrs, a, false, log); @@ -1584,7 +1629,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey, if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark)) goto nla_put_failure; - if (ovs_ct_put_key(output, skb)) + if (ovs_ct_put_key(swkey, output, skb)) goto nla_put_failure; if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) { diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h index 45f9769e5aac..929c665ac3aa 100644 --- a/net/openvswitch/flow_netlink.h +++ b/net/openvswitch/flow_netlink.h @@ -46,8 +46,11 @@ void ovs_match_init(struct sw_flow_match *match, int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *, int attr, bool is_mask, struct sk_buff *); -int ovs_nla_get_flow_metadata(struct net *, const struct nlattr *, - struct sw_flow_key *, bool log); +int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[], + u64 *attrsp, bool log); +int ovs_nla_get_flow_metadata(struct net *net, + const struct nlattr *a[OVS_KEY_ATTR_MAX + 1], + u64 attrs, struct sw_flow_key *key, bool log); int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb); int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb); -- cgit v1.2.3 From dd41d33f0b033885211a5d6f3ee19e73238aa9ee Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:22:00 -0800 Subject: openvswitch: Add force commit. Stateful network admission policy may allow connections to one direction and reject connections initiated in the other direction. After policy change it is possible that for a new connection an overlapping conntrack entry already exists, where the original direction of the existing connection is opposed to the new connection's initial packet. Most importantly, conntrack state relating to the current packet gets the "reply" designation based on whether the original direction tuple or the reply direction tuple matched. If this "directionality" is wrong w.r.t. to the stateful network admission policy it may happen that packets in neither direction are correctly admitted. This patch adds a new "force commit" option to the OVS conntrack action that checks the original direction of an existing conntrack entry. 
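Viewed in isolation, the directionality rule this option adds can be sketched as follows (stand-in types only; the actual check, against CTINFO2DIR() and IP_CT_DIR_ORIGINAL, appears in the diff below):

#include <stdbool.h>

enum ct_dir { CT_DIR_ORIGINAL, CT_DIR_REPLY };

/* Under force commit, an existing conntrack entry may only be reused if the
 * current packet travels in the entry's original direction; otherwise the
 * entry is discarded so that a fresh one can be committed the right way
 * around.
 */
static bool force_commit_may_reuse(bool force, enum ct_dir pkt_dir)
{
	return !force || pkt_dir == CT_DIR_ORIGINAL;
}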
If that direction is opposed to the current packet, the existing conntrack entry is deleted and a new one is subsequently created in the correct direction. Signed-off-by: Jarno Rajahalme Acked-by: Pravin B Shelar Acked-by: Joe Stringer Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 5 +++++ net/openvswitch/conntrack.c | 26 ++++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 90af8b8e10f8..7f41f7d0000f 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -674,6 +674,10 @@ struct ovs_action_hash { * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. * @OVS_CT_ATTR_NAT: Nested OVS_NAT_ATTR_* for performing L3 network address * translation (NAT) on the packet. + * @OVS_CT_ATTR_FORCE_COMMIT: Like %OVS_CT_ATTR_COMMIT, but instead of doing + * nothing if the connection is already committed will check that the current + * packet is in conntrack entry's original direction. If directionality does + * not match, will delete the existing conntrack entry and commit a new one. */ enum ovs_ct_attr { OVS_CT_ATTR_UNSPEC, @@ -684,6 +688,7 @@ enum ovs_ct_attr { OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of related connections. */ OVS_CT_ATTR_NAT, /* Nested OVS_NAT_ATTR_* */ + OVS_CT_ATTR_FORCE_COMMIT, /* No argument */ __OVS_CT_ATTR_MAX }; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index bfd7606c8be1..8b15bab70583 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -65,6 +65,7 @@ struct ovs_conntrack_info { struct nf_conn *ct; u8 commit : 1; u8 nat : 3; /* enum ovs_ct_nat */ + u8 force : 1; u16 family; struct md_mark mark; struct md_labels labels; @@ -613,10 +614,13 @@ static bool skb_nfct_cached(struct net *net, */ if (!ct && key->ct.state & OVS_CS_F_TRACKED && !(key->ct.state & OVS_CS_F_INVALID) && - key->ct.zone == info->zone.id) + key->ct.zone == info->zone.id) { ct = ovs_ct_find_existing(net, &info->zone, info->family, skb, !!(key->ct.state & OVS_CS_F_NAT_MASK)); + if (ct) + nf_ct_get(skb, &ctinfo); + } if (!ct) return false; if (!net_eq(net, read_pnet(&ct->ct_net))) @@ -630,6 +634,18 @@ static bool skb_nfct_cached(struct net *net, if (help && rcu_access_pointer(help->helper) != info->helper) return false; } + /* Force conntrack entry direction to the current packet? */ + if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { + /* Delete the conntrack entry if confirmed, else just release + * the reference. + */ + if (nf_ct_is_confirmed(ct)) + nf_ct_delete(ct, 0, 0); + else + nf_conntrack_put(&ct->ct_general); + nf_ct_set(skb, NULL, 0); + return false; + } return true; } @@ -1207,6 +1223,7 @@ static int parse_nat(const struct nlattr *attr, static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = { [OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 }, + [OVS_CT_ATTR_FORCE_COMMIT] = { .minlen = 0, .maxlen = 0 }, [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16), .maxlen = sizeof(u16) }, [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark), @@ -1246,6 +1263,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, } switch (type) { + case OVS_CT_ATTR_FORCE_COMMIT: + info->force = true; + /* fall through. 
*/ case OVS_CT_ATTR_COMMIT: info->commit = true; break; @@ -1472,7 +1492,9 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info, if (!start) return -EMSGSIZE; - if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT)) + if (ct_info->commit && nla_put_flag(skb, ct_info->force + ? OVS_CT_ATTR_FORCE_COMMIT + : OVS_CT_ATTR_COMMIT)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id)) -- cgit v1.2.3 From 316d4d78cf9b6795b83f97c45368748741df418c Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Thu, 9 Feb 2017 11:22:01 -0800 Subject: openvswitch: Pack struct sw_flow_key. struct sw_flow_key has two 16-bit holes. Move the most matched conntrack match fields there. In some typical cases this reduces the size of the key that needs to be hashed into half and into one cache line. Signed-off-by: Jarno Rajahalme Acked-by: Joe Stringer Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- net/openvswitch/conntrack.c | 40 ++++++++++++++++++++-------------------- net/openvswitch/conntrack.h | 8 ++++---- net/openvswitch/flow.h | 14 ++++++++------ net/openvswitch/flow_netlink.c | 11 +++++++---- 4 files changed, 39 insertions(+), 34 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 8b15bab70583..c2d452eab0c5 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -152,7 +152,7 @@ static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key, const struct nf_conntrack_tuple *orig, u8 icmp_proto) { - key->ct.orig_proto = orig->dst.protonum; + key->ct_orig_proto = orig->dst.protonum; if (orig->dst.protonum == icmp_proto) { key->ct.orig_tp.src = htons(orig->dst.u.icmp.type); key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code); @@ -166,8 +166,8 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, const struct nf_conntrack_zone *zone, const struct nf_conn *ct) { - key->ct.state = state; - key->ct.zone = zone->id; + key->ct_state = state; + key->ct_zone = zone->id; key->ct.mark = ovs_ct_get_mark(ct); ovs_ct_get_labels(ct, &key->ct.labels); @@ -195,10 +195,10 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, return; } } - /* Clear 'ct.orig_proto' to mark the non-existence of conntrack + /* Clear 'ct_orig_proto' to mark the non-existence of conntrack * original direction key fields. */ - key->ct.orig_proto = 0; + key->ct_orig_proto = 0; } /* Update 'key' based on skb->_nfct. 
If 'post_ct' is true, then OVS has @@ -228,7 +228,7 @@ static void ovs_ct_update_key(const struct sk_buff *skb, if (ct->master) state |= OVS_CS_F_RELATED; if (keep_nat_flags) { - state |= key->ct.state & OVS_CS_F_NAT_MASK; + state |= key->ct_state & OVS_CS_F_NAT_MASK; } else { if (ct->status & IPS_SRC_NAT) state |= OVS_CS_F_SRC_NAT; @@ -259,11 +259,11 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) int ovs_ct_put_key(const struct sw_flow_key *swkey, const struct sw_flow_key *output, struct sk_buff *skb) { - if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct.state)) + if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && - nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct.zone)) + nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && @@ -275,14 +275,14 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey, &output->ct.labels)) return -EMSGSIZE; - if (swkey->ct.orig_proto) { + if (swkey->ct_orig_proto) { if (swkey->eth.type == htons(ETH_P_IP)) { struct ovs_key_ct_tuple_ipv4 orig = { output->ipv4.ct_orig.src, output->ipv4.ct_orig.dst, output->ct.orig_tp.src, output->ct.orig_tp.dst, - output->ct.orig_proto, + output->ct_orig_proto, }; if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, sizeof(orig), &orig)) @@ -293,7 +293,7 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey, IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst), output->ct.orig_tp.src, output->ct.orig_tp.dst, - output->ct.orig_proto, + output->ct_orig_proto, }; if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, sizeof(orig), &orig)) @@ -612,11 +612,11 @@ static bool skb_nfct_cached(struct net *net, * due to an upcall. If the connection was not confirmed, it is not * cached and needs to be run through conntrack again. */ - if (!ct && key->ct.state & OVS_CS_F_TRACKED && - !(key->ct.state & OVS_CS_F_INVALID) && - key->ct.zone == info->zone.id) { + if (!ct && key->ct_state & OVS_CS_F_TRACKED && + !(key->ct_state & OVS_CS_F_INVALID) && + key->ct_zone == info->zone.id) { ct = ovs_ct_find_existing(net, &info->zone, info->family, skb, - !!(key->ct.state + !!(key->ct_state & OVS_CS_F_NAT_MASK)); if (ct) nf_ct_get(skb, &ctinfo); @@ -740,7 +740,7 @@ static void ovs_nat_update_key(struct sw_flow_key *key, if (maniptype == NF_NAT_MANIP_SRC) { __be16 src; - key->ct.state |= OVS_CS_F_SRC_NAT; + key->ct_state |= OVS_CS_F_SRC_NAT; if (key->eth.type == htons(ETH_P_IP)) key->ipv4.addr.src = ip_hdr(skb)->saddr; else if (key->eth.type == htons(ETH_P_IPV6)) @@ -762,7 +762,7 @@ static void ovs_nat_update_key(struct sw_flow_key *key, } else { __be16 dst; - key->ct.state |= OVS_CS_F_DST_NAT; + key->ct_state |= OVS_CS_F_DST_NAT; if (key->eth.type == htons(ETH_P_IP)) key->ipv4.addr.dst = ip_hdr(skb)->daddr; else if (key->eth.type == htons(ETH_P_IPV6)) @@ -886,7 +886,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, * NAT after the nf_conntrack_in() call. We can actually clear * the whole state, as it will be re-initialized below. */ - key->ct.state = 0; + key->ct_state = 0; /* Update the key, but keep the NAT flags. */ ovs_ct_update_key(skb, info, key, true, true); @@ -902,9 +902,9 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, * * NAT will be done only if the CT action has NAT, and only * once per packet (per zone), as guarded by the NAT bits in - * the key->ct.state. + * the key->ct_state. 
*/ - if (info->nat && !(key->ct.state & OVS_CS_F_NAT_MASK) && + if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) && (nf_ct_is_confirmed(ct) || info->commit) && ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) { return -EINVAL; diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h index 9e92445dc092..bc7efd1867ab 100644 --- a/net/openvswitch/conntrack.h +++ b/net/openvswitch/conntrack.h @@ -76,14 +76,14 @@ static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb, static inline void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) { - key->ct.state = 0; - key->ct.zone = 0; + key->ct_state = 0; + key->ct_zone = 0; key->ct.mark = 0; memset(&key->ct.labels, 0, sizeof(key->ct.labels)); - /* Clear 'ct.orig_proto' to mark the non-existence of original + /* Clear 'ct_orig_proto' to mark the non-existence of original * direction key fields. */ - key->ct.orig_proto = 0; + key->ct_orig_proto = 0; } static inline int ovs_ct_put_key(const struct sw_flow_key *swkey, diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 76e05b25f030..a9bc1c875965 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -85,6 +85,11 @@ struct sw_flow_key { struct vlan_head cvlan; __be16 type; /* Ethernet frame type. */ } eth; + /* Filling a hole of two bytes. */ + u8 ct_state; + u8 ct_orig_proto; /* CT original direction tuple IP + * protocol. + */ union { struct { __be32 top_lse; /* top label stack entry */ @@ -96,6 +101,7 @@ struct sw_flow_key { u8 frag; /* One of OVS_FRAG_TYPE_*. */ } ip; }; + u16 ct_zone; /* Conntrack zone. */ struct { __be16 src; /* TCP/UDP/SCTP source port. */ __be16 dst; /* TCP/UDP/SCTP destination port. */ @@ -138,16 +144,12 @@ struct sw_flow_key { } ipv6; }; struct { - /* Connection tracking fields. */ - u8 state; - u8 orig_proto; /* CT orig tuple IP protocol. */ - u16 zone; - u32 mark; + /* Connection tracking fields not packed above. */ struct { __be16 src; /* CT orig tuple tp src port. */ __be16 dst; /* CT orig tuple tp dst port. 
*/ } orig_tp; - + u32 mark; struct ovs_key_ct_labels labels; } ct; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 989f38f120bb..6f5fa50f716d 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1072,14 +1072,14 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, return -EINVAL; } - SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask); + SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE); } if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) && ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) { u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]); - SW_FLOW_KEY_PUT(match, ct.zone, ct_zone, is_mask); + SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE); } if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) && @@ -1107,7 +1107,7 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask); SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask); SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask); - SW_FLOW_KEY_PUT(match, ct.orig_proto, ct->ipv4_proto, is_mask); + SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4); } if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) { @@ -1123,7 +1123,7 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, is_mask); SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask); SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask); - SW_FLOW_KEY_PUT(match, ct.orig_proto, ct->ipv6_proto, is_mask); + SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6); } @@ -1564,6 +1564,9 @@ int ovs_nla_get_flow_metadata(struct net *net, memset(&match, 0, sizeof(match)); match.key = key; + key->ct_state = 0; + key->ct_zone = 0; + key->ct_orig_proto = 0; memset(&key->ct, 0, sizeof(key->ct)); memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig)); memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig)); -- cgit v1.2.3 From 58e3bdd59742feef680861acec19126e40e4fa8d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Feb 2017 10:28:38 +0100 Subject: ipv4: fib: Only flush FIB aliases belonging to currently flushed table In case the MAIN table is flushed and its trie is shared with the LOCAL table, then we might be flushing FIB aliases belonging to the latter. This can lead to FIB_ENTRY_DEL notifications sent with the wrong table ID. The above doesn't affect current listeners, as the table ID is ignored during entry deletion, but this will change later in the patchset. When flushing a particular table, skip any aliases belonging to a different one. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko CC: Alexander Duyck CC: Patrick McHardy Reviewed-by: Alexander Duyck Signed-off-by: David S. 
Miller --- net/ipv4/fib_trie.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 2919d1a10cfd..5ef4596d7afc 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1963,7 +1963,8 @@ int fib_table_flush(struct net *net, struct fib_table *tb) hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; - if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) { + if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || + tb->tb_id != fa->tb_id) { slen = fa->fa_slen; continue; } -- cgit v1.2.3 From 42d5aa76ec8fc9602922cc590a437ecd6693523b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Feb 2017 10:28:39 +0100 Subject: ipv4: fib: Send deletion notification with actual FIB alias type When a FIB alias is removed, a notification is sent using the type passed from user space - which can be RTN_UNSPEC - instead of the actual type of the removed alias. This is problematic for listeners of the FIB notification chain, as several FIB aliases can exist with matching parameters but differing types. Solve this by passing the actual type of the removed FIB alias. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko CC: Patrick McHardy Signed-off-by: David S. Miller --- net/ipv4/fib_trie.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 5ef4596d7afc..b0bfb1cc791a 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1653,8 +1653,8 @@ int fib_table_delete(struct net *net, struct fib_table *tb, return -ESRCH; call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, key, plen, - fa_to_delete->fa_info, tos, cfg->fc_type, - tb->tb_id, 0); + fa_to_delete->fa_info, tos, + fa_to_delete->fa_type, tb->tb_id, 0); rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id, &cfg->fc_nlinfo, 0); -- cgit v1.2.3 From 5b7d616dbccc2fd6ae959045e1a9ca17de5dfc2a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Feb 2017 10:28:40 +0100 Subject: ipv4: fib: Send notification before deleting FIB alias When a FIB alias is replaced following NLM_F_REPLACE, the ENTRY_ADD notification is sent after the reference on the previous FIB info was dropped. This is problematic as potential listeners might need to access it in their notification blocks. Solve this by sending the notification prior to the deletion of the replaced FIB alias. This is consistent with ENTRY_DEL notifications. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko CC: Patrick McHardy Signed-off-by: David S.
Miller --- net/ipv4/fib_trie.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index b0bfb1cc791a..1c4d42e46dbb 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1295,6 +1295,13 @@ int fib_table_insert(struct net *net, struct fib_table *tb, new_fa->tb_id = tb->tb_id; new_fa->fa_default = -1; + call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, + key, plen, fi, + new_fa->fa_tos, cfg->fc_type, + tb->tb_id, nlflags); + rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, + tb->tb_id, &cfg->fc_nlinfo, nlflags); + hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list); alias_free_mem_rcu(fa); @@ -1303,13 +1310,6 @@ int fib_table_insert(struct net *net, struct fib_table *tb, if (state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net); - call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, - key, plen, fi, - new_fa->fa_tos, cfg->fc_type, - tb->tb_id, cfg->fc_nlflags); - rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, - tb->tb_id, &cfg->fc_nlinfo, nlflags); - goto succeeded; } /* Error if we find a perfect match which -- cgit v1.2.3 From 2f3a5272e5c16c3c10fbba06928a513f9b1e2fcd Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Feb 2017 10:28:41 +0100 Subject: ipv4: fib: Add events for FIB replace and append The FIB notification chain currently uses the NLM_F_{REPLACE,APPEND} flags to signal routes being replaced or appended. Instead of using netlink flags for in-kernel notifications we can simply introduce two new events in the FIB notification chain. This has the added advantage of making the API cleaner, thereby making it clear that these events should be supported by listeners of the notification chain. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko CC: Patrick McHardy Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 3 ++- net/ipv4/fib_trie.c | 27 ++++++++++++++------------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 45a184eaff2b..368bb4024b78 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -211,7 +211,6 @@ struct fib_entry_notifier_info { u8 tos; u8 type; u32 tb_id; - u32 nlflags; }; struct fib_nh_notifier_info { @@ -220,6 +219,8 @@ struct fib_nh_notifier_info { }; enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE, + FIB_EVENT_ENTRY_APPEND, FIB_EVENT_ENTRY_ADD, FIB_EVENT_ENTRY_DEL, FIB_EVENT_RULE_ADD, diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 1c4d42e46dbb..d8cea210af0e 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -124,7 +124,7 @@ static void fib_notify(struct net *net, struct notifier_block *nb, static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_info *fi, - u8 tos, u8 type, u32 tb_id, u32 nlflags) + u8 tos, u8 type, u32 tb_id) { struct fib_entry_notifier_info info = { .dst = dst, @@ -133,7 +133,6 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net, .tos = tos, .type = type, .tb_id = tb_id, - .nlflags = nlflags, }; return call_fib_notifier(nb, net, event_type, &info.info); } @@ -197,7 +196,7 @@ int call_fib_notifiers(struct net *net, enum fib_event_type event_type, static int call_fib_entry_notifiers(struct net *net, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_info *fi, - u8 tos, u8 type, u32 tb_id, u32 nlflags) + u8 tos, u8 type, u32 tb_id) { struct fib_entry_notifier_info info = { .dst = dst, @@ -206,7 +205,6 @@ static int call_fib_entry_notifiers(struct net *net, .tos = tos, .type = type, .tb_id = tb_id, - .nlflags = nlflags, }; return call_fib_notifiers(net, event_type, &info.info); } @@ -1198,6 +1196,7 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, int fib_table_insert(struct net *net, struct fib_table *tb, struct fib_config *cfg) { + enum fib_event_type event = FIB_EVENT_ENTRY_ADD; struct trie *t = (struct trie *)tb->tb_data; struct fib_alias *fa, *new_fa; struct key_vector *l, *tp; @@ -1295,10 +1294,10 @@ int fib_table_insert(struct net *net, struct fib_table *tb, new_fa->tb_id = tb->tb_id; new_fa->fa_default = -1; - call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, + call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, key, plen, fi, new_fa->fa_tos, cfg->fc_type, - tb->tb_id, nlflags); + tb->tb_id); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, nlflags); @@ -1319,10 +1318,12 @@ int fib_table_insert(struct net *net, struct fib_table *tb, if (fa_match) goto out; - if (cfg->fc_nlflags & NLM_F_APPEND) + if (cfg->fc_nlflags & NLM_F_APPEND) { + event = FIB_EVENT_ENTRY_APPEND; nlflags |= NLM_F_APPEND; - else + } else { fa = fa_first; + } } err = -ENOENT; if (!(cfg->fc_nlflags & NLM_F_CREATE)) @@ -1351,8 +1352,8 @@ int fib_table_insert(struct net *net, struct fib_table *tb, tb->tb_num_default++; rt_cache_flush(cfg->fc_nlinfo.nl_net); - call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, key, plen, fi, tos, - cfg->fc_type, tb->tb_id, cfg->fc_nlflags); + call_fib_entry_notifiers(net, event, key, plen, fi, tos, cfg->fc_type, + tb->tb_id); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id, &cfg->fc_nlinfo, nlflags); succeeded: @@ -1654,7 +1655,7 @@ int fib_table_delete(struct net *net, struct fib_table *tb, call_fib_entry_notifiers(net, 
FIB_EVENT_ENTRY_DEL, key, plen, fa_to_delete->fa_info, tos, - fa_to_delete->fa_type, tb->tb_id, 0); + fa_to_delete->fa_type, tb->tb_id); rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id, &cfg->fc_nlinfo, 0); @@ -1973,7 +1974,7 @@ int fib_table_flush(struct net *net, struct fib_table *tb) n->key, KEYLENGTH - fa->fa_slen, fi, fa->fa_tos, fa->fa_type, - tb->tb_id, 0); + tb->tb_id); hlist_del_rcu(&fa->fa_list); fib_release_info(fa->fa_info); alias_free_mem_rcu(fa); @@ -2013,7 +2014,7 @@ static void fib_leaf_notify(struct net *net, struct key_vector *l, call_fib_entry_notifier(nb, net, event_type, l->key, KEYLENGTH - fa->fa_slen, fi, fa->fa_tos, - fa->fa_type, fa->tb_id, 0); + fa->fa_type, fa->tb_id); } } -- cgit v1.2.3 From 9aecce1c7d977e52759167a4916d71e80aaf5070 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Feb 2017 10:28:42 +0100 Subject: mlxsw: spectrum_router: Correctly handle identical routes In the device, routes are indexed in a routing table based on the prefix and its length. This is in contrast to the kernel's FIB where several FIB aliases can exist with these parameters being identical. In such cases, the routes will be sorted by table ID (LOCAL first, then MAIN), TOS and finally priority (metric). During lookup, these routes will be evaluated in order. In case the packet's TOS field is non-zero and a FIB alias with a matching TOS is found, then it's selected. Otherwise, the lookup defaults to the route with TOS 0 (if it exists). However, if the requested scope is narrower than the one found, then the lookup continues. To best reflect the kernel's datapath we should take the above into account. Given a prefix and its length, the reflected route will always be the first one in the FIB alias list. However, if the route has a non-zero TOS then its action will be converted to trap instead of forward, since we currently don't support TOS-based routing. If this turns out to be a real issue, we can add support for that using policy-based switching. The route's scope can be effectively ignored as any packet being routed by the device would've been looked-up using the widest scope (UNIVERSE). To achieve that we need to do two changes. Firstly, we need to create another struct (FIB node) that will hold the list of FIB entries sharing the same prefix and length. This struct will be hashed using these two parameters. Secondly, we need to change the route reflection to match the above logic, so that the first FIB entry in the list will be programmed into the device while the rest will remain in the driver's cache in case of subsequent changes. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 581 ++++++++++++++------- 1 file changed, 403 insertions(+), 178 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 71ff02f7e29a..7c55df919a07 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -109,7 +109,6 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage, } struct mlxsw_sp_fib_key { - struct net_device *dev; unsigned char addr[sizeof(struct in6_addr)]; unsigned char prefix_len; }; @@ -122,94 +121,39 @@ enum mlxsw_sp_fib_entry_type { struct mlxsw_sp_nexthop_group; -struct mlxsw_sp_fib_entry { - struct rhash_head ht_node; +struct mlxsw_sp_fib_node { + struct list_head entry_list; struct list_head list; + struct rhash_head ht_node; + struct mlxsw_sp_vr *vr; struct mlxsw_sp_fib_key key; +}; + +struct mlxsw_sp_fib_entry_params { + u32 tb_id; + u32 prio; + u8 tos; + u8 type; +}; + +struct mlxsw_sp_fib_entry { + struct list_head list; + struct mlxsw_sp_fib_node *fib_node; enum mlxsw_sp_fib_entry_type type; - unsigned int ref_count; - struct mlxsw_sp_vr *vr; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; + struct mlxsw_sp_fib_entry_params params; bool offloaded; }; struct mlxsw_sp_fib { struct rhashtable ht; - struct list_head entry_list; + struct list_head node_list; unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; struct mlxsw_sp_prefix_usage prefix_usage; }; -static const struct rhashtable_params mlxsw_sp_fib_ht_params = { - .key_offset = offsetof(struct mlxsw_sp_fib_entry, key), - .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node), - .key_len = sizeof(struct mlxsw_sp_fib_key), - .automatic_shrinking = true, -}; - -static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib, - struct mlxsw_sp_fib_entry *fib_entry) -{ - unsigned char prefix_len = fib_entry->key.prefix_len; - int err; - - err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node, - mlxsw_sp_fib_ht_params); - if (err) - return err; - list_add_tail(&fib_entry->list, &fib->entry_list); - if (fib->prefix_ref_count[prefix_len]++ == 0) - mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); - return 0; -} - -static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib, - struct mlxsw_sp_fib_entry *fib_entry) -{ - unsigned char prefix_len = fib_entry->key.prefix_len; - - if (--fib->prefix_ref_count[prefix_len] == 0) - mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); - list_del(&fib_entry->list); - rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node, - mlxsw_sp_fib_ht_params); -} - -static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr, - size_t addr_len, unsigned char prefix_len, - struct net_device *dev) -{ - struct mlxsw_sp_fib_entry *fib_entry; - - fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); - if (!fib_entry) - return NULL; - fib_entry->key.dev = dev; - memcpy(fib_entry->key.addr, addr, addr_len); - fib_entry->key.prefix_len = prefix_len; - return fib_entry; -} - -static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry) -{ - kfree(fib_entry); -} - -static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr, - size_t addr_len, unsigned char prefix_len, - struct net_device *dev) -{ - struct mlxsw_sp_fib_key key; - - memset(&key, 0, sizeof(key)); - key.dev = dev; - memcpy(key.addr, addr, 
addr_len); - key.prefix_len = prefix_len; - return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); -} +static const struct rhashtable_params mlxsw_sp_fib_ht_params; static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) { @@ -222,7 +166,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params); if (err) goto err_rhashtable_init; - INIT_LIST_HEAD(&fib->entry_list); + INIT_LIST_HEAD(&fib->node_list); return fib; err_rhashtable_init: @@ -232,6 +176,7 @@ err_rhashtable_init: static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) { + WARN_ON(!list_empty(&fib->node_list)); rhashtable_destroy(&fib->ht); kfree(fib); } @@ -1239,9 +1184,9 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, int err; list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { - if (vr == fib_entry->vr) + if (vr == fib_entry->fib_node->vr) continue; - vr = fib_entry->vr; + vr = fib_entry->fib_node->vr; err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr, old_adj_index, old_ecmp_size, @@ -1727,6 +1672,9 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; + if (fib_entry->params.tos) + return false; + switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: return !!nh_group->adj_index_valid; @@ -1741,7 +1689,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { fib_entry->offloaded = true; - switch (fib_entry->vr->proto) { + switch (fib_entry->fib_node->vr->proto) { case MLXSW_SP_L3_PROTO_IPV4: fib_info_offload_inc(fib_entry->nh_group->key.fi); break; @@ -1753,7 +1701,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) static void mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) { - switch (fib_entry->vr->proto) { + switch (fib_entry->fib_node->vr->proto) { case MLXSW_SP_L3_PROTO_IPV4: fib_info_offload_dec(fib_entry->nh_group->key.fi); break; @@ -1793,8 +1741,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, enum mlxsw_reg_ralue_op op) { char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->vr; + u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; @@ -1815,7 +1763,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ralue_pack4(ralue_pl, (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->key.prefix_len, *p_dip); + vr->id, fib_entry->fib_node->key.prefix_len, + *p_dip); mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, adjacency_index, ecmp_size); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); @@ -1828,8 +1777,8 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif; enum mlxsw_reg_ralue_trap_action trap_action; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->vr; + u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; u16 trap_id = 0; u16 rif = 0; @@ -1843,7 +1792,8 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ralue_pack4(ralue_pl, (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, 
fib_entry->key.prefix_len, *p_dip); + vr->id, fib_entry->fib_node->key.prefix_len, + *p_dip); mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } @@ -1853,12 +1803,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, enum mlxsw_reg_ralue_op op) { char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->vr; + u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; mlxsw_reg_ralue_pack4(ralue_pl, (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->key.prefix_len, *p_dip); + vr->id, fib_entry->fib_node->key.prefix_len, + *p_dip); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } @@ -1884,7 +1835,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, { int err = -EINVAL; - switch (fib_entry->vr->proto) { + switch (fib_entry->fib_node->vr->proto) { case MLXSW_SP_L3_PROTO_IPV4: err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); break; @@ -1930,130 +1881,376 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) +mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node, + const struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib_entry *fib_entry; - struct fib_info *fi = fen_info->fi; - struct mlxsw_sp_vr *vr; int err; - vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id, - MLXSW_SP_L3_PROTO_IPV4); - if (IS_ERR(vr)) - return ERR_CAST(vr); - - fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len, fi->fib_dev); - if (fib_entry) { - /* Already exists, just take a reference */ - fib_entry->ref_count++; - return fib_entry; - } - fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len, fi->fib_dev); + fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); if (!fib_entry) { err = -ENOMEM; - goto err_fib_entry_create; + goto err_fib_entry_alloc; } - fib_entry->vr = vr; - fib_entry->ref_count = 1; err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); if (err) goto err_fib4_entry_type_set; - err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi); + err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi); if (err) goto err_nexthop_group_get; + fib_entry->params.prio = fen_info->fi->fib_priority; + fib_entry->params.tb_id = fen_info->tb_id; + fib_entry->params.type = fen_info->type; + fib_entry->params.tos = fen_info->tos; + + fib_entry->fib_node = fib_node; + return fib_entry; err_nexthop_group_get: err_fib4_entry_type_set: - mlxsw_sp_fib_entry_destroy(fib_entry); -err_fib_entry_create: - mlxsw_sp_vr_put(mlxsw_sp, vr); - + kfree(fib_entry); +err_fib_entry_alloc: return ERR_PTR(err); } +static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); + kfree(fib_entry); +} + +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info); + static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) +mlxsw_sp_fib4_entry_lookup(struct 
mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info) { - struct mlxsw_sp_vr *vr; + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib_node *fib_node; - vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id, - MLXSW_SP_L3_PROTO_IPV4); - if (!vr) + fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); + if (IS_ERR(fib_node)) + return NULL; + + list_for_each_entry(fib_entry, &fib_node->entry_list, list) { + if (fib_entry->params.tb_id == fen_info->tb_id && + fib_entry->params.tos == fen_info->tos && + fib_entry->params.type == fen_info->type && + fib_entry->nh_group->key.fi == fen_info->fi) { + return fib_entry; + } + } + + return NULL; +} + +static const struct rhashtable_params mlxsw_sp_fib_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_fib_node, key), + .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node), + .key_len = sizeof(struct mlxsw_sp_fib_key), + .automatic_shrinking = true, +}; + +static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_node *fib_node) +{ + return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node, + mlxsw_sp_fib_ht_params); +} + +static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_node *fib_node) +{ + rhashtable_remove_fast(&fib->ht, &fib_node->ht_node, + mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len) +{ + struct mlxsw_sp_fib_key key; + + memset(&key, 0, sizeof(key)); + memcpy(key.addr, addr, addr_len); + key.prefix_len = prefix_len; + return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr, + size_t addr_len, unsigned char prefix_len) +{ + struct mlxsw_sp_fib_node *fib_node; + + fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL); + if (!fib_node) return NULL; - return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len, - fen_info->fi->fib_dev); + INIT_LIST_HEAD(&fib_node->entry_list); + list_add(&fib_node->list, &vr->fib->node_list); + memcpy(fib_node->key.addr, addr, addr_len); + fib_node->key.prefix_len = prefix_len; + mlxsw_sp_fib_node_insert(vr->fib, fib_node); + fib_node->vr = vr; + + return fib_node; +} + +static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node) +{ + mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node); + list_del(&fib_node->list); + WARN_ON(!list_empty(&fib_node->entry_list)); + kfree(fib_node); +} + +static bool +mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, + const struct mlxsw_sp_fib_entry *fib_entry) +{ + return list_first_entry(&fib_node->entry_list, + struct mlxsw_sp_fib_entry, list) == fib_entry; +} + +static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) +{ + unsigned char prefix_len = fib_node->key.prefix_len; + struct mlxsw_sp_fib *fib = fib_node->vr->fib; + + if (fib->prefix_ref_count[prefix_len]++ == 0) + mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); +} + +static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node) +{ + unsigned char prefix_len = fib_node->key.prefix_len; + struct mlxsw_sp_fib *fib = fib_node->vr->fib; + + if (--fib->prefix_ref_count[prefix_len] == 0) + mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); } -static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static 
struct mlxsw_sp_fib_node * +mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info) { - struct mlxsw_sp_vr *vr = fib_entry->vr; + struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_vr *vr; + int err; + + vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id, + MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst, + sizeof(fen_info->dst), + fen_info->dst_len); + if (fib_node) + return fib_node; - if (--fib_entry->ref_count == 0) { - mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); - mlxsw_sp_fib_entry_destroy(fib_entry); + fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst, + sizeof(fen_info->dst), + fen_info->dst_len); + if (!fib_node) { + err = -ENOMEM; + goto err_fib_node_create; } + + return fib_node; + +err_fib_node_create: mlxsw_sp_vr_put(mlxsw_sp, vr); + return ERR_PTR(err); } -static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) { - unsigned int last_ref_count; + struct mlxsw_sp_vr *vr = fib_node->vr; - do { - last_ref_count = fib_entry->ref_count; - mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); - } while (last_ref_count != 1); + if (!list_empty(&fib_node->entry_list)) + return; + mlxsw_sp_fib_node_destroy(fib_node); + mlxsw_sp_vr_put(mlxsw_sp, vr); } -static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, - struct fib_entry_notifier_info *fen_info) +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct mlxsw_sp_fib_entry_params *params) { struct mlxsw_sp_fib_entry *fib_entry; - struct mlxsw_sp_vr *vr; + + list_for_each_entry(fib_entry, &fib_node->entry_list, list) { + if (fib_entry->params.tb_id > params->tb_id) + continue; + if (fib_entry->params.tb_id != params->tb_id) + break; + if (fib_entry->params.tos > params->tos) + continue; + if (fib_entry->params.prio >= params->prio || + fib_entry->params.tos < params->tos) + return fib_entry; + } + + return NULL; +} + +static int +mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, + struct mlxsw_sp_fib_entry *new_entry) +{ + struct mlxsw_sp_fib_entry *fib_entry; + + fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params); + + if (fib_entry) { + list_add_tail(&new_entry->list, &fib_entry->list); + } else { + struct mlxsw_sp_fib_entry *last; + + list_for_each_entry(last, &fib_node->entry_list, list) { + if (new_entry->params.tb_id > last->params.tb_id) + break; + fib_entry = last; + } + + if (fib_entry) + list_add(&new_entry->list, &fib_entry->list); + else + list_add(&new_entry->list, &fib_node->entry_list); + } + + return 0; +} + +static void +mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry) +{ + list_del(&fib_entry->list); +} + +static int +mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fib_node *fib_node, + struct mlxsw_sp_fib_entry *fib_entry) +{ + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) + return 0; + + /* To prevent packet loss, overwrite the previously offloaded + * entry. 
+ */ + if (!list_is_singular(&fib_node->entry_list)) { + enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE; + struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); + + mlxsw_sp_fib_entry_offload_refresh(n, op, 0); + } + + return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); +} + +static void +mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fib_node *fib_node, + struct mlxsw_sp_fib_entry *fib_entry) +{ + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) + return; + + /* Promote the next entry by overwriting the deleted entry */ + if (!list_is_singular(&fib_node->entry_list)) { + struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); + enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE; + + mlxsw_sp_fib_entry_update(mlxsw_sp, n); + mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0); + return; + } + + mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); +} + +static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + int err; + + err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry); + if (err) + return err; + + err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry); + if (err) + goto err_fib4_node_entry_add; + + mlxsw_sp_fib_node_prefix_inc(fib_node); + + return 0; + +err_fib4_node_entry_add: + mlxsw_sp_fib4_node_list_remove(fib_entry); + return err; +} + +static void +mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + + mlxsw_sp_fib_node_prefix_dec(fib_node); + mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry); + mlxsw_sp_fib4_node_list_remove(fib_entry); +} + +static int +mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info) +{ + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib_node *fib_node; int err; if (mlxsw_sp->router.aborted) return 0; - fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info); - if (IS_ERR(fib_entry)) { - dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n"); - return PTR_ERR(fib_entry); + fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); + if (IS_ERR(fib_node)) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n"); + return PTR_ERR(fib_node); } - if (fib_entry->ref_count != 1) - return 0; + fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); + if (IS_ERR(fib_entry)) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); + err = PTR_ERR(fib_entry); + goto err_fib4_entry_create; + } - vr = fib_entry->vr; - err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry); + err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry); if (err) { - dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n"); - goto err_fib_entry_insert; + dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); + goto err_fib4_node_entry_link; } - err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); - if (err) - goto err_fib_entry_add; + return 0; -err_fib_entry_add: - mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); -err_fib_entry_insert: - mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); +err_fib4_node_entry_link: + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); +err_fib4_entry_create: + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); return err; } @@ -2061,20 +2258,19 @@ static void mlxsw_sp_router_fib4_del(struct 
mlxsw_sp *mlxsw_sp, struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib_node *fib_node; if (mlxsw_sp->router.aborted) return; - fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info); - if (!fib_entry) + fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); + if (WARN_ON(!fib_entry)) return; + fib_node = fib_entry->fib_node; - if (fib_entry->ref_count == 1) { - mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); - mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry); - } - - mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); } static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) @@ -2108,10 +2304,42 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } +static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_fib_entry *fib_entry, *tmp; + + list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) { + bool do_break = &tmp->list == &fib_node->entry_list; + + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + /* Break when entry list is empty and node was freed. + * Otherwise, we'll access freed memory in the next + * iteration. + */ + if (do_break) + break; + } +} + +static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + switch (fib_node->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON_ONCE(1); + break; + } +} + static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_sp_fib_entry *fib_entry; - struct mlxsw_sp_fib_entry *tmp; + struct mlxsw_sp_fib_node *fib_node, *tmp; struct mlxsw_sp_vr *vr; int i; @@ -2121,14 +2349,11 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!vr->used) continue; - list_for_each_entry_safe(fib_entry, tmp, - &vr->fib->entry_list, list) { - bool do_break = &tmp->list == &vr->fib->entry_list; + list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list, + list) { + bool do_break = &tmp->list == &vr->fib->node_list; - mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); - mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, - fib_entry); - mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry); + mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node); if (do_break) break; } -- cgit v1.2.3 From 4283bce5f8c2307d47313e0429e5f1357c43023d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Feb 2017 10:28:43 +0100 Subject: mlxsw: spectrum_router: Add support for route append When a new route is appended, it's placed after existing routes sharing the same parameters (prefix, length, table ID, TOS and priority). While the device supports only one route with the same prefix and length in a single table, it's important to correctly place the appended route in the driver's cache, as when a route is deleted the next one is programmed into the device. Following the reception of an ENTRY_APPEND notification, resolve the FIB node corresponding to the prefix and length and correctly place the new entry in its entry list. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 43 +++++++++++++++++++--- 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 7c55df919a07..d98f0396b0e8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2105,14 +2105,38 @@ mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, return NULL; } +static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib_entry *new_entry) +{ + struct mlxsw_sp_fib_node *fib_node; + + if (WARN_ON(!fib_entry)) + return -EINVAL; + + fib_node = fib_entry->fib_node; + list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) { + if (fib_entry->params.tb_id != new_entry->params.tb_id || + fib_entry->params.tos != new_entry->params.tos || + fib_entry->params.prio != new_entry->params.prio) + break; + } + + list_add_tail(&new_entry->list, &fib_entry->list); + return 0; +} + static int mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *new_entry) + struct mlxsw_sp_fib_entry *new_entry, + bool append) { struct mlxsw_sp_fib_entry *fib_entry; fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params); + if (append) + return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry); + if (fib_entry) { list_add_tail(&new_entry->list, &fib_entry->list); } else { @@ -2182,12 +2206,13 @@ mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) + struct mlxsw_sp_fib_entry *fib_entry, + bool append) { struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; int err; - err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry); + err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, append); if (err) return err; @@ -2217,7 +2242,8 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) + const struct fib_entry_notifier_info *fen_info, + bool append) { struct mlxsw_sp_fib_entry *fib_entry; struct mlxsw_sp_fib_node *fib_node; @@ -2239,7 +2265,7 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, goto err_fib4_entry_create; } - err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry); + err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, append); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); goto err_fib4_node_entry_link; @@ -2453,13 +2479,17 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; + bool append; int err; /* Protect internal structures from changes */ rtnl_lock(); switch (fib_work->event) { + case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: - err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info); + append = fib_work->event == FIB_EVENT_ENTRY_APPEND; + err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, + append); if (err) mlxsw_sp_router_fib4_abort(mlxsw_sp); fib_info_put(fib_work->fen_info.fi); @@ -2503,6 +2533,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, fib_work->event = event; switch (event) { + case FIB_EVENT_ENTRY_APPEND: /* 
fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); -- cgit v1.2.3 From 599cf8f95f221aa03ac5a6e5e3ee4bc97a6eade5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Feb 2017 10:28:44 +0100 Subject: mlxsw: spectrum_router: Add support for route replace Upon the reception of an ENTRY_REPLACE notification, resolve the FIB node corresponding to the prefix and length and insert the new route before the first matching entry. Since the notification also signals the deletion of the replaced route, delete it from the driver's cache. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 44 ++++++++++++++++++---- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d98f0396b0e8..d7ac22d7f940 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2128,7 +2128,7 @@ static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry, static int mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, struct mlxsw_sp_fib_entry *new_entry, - bool append) + bool replace, bool append) { struct mlxsw_sp_fib_entry *fib_entry; @@ -2136,7 +2136,12 @@ mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, if (append) return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry); + if (replace && WARN_ON(!fib_entry)) + return -EINVAL; + /* Insert new entry before replaced one, so that we can later + * remove the second. + */ if (fib_entry) { list_add_tail(&new_entry->list, &fib_entry->list); } else { @@ -2207,12 +2212,13 @@ mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, - bool append) + bool replace, bool append) { struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; int err; - err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, append); + err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace, + append); if (err) return err; @@ -2240,10 +2246,28 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib4_node_list_remove(fib_entry); } +static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + bool replace) +{ + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + struct mlxsw_sp_fib_entry *replaced; + + if (!replace) + return; + + /* We inserted the new entry before replaced one */ + replaced = list_next_entry(fib_entry, list); + + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced); + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); +} + static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info, - bool append) + bool replace, bool append) { struct mlxsw_sp_fib_entry *fib_entry; struct mlxsw_sp_fib_node *fib_node; @@ -2265,12 +2289,15 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, goto err_fib4_entry_create; } - err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, append); + err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace, + append); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); goto err_fib4_node_entry_link; } + 
mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace); + return 0; err_fib4_node_entry_link: @@ -2479,17 +2506,19 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; - bool append; + bool replace, append; int err; /* Protect internal structures from changes */ rtnl_lock(); switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: + replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; append = fib_work->event == FIB_EVENT_ENTRY_APPEND; err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, - append); + replace, append); if (err) mlxsw_sp_router_fib4_abort(mlxsw_sp); fib_info_put(fib_work->fen_info.fi); @@ -2533,6 +2562,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, fib_work->event = event; switch (event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: -- cgit v1.2.3 From ff548773106ec7f8031bc6172e0234bd2a02c19c Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 10 Feb 2017 16:34:07 +0000 Subject: afs: Move UUID struct to linux/uuid.h Move the afs_uuid struct to linux/uuid.h, rename it to uuid_v1 and change the u16/u32 fields to __be16/__be32 instead so that the structure can be cast to a 16-octet network-order buffer. Signed-off-by: David Howells Reviewed-by: Arnd Bergmann request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL); + call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL); if (!call->request) return -ENOMEM; b = call->buffer; r = call->request; - r->time_low = ntohl(b[0]); - r->time_mid = ntohl(b[1]); - r->time_hi_and_version = ntohl(b[2]); + r->time_low = b[0]; + r->time_mid = htons(ntohl(b[1])); + r->time_hi_and_version = htons(ntohl(b[2])); r->clock_seq_hi_and_reserved = ntohl(b[3]); r->clock_seq_low = ntohl(b[4]); @@ -454,7 +454,7 @@ static int afs_deliver_cb_probe(struct afs_call *call) static void SRXAFSCB_ProbeUuid(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); - struct afs_uuid *r = call->request; + struct uuid_v1 *r = call->request; struct { __be32 match; @@ -477,7 +477,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work) */ static int afs_deliver_cb_probe_uuid(struct afs_call *call) { - struct afs_uuid *r; + struct uuid_v1 *r; unsigned loop; __be32 *b; int ret; @@ -503,15 +503,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) } _debug("unmarshall UUID"); - call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL); + call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL); if (!call->request) return -ENOMEM; b = call->buffer; r = call->request; - r->time_low = ntohl(b[0]); - r->time_mid = ntohl(b[1]); - r->time_hi_and_version = ntohl(b[2]); + r->time_low = b[0]; + r->time_mid = htons(ntohl(b[1])); + r->time_hi_and_version = htons(ntohl(b[2])); r->clock_seq_hi_and_reserved = ntohl(b[3]); r->clock_seq_low = ntohl(b[4]); @@ -569,9 +569,9 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) memset(&reply, 0, sizeof(reply)); reply.ia.nifs = htonl(nifs); - reply.ia.uuid[0] = htonl(afs_uuid.time_low); - reply.ia.uuid[1] = htonl(afs_uuid.time_mid); - reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version); + reply.ia.uuid[0] = afs_uuid.time_low; + 
reply.ia.uuid[1] = htonl(ntohs(afs_uuid.time_mid)); + reply.ia.uuid[2] = htonl(ntohs(afs_uuid.time_hi_and_version)); reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved); reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low); for (loop = 0; loop < 6; loop++) diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 65504e218d35..79061fa17168 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "afs.h" @@ -407,30 +408,6 @@ struct afs_interface { unsigned mtu; /* MTU of interface */ }; -/* - * UUID definition [internet draft] - * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns - * increments since midnight 15th October 1582 - * - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID - * time - * - the clock sequence is a 14-bit counter to avoid duplicate times - */ -struct afs_uuid { - u32 time_low; /* low part of timestamp */ - u16 time_mid; /* mid part of timestamp */ - u16 time_hi_and_version; /* high part of timestamp and version */ -#define AFS_UUID_TO_UNIX_TIME 0x01b21dd213814000ULL -#define AFS_UUID_TIMEHI_MASK 0x0fff -#define AFS_UUID_VERSION_TIME 0x1000 /* time-based UUID */ -#define AFS_UUID_VERSION_NAME 0x3000 /* name-based UUID */ -#define AFS_UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */ - u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */ -#define AFS_UUID_CLOCKHI_MASK 0x3f -#define AFS_UUID_VARIANT_STD 0x80 - u8 clock_seq_low; /* clock seq low */ - u8 node[6]; /* spatially unique node ID (MAC addr) */ -}; - /*****************************************************************************/ /* * cache.c @@ -565,7 +542,7 @@ extern int afs_drop_inode(struct inode *); * main.c */ extern struct workqueue_struct *afs_wq; -extern struct afs_uuid afs_uuid; +extern struct uuid_v1 afs_uuid; /* * misc.c diff --git a/fs/afs/main.c b/fs/afs/main.c index f8188feb03ad..a07c14df3fd1 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -31,7 +31,7 @@ static char *rootcell; module_param(rootcell, charp, 0); MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); -struct afs_uuid afs_uuid; +struct uuid_v1 afs_uuid; struct workqueue_struct *afs_wq; /* @@ -41,7 +41,7 @@ static int __init afs_get_client_UUID(void) { struct timespec ts; u64 uuidtime; - u16 clockseq; + u16 clockseq, hi_v; int ret; /* read the MAC address of one of the external interfaces and construct @@ -53,22 +53,23 @@ static int __init afs_get_client_UUID(void) getnstimeofday(&ts); uuidtime = (u64) ts.tv_sec * 1000 * 1000 * 10; uuidtime += ts.tv_nsec / 100; - uuidtime += AFS_UUID_TO_UNIX_TIME; - afs_uuid.time_low = uuidtime; - afs_uuid.time_mid = uuidtime >> 32; - afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK; - afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME; + uuidtime += UUID_TO_UNIX_TIME; + afs_uuid.time_low = htonl(uuidtime); + afs_uuid.time_mid = htons(uuidtime >> 32); + hi_v = (uuidtime >> 48) & UUID_TIMEHI_MASK; + hi_v |= UUID_VERSION_TIME; + afs_uuid.time_hi_and_version = htons(hi_v); get_random_bytes(&clockseq, 2); afs_uuid.clock_seq_low = clockseq; afs_uuid.clock_seq_hi_and_reserved = - (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK; - afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD; + (clockseq >> 8) & UUID_CLOCKHI_MASK; + afs_uuid.clock_seq_hi_and_reserved |= UUID_VARIANT_STD; _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", - afs_uuid.time_low, - afs_uuid.time_mid, - afs_uuid.time_hi_and_version, + 
ntohl(afs_uuid.time_low), + ntohs(afs_uuid.time_mid), + ntohs(afs_uuid.time_hi_and_version), afs_uuid.clock_seq_hi_and_reserved, afs_uuid.clock_seq_low, afs_uuid.node[0], afs_uuid.node[1], afs_uuid.node[2], diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 2d095fc60204..4dff73a89758 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -18,6 +18,30 @@ #include +/* + * V1 (time-based) UUID definition [RFC 4122]. + * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns + * increments since midnight 15th October 1582 + * - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID + * time + * - the clock sequence is a 14-bit counter to avoid duplicate times + */ +struct uuid_v1 { + __be32 time_low; /* low part of timestamp */ + __be16 time_mid; /* mid part of timestamp */ + __be16 time_hi_and_version; /* high part of timestamp and version */ +#define UUID_TO_UNIX_TIME 0x01b21dd213814000ULL +#define UUID_TIMEHI_MASK 0x0fff +#define UUID_VERSION_TIME 0x1000 /* time-based UUID */ +#define UUID_VERSION_NAME 0x3000 /* name-based UUID */ +#define UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */ + u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */ +#define UUID_CLOCKHI_MASK 0x3f +#define UUID_VARIANT_STD 0x80 + u8 clock_seq_low; /* clock seq low */ + u8 node[6]; /* spatially unique node ID (MAC addr) */ +}; + /* * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") * not including trailing NUL. -- cgit v1.2.3 From b4db2b35fc444409daf483006111a2a705550fff Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 10 Feb 2017 16:34:07 +0000 Subject: afs: Use core kernel UUID generation AFS uses a time based UUID to identify the host itself. This requires getting a timestamp which is currently done through the getnstimeofday() interface that we want to eventually get rid of. Instead of replacing it with a ktime-based interface, simply remove the entire function and use generate_random_uuid() instead, which has a v4 ("completely random") UUID instead of the time-based one. 
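
A quick way to see what the switch to generate_random_uuid() buys: the kernel helper simply fills all 16 octets with random data and then forces the RFC 4122 version nibble to 4 and the variant bits to 10xxxxxx. The userspace sketch below is illustrative only and is not part of the patch: the names uuid_sketch and sketch_random_uuid, and the use of rand()/srand(), are made up for this example. It mirrors the 16-octet, network-order layout that struct uuid_v1 from the previous patch exposes once its fields are __be32/__be16.

/*
 * Illustrative userspace sketch only -- not part of the patch above.
 * Fill all 16 octets with random data, then set the RFC 4122 version
 * nibble to 4 ("random") and the variant bits, which is the same
 * post-processing the kernel's generate_random_uuid() applies.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct uuid_sketch {			/* same 16-octet layout as struct uuid_v1 */
	uint32_t time_low;		/* big-endian in the kernel structure */
	uint16_t time_mid;
	uint16_t time_hi_and_version;
	uint8_t  clock_seq_hi_and_reserved;
	uint8_t  clock_seq_low;
	uint8_t  node[6];
};

static void sketch_random_uuid(struct uuid_sketch *u)
{
	uint8_t *b = (uint8_t *)u;
	int i;

	for (i = 0; i < 16; i++)
		b[i] = rand() & 0xff;

	b[6] = (b[6] & 0x0f) | 0x40;	/* version 4: randomly generated */
	b[8] = (b[8] & 0x3f) | 0x80;	/* variant: RFC 4122 */
}

int main(void)
{
	struct uuid_sketch u;
	uint8_t *b = (uint8_t *)&u;
	int i;

	srand(time(NULL));
	sketch_random_uuid(&u);
	for (i = 0; i < 16; i++)
		printf("%02x%s", b[i],
		       (i == 3 || i == 5 || i == 7 || i == 9) ? "-" : "");
	printf("\n");
	return 0;
}

In the kernel itself this collapses to a single call, generate_random_uuid((unsigned char *)&afs_uuid), as the diff that follows shows; the time-based construction, the MAC-address lookup and the byte-order fix-ups from the previous patch all become unnecessary.
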
Signed-off-by: Arnd Bergmann Signed-off-by: David Howells --- fs/afs/internal.h | 11 +++++------ fs/afs/main.c | 48 +----------------------------------------------- fs/afs/netdevices.c | 21 --------------------- 3 files changed, 6 insertions(+), 74 deletions(-) diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 79061fa17168..8acf3670e756 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -560,6 +560,11 @@ extern struct vfsmount *afs_d_automount(struct path *); extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *); extern void afs_mntpt_kill_timer(void); +/* + * netdevices.c + */ +extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool); + /* * proc.c */ @@ -623,12 +628,6 @@ extern void __exit afs_purge_servers(void); extern int afs_fs_init(void); extern void afs_fs_exit(void); -/* - * use-rtnetlink.c - */ -extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool); -extern int afs_get_MAC_address(u8 *, size_t); - /* * vlclient.c */ diff --git a/fs/afs/main.c b/fs/afs/main.c index a07c14df3fd1..51d7d17bca57 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -34,50 +34,6 @@ MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); struct uuid_v1 afs_uuid; struct workqueue_struct *afs_wq; -/* - * get a client UUID - */ -static int __init afs_get_client_UUID(void) -{ - struct timespec ts; - u64 uuidtime; - u16 clockseq, hi_v; - int ret; - - /* read the MAC address of one of the external interfaces and construct - * a UUID from it */ - ret = afs_get_MAC_address(afs_uuid.node, sizeof(afs_uuid.node)); - if (ret < 0) - return ret; - - getnstimeofday(&ts); - uuidtime = (u64) ts.tv_sec * 1000 * 1000 * 10; - uuidtime += ts.tv_nsec / 100; - uuidtime += UUID_TO_UNIX_TIME; - afs_uuid.time_low = htonl(uuidtime); - afs_uuid.time_mid = htons(uuidtime >> 32); - hi_v = (uuidtime >> 48) & UUID_TIMEHI_MASK; - hi_v |= UUID_VERSION_TIME; - afs_uuid.time_hi_and_version = htons(hi_v); - - get_random_bytes(&clockseq, 2); - afs_uuid.clock_seq_low = clockseq; - afs_uuid.clock_seq_hi_and_reserved = - (clockseq >> 8) & UUID_CLOCKHI_MASK; - afs_uuid.clock_seq_hi_and_reserved |= UUID_VARIANT_STD; - - _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", - ntohl(afs_uuid.time_low), - ntohs(afs_uuid.time_mid), - ntohs(afs_uuid.time_hi_and_version), - afs_uuid.clock_seq_hi_and_reserved, - afs_uuid.clock_seq_low, - afs_uuid.node[0], afs_uuid.node[1], afs_uuid.node[2], - afs_uuid.node[3], afs_uuid.node[4], afs_uuid.node[5]); - - return 0; -} - /* * initialise the AFS client FS module */ @@ -87,9 +43,7 @@ static int __init afs_init(void) printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); - ret = afs_get_client_UUID(); - if (ret < 0) - return ret; + generate_random_uuid((unsigned char *)&afs_uuid); /* create workqueue */ ret = -ENOMEM; diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c index 7ad36506c256..40b2bab3e401 100644 --- a/fs/afs/netdevices.c +++ b/fs/afs/netdevices.c @@ -11,27 +11,6 @@ #include #include "internal.h" -/* - * get a MAC address from a random ethernet interface that has a real one - * - the buffer will normally be 6 bytes in size - */ -int afs_get_MAC_address(u8 *mac, size_t maclen) -{ - struct net_device *dev; - int ret = -ENODEV; - - BUG_ON(maclen != ETH_ALEN); - - rtnl_lock(); - dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER); - if (dev) { - memcpy(mac, dev->dev_addr, maclen); - ret = 0; - } - rtnl_unlock(); - return ret; -} - /* * get a list of this system's interface IPv4 addresses, 
netmasks and MTUs * - maxbufs must be at least 1 -- cgit v1.2.3 From 79112c26f14c38ddbac3b2739469e373ef424fe6 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 14:38:55 +0100 Subject: sched: rename tcf_destroy to tcf_destroy_proto This function destroys TC filter protocol, not TC filter. So name it accordingly. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- include/net/sch_generic.h | 2 +- net/sched/cls_api.c | 8 ++++---- net/sched/sch_api.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e2f426f6d62f..453350650b9a 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -405,7 +405,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, u32 parentid); void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); -bool tcf_destroy(struct tcf_proto *tp, bool force); +bool tcf_proto_destroy(struct tcf_proto *tp, bool force); void tcf_destroy_chain(struct tcf_proto __rcu **fl); int skb_do_redirect(struct sk_buff *); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 1ecdf809b5fa..90536ebae02a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -323,7 +323,7 @@ replay: tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER, false); - tcf_destroy(tp, true); + tcf_proto_destroy(tp, true); err = 0; goto errout; } @@ -338,7 +338,7 @@ replay: err = -EEXIST; if (n->nlmsg_flags & NLM_F_EXCL) { if (tp_created) - tcf_destroy(tp, true); + tcf_proto_destroy(tp, true); goto errout; } break; @@ -350,7 +350,7 @@ replay: tfilter_notify(net, skb, n, tp, t->tcm_handle, RTM_DELTFILTER, false); - if (tcf_destroy(tp, false)) + if (tcf_proto_destroy(tp, false)) RCU_INIT_POINTER(*back, next); } goto errout; @@ -374,7 +374,7 @@ replay: tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false); } else { if (tp_created) - tcf_destroy(tp, true); + tcf_proto_destroy(tp, true); } errout: diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index ef53ede11590..f30b517f2282 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1900,7 +1900,7 @@ reset: } EXPORT_SYMBOL(tc_classify); -bool tcf_destroy(struct tcf_proto *tp, bool force) +bool tcf_proto_destroy(struct tcf_proto *tp, bool force) { if (tp->ops->destroy(tp, force)) { module_put(tp->ops->owner); @@ -1917,7 +1917,7 @@ void tcf_destroy_chain(struct tcf_proto __rcu **fl) while ((tp = rtnl_dereference(*fl)) != NULL) { RCU_INIT_POINTER(*fl, tp->next); - tcf_destroy(tp, true); + tcf_proto_destroy(tp, true); } } EXPORT_SYMBOL(tcf_destroy_chain); -- cgit v1.2.3 From cf1facda2f61bc3e9ffd985b6d624dec6ad3f279 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 14:38:56 +0100 Subject: sched: move tcf_proto_destroy and tcf_destroy_chain helpers into cls_api Creation is done in this file, move destruction to be at the same place. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 2 ++ include/net/sch_generic.h | 2 -- net/sched/cls_api.c | 21 +++++++++++++++++++++ net/sched/sch_api.c | 22 ---------------------- net/sched/sch_atm.c | 1 + net/sched/sch_cbq.c | 1 + net/sched/sch_choke.c | 1 + net/sched/sch_dsmark.c | 1 + net/sched/sch_fq_codel.c | 1 + net/sched/sch_htb.c | 1 + net/sched/sch_ingress.c | 1 + net/sched/sch_multiq.c | 2 +- net/sched/sch_prio.c | 2 +- net/sched/sch_sfb.c | 1 + net/sched/sch_sfq.c | 1 + 15 files changed, 34 insertions(+), 26 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index dabb00af46a0..71b266cd63d4 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -17,6 +17,8 @@ struct tcf_walker { int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); +void tcf_destroy_chain(struct tcf_proto __rcu **fl); + static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 453350650b9a..aeec4086afb2 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -405,8 +405,6 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, u32 parentid); void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); -bool tcf_proto_destroy(struct tcf_proto *tp, bool force); -void tcf_destroy_chain(struct tcf_proto __rcu **fl); int skb_do_redirect(struct sk_buff *); static inline void skb_reset_tc(struct sk_buff *skb) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 90536ebae02a..4efa4df8322f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -127,6 +127,27 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp) return first; } +static bool tcf_proto_destroy(struct tcf_proto *tp, bool force) +{ + if (tp->ops->destroy(tp, force)) { + module_put(tp->ops->owner); + kfree_rcu(tp, rcu); + return true; + } + return false; +} + +void tcf_destroy_chain(struct tcf_proto __rcu **fl) +{ + struct tcf_proto *tp; + + while ((tp = rtnl_dereference(*fl)) != NULL) { + RCU_INIT_POINTER(*fl, tp->next); + tcf_proto_destroy(tp, true); + } +} +EXPORT_SYMBOL(tcf_destroy_chain); + /* Add/change/delete/get a filter node */ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index f30b517f2282..adeabaec0d0b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1900,28 +1900,6 @@ reset: } EXPORT_SYMBOL(tc_classify); -bool tcf_proto_destroy(struct tcf_proto *tp, bool force) -{ - if (tp->ops->destroy(tp, force)) { - module_put(tp->ops->owner); - kfree_rcu(tp, rcu); - return true; - } - - return false; -} - -void tcf_destroy_chain(struct tcf_proto __rcu **fl) -{ - struct tcf_proto *tp; - - while ((tp = rtnl_dereference(*fl)) != NULL) { - RCU_INIT_POINTER(*fl, tp->next); - tcf_proto_destroy(tp, true); - } -} -EXPORT_SYMBOL(tcf_destroy_chain); - #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 481e4f12aeb4..2209c2ddacbf 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -15,6 +15,7 @@ #include /* for fput */ #include #include +#include /* * The ATM queuing discipline provides a framework for invoking classifiers diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index f1207582cbf3..d6ca18dc04c3 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -19,6 +19,7 @@ #include #include #include 
+#include /* Class-Based Queueing (CBQ) algorithm. diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 3b6d5bd69101..3b86a97bc67c 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 1308bbf460f7..802ac7c2e5e8 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 2f50e4c72fb4..9f3a884d1590 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 760f39e7caee..4cd5fb134bc9 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -40,6 +40,7 @@ #include #include #include +#include /* HTB algorithm. Author: devik@cdi.cz diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 8fe6999b642a..3bab5f66c392 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -16,6 +16,7 @@ #include #include +#include static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) { diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 9ffbb025b37e..e7839a0d0eaa 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -25,7 +25,7 @@ #include #include #include - +#include struct multiq_sched_data { u16 bands; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 8f575899adfa..d4d7db267b6e 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -20,7 +20,7 @@ #include #include #include - +#include struct prio_sched_data { int bands; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 20a350bd1b1d..fe6963d21519 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -25,6 +25,7 @@ #include #include #include +#include #include /* diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 7f195ed4d568..83d06e251b93 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -23,6 +23,7 @@ #include #include #include +#include #include -- cgit v1.2.3 From 33a48927c193d030c80ecaeb3e021b7ed85f9c78 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 14:38:57 +0100 Subject: sched: push TC filter protocol creation into a separate function Make the long function tc_ctl_tfilter a little bit shorter and easier to read. Also make the creation of filter proto symmetric to destruction. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/cls_api.c | 110 ++++++++++++++++++++++++++++------------------------ 1 file changed, 59 insertions(+), 51 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 4efa4df8322f..d378a0b156c1 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -38,14 +39,14 @@ static DEFINE_RWLOCK(cls_mod_lock); /* Find classifier type by string name */ -static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind) +static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind) { const struct tcf_proto_ops *t, *res = NULL; if (kind) { read_lock(&cls_mod_lock); list_for_each_entry(t, &tcf_proto_base, head) { - if (nla_strcmp(kind, t->kind) == 0) { + if (strcmp(kind, t->kind) == 0) { if (try_module_get(t->owner)) res = t; break; @@ -127,6 +128,56 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp) return first; } +static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, + u32 prio, u32 parent, struct Qdisc *q) +{ + struct tcf_proto *tp; + int err; + + tp = kzalloc(sizeof(*tp), GFP_KERNEL); + if (!tp) + return ERR_PTR(-ENOBUFS); + + err = -ENOENT; + tp->ops = tcf_proto_lookup_ops(kind); + if (!tp->ops) { +#ifdef CONFIG_MODULES + rtnl_unlock(); + request_module("cls_%s", kind); + rtnl_lock(); + tp->ops = tcf_proto_lookup_ops(kind); + /* We dropped the RTNL semaphore in order to perform + * the module load. So, even if we succeeded in loading + * the module we have to replay the request. We indicate + * this using -EAGAIN. + */ + if (tp->ops) { + module_put(tp->ops->owner); + err = -EAGAIN; + } else { + err = -ENOENT; + } + goto errout; +#endif + } + tp->classify = tp->ops->classify; + tp->protocol = protocol; + tp->prio = prio; + tp->classid = parent; + tp->q = q; + + err = tp->ops->init(tp); + if (err) { + module_put(tp->ops->owner); + goto errout; + } + return tp; + +errout: + kfree(tp); + return ERR_PTR(err); +} + static bool tcf_proto_destroy(struct tcf_proto *tp, bool force) { if (tp->ops->destroy(tp, force)) { @@ -164,7 +215,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) struct tcf_proto __rcu **back; struct tcf_proto __rcu **chain; struct tcf_proto *tp; - const struct tcf_proto_ops *tp_ops; const struct Qdisc_class_ops *cops; unsigned long cl; unsigned long fh; @@ -279,58 +329,16 @@ replay: !(n->nlmsg_flags & NLM_F_CREATE)) goto errout; + if (!nprio) + nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back))); - /* Create new proto tcf */ - - err = -ENOBUFS; - tp = kzalloc(sizeof(*tp), GFP_KERNEL); - if (tp == NULL) - goto errout; - err = -ENOENT; - tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]); - if (tp_ops == NULL) { -#ifdef CONFIG_MODULES - struct nlattr *kind = tca[TCA_KIND]; - char name[IFNAMSIZ]; - - if (kind != NULL && - nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { - rtnl_unlock(); - request_module("cls_%s", name); - rtnl_lock(); - tp_ops = tcf_proto_lookup_ops(kind); - /* We dropped the RTNL semaphore in order to - * perform the module load. So, even if we - * succeeded in loading the module we have to - * replay the request. We indicate this using - * -EAGAIN. - */ - if (tp_ops != NULL) { - module_put(tp_ops->owner); - err = -EAGAIN; - } - } -#endif - kfree(tp); + tp = tcf_proto_create(nla_data(tca[TCA_KIND]), + protocol, nprio, parent, q); + if (IS_ERR(tp)) { + err = PTR_ERR(tp); goto errout; } - tp->ops = tp_ops; - tp->protocol = protocol; - tp->prio = nprio ? 
: - TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back))); - tp->q = q; - tp->classify = tp_ops->classify; - tp->classid = parent; - - err = tp_ops->init(tp); - if (err != 0) { - module_put(tp_ops->owner); - kfree(tp); - goto errout; - } - tp_created = 1; - } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) goto errout; -- cgit v1.2.3 From 6bb16e7ae26095892e8c51de4142d8f344793340 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 14:38:58 +0100 Subject: sched: move err set right before goto errout in tc_ctl_tfilter This makes the reader to know right away what is the error value. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/cls_api.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index d378a0b156c1..f44378c4859f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -293,9 +293,10 @@ replay: /* And the last stroke */ chain = cops->tcf_chain(q, cl); - err = -EINVAL; - if (chain == NULL) + if (chain == NULL) { + err = -EINVAL; goto errout; + } if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) { tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER); tcf_destroy_chain(chain); @@ -310,8 +311,10 @@ replay: if (tp->prio >= prio) { if (tp->prio == prio) { if (!nprio || - (tp->protocol != protocol && protocol)) + (tp->protocol != protocol && protocol)) { + err = -EINVAL; goto errout; + } } else tp = NULL; break; @@ -321,13 +324,16 @@ replay: if (tp == NULL) { /* Proto-tcf does not exist, create new one */ - if (tca[TCA_KIND] == NULL || !protocol) + if (tca[TCA_KIND] == NULL || !protocol) { + err = -EINVAL; goto errout; + } - err = -ENOENT; if (n->nlmsg_type != RTM_NEWTFILTER || - !(n->nlmsg_flags & NLM_F_CREATE)) + !(n->nlmsg_flags & NLM_F_CREATE)) { + err = -ENOENT; goto errout; + } if (!nprio) nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back))); @@ -339,8 +345,10 @@ replay: goto errout; } tp_created = 1; - } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) + } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { + err = -EINVAL; goto errout; + } fh = tp->ops->get(tp, t->tcm_handle); @@ -357,17 +365,18 @@ replay: goto errout; } - err = -ENOENT; if (n->nlmsg_type != RTM_NEWTFILTER || - !(n->nlmsg_flags & NLM_F_CREATE)) + !(n->nlmsg_flags & NLM_F_CREATE)) { + err = -ENOENT; goto errout; + } } else { switch (n->nlmsg_type) { case RTM_NEWTFILTER: - err = -EEXIST; if (n->nlmsg_flags & NLM_F_EXCL) { if (tp_created) tcf_proto_destroy(tp, true); + err = -EEXIST; goto errout; } break; -- cgit v1.2.3 From 7215032ced14f9943c8b72b61f4ac52902002158 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 14:38:59 +0100 Subject: sched: add missing curly braces in else branch in tc_ctl_tfilter Curly braces need to be there, for stylistic reasons. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/cls_api.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index f44378c4859f..48864ad2322d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -315,8 +315,9 @@ replay: err = -EINVAL; goto errout; } - } else + } else { tp = NULL; + } break; } } -- cgit v1.2.3 From 40c81b25b16cd871afe70630c131bd2544848d1f Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 14:39:00 +0100 Subject: sched: check negative err value to safe one level of indent As it is more common, check err for !0. That allows to safe one level of indentation and makes the code easier to read. Also, make 'next' variable global in function as it is used twice. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/cls_api.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 48864ad2322d..abe1fe13ae54 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -214,6 +214,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) struct Qdisc *q; struct tcf_proto __rcu **back; struct tcf_proto __rcu **chain; + struct tcf_proto *next; struct tcf_proto *tp; const struct Qdisc_class_ops *cops; unsigned long cl; @@ -355,10 +356,8 @@ replay: if (fh == 0) { if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { - struct tcf_proto *next = rtnl_dereference(tp->next); - + next = rtnl_dereference(tp->next); RCU_INIT_POINTER(*back, next); - tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER, false); tcf_proto_destroy(tp, true); @@ -383,16 +382,13 @@ replay: break; case RTM_DELTFILTER: err = tp->ops->delete(tp, fh); - if (err == 0) { - struct tcf_proto *next = rtnl_dereference(tp->next); - - tfilter_notify(net, skb, n, tp, - t->tcm_handle, - RTM_DELTFILTER, false); - if (tcf_proto_destroy(tp, false)) - RCU_INIT_POINTER(*back, next); - } - goto errout; + if (err) + goto errout; + next = rtnl_dereference(tp->next); + tfilter_notify(net, skb, n, tp, t->tcm_handle, + RTM_DELTFILTER, false); + if (tcf_proto_destroy(tp, false)) + RCU_INIT_POINTER(*back, next); case RTM_GETTFILTER: err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, true); -- cgit v1.2.3 From 147c1e9b902c25c868024260d24bb0b1dac1433d Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:40 +0100 Subject: switchdev: bridge: Offload multicast disabled Offload multicast disabled flag, for more accurate mc flood behavior: When it is on, the mdb should be ignored. When it is off, unregistered mc packets should be flooded to mc router ports. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Acked-by: Ivan Vecera Signed-off-by: David S. 
Miller --- include/net/switchdev.h | 2 ++ net/bridge/br_multicast.c | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index eba80c4fc56f..2971c2a2cdf2 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -48,6 +48,7 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, }; struct switchdev_attr { @@ -62,6 +63,7 @@ struct switchdev_attr { unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ + bool mc_disabled; /* MC_DISABLED */ } u; }; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 1de3438e36bf..8c0e896936ff 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -27,6 +27,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_IPV6) #include #include @@ -1007,6 +1008,18 @@ static void br_ip6_multicast_port_query_expired(unsigned long data) } #endif +static void br_mc_disabled_update(struct net_device *dev, bool value) +{ + struct switchdev_attr attr = { + .orig_dev = dev, + .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, + .flags = SWITCHDEV_F_DEFER, + .u.mc_disabled = value, + }; + + switchdev_port_attr_set(dev, &attr); +} + int br_multicast_add_port(struct net_bridge_port *port) { port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; @@ -1019,6 +1032,8 @@ int br_multicast_add_port(struct net_bridge_port *port) setup_timer(&port->ip6_own_query.timer, br_ip6_multicast_port_query_expired, (unsigned long)port); #endif + br_mc_disabled_update(port->dev, port->br->multicast_disabled); + port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); if (!port->mcast_stats) return -ENOMEM; @@ -2121,6 +2136,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) if (br->multicast_disabled == !val) goto unlock; + br_mc_disabled_update(br->dev, !val); br->multicast_disabled = !val; if (br->multicast_disabled) goto unlock; -- cgit v1.2.3 From f12e7d95d12fa4dbe655bd038a6d38526a879deb Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:41 +0100 Subject: bridge: mcast: Merge the mc router ports deletions to one function There are three places where a port gets deleted from the mc router port list. This patch join the actual deletion to one function. It will be helpful for later patch that will offload changes in the mc router ports list. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Acked-by: Ivan Vecera Signed-off-by: David S. 
Miller --- net/bridge/br_multicast.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 8c0e896936ff..2add6d417aa4 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -47,6 +47,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, __u16 vid, const unsigned char *src); +static void __del_port_router(struct net_bridge_port *p); #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, @@ -850,16 +851,10 @@ static void br_multicast_router_expired(unsigned long data) spin_lock(&br->multicast_lock); if (port->multicast_router == MDB_RTR_TYPE_DISABLED || port->multicast_router == MDB_RTR_TYPE_PERM || - timer_pending(&port->multicast_router_timer) || - hlist_unhashed(&port->rlist)) + timer_pending(&port->multicast_router_timer)) goto out; - hlist_del_init_rcu(&port->rlist); - br_rtr_notify(br->dev, port, RTM_DELMDB); - /* Don't allow timer refresh if the router expired */ - if (port->multicast_router == MDB_RTR_TYPE_TEMP) - port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; - + __del_port_router(port); out: spin_unlock(&br->multicast_lock); } @@ -1101,13 +1096,8 @@ void br_multicast_disable_port(struct net_bridge_port *port) if (!(pg->flags & MDB_PG_FLAGS_PERMANENT)) br_multicast_del_pg(br, pg); - if (!hlist_unhashed(&port->rlist)) { - hlist_del_init_rcu(&port->rlist); - br_rtr_notify(br->dev, port, RTM_DELMDB); - /* Don't allow timer refresh if disabling */ - if (port->multicast_router == MDB_RTR_TYPE_TEMP) - port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; - } + __del_port_router(port); + del_timer(&port->multicast_router_timer); del_timer(&port->ip4_own_query.timer); #if IS_ENABLED(CONFIG_IPV6) @@ -2059,6 +2049,10 @@ static void __del_port_router(struct net_bridge_port *p) return; hlist_del_init_rcu(&p->rlist); br_rtr_notify(p->br->dev, p, RTM_DELMDB); + + /* don't allow timer refresh */ + if (p->multicast_router == MDB_RTR_TYPE_TEMP) + p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; } int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) -- cgit v1.2.3 From 6d5496483f5eb7b4da2e83c7b2149a21ad412d96 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:42 +0100 Subject: switchdev: bridge: Offload mc router ports Offload the mc router ports list, whenever it is being changed. It is done because in some cases mc packets needs to be flooded to all the ports in this list. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Acked-by: Ivan Vecera Signed-off-by: David S. 
Miller --- include/net/switchdev.h | 2 ++ net/bridge/br_multicast.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 2971c2a2cdf2..929d6af321cd 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -46,6 +46,7 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_PORT_PARENT_ID, SWITCHDEV_ATTR_ID_PORT_STP_STATE, SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, + SWITCHDEV_ATTR_ID_PORT_MROUTER, SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, @@ -61,6 +62,7 @@ struct switchdev_attr { struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ u8 stp_state; /* PORT_STP_STATE */ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ + bool mrouter; /* PORT_MROUTER */ clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ bool mc_disabled; /* MC_DISABLED */ diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 2add6d417aa4..b760f2620abf 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1317,6 +1317,19 @@ br_multicast_update_query_timer(struct net_bridge *br, mod_timer(&query->timer, jiffies + br->multicast_querier_interval); } +static void br_port_mc_router_state_change(struct net_bridge_port *p, + bool is_mc_router) +{ + struct switchdev_attr attr = { + .orig_dev = p->dev, + .id = SWITCHDEV_ATTR_ID_PORT_MROUTER, + .flags = SWITCHDEV_F_DEFER, + .u.mrouter = is_mc_router, + }; + + switchdev_port_attr_set(p->dev, &attr); +} + /* * Add port to router_list * list is maintained ordered by pointer value @@ -1342,6 +1355,7 @@ static void br_multicast_add_router(struct net_bridge *br, else hlist_add_head_rcu(&port->rlist, &br->router_list); br_rtr_notify(br->dev, port, RTM_NEWMDB); + br_port_mc_router_state_change(port, true); } static void br_multicast_mark_router(struct net_bridge *br, @@ -2049,6 +2063,7 @@ static void __del_port_router(struct net_bridge_port *p) return; hlist_del_init_rcu(&p->rlist); br_rtr_notify(p->br->dev, p, RTM_DELMDB); + br_port_mc_router_state_change(p, false); /* don't allow timer refresh */ if (p->multicast_router == MDB_RTR_TYPE_TEMP) -- cgit v1.2.3 From eaa7df3c5afe257e59e03746cdf9800fcd2494d2 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:43 +0100 Subject: mlxsw: spectrum: Break flood set func to be per table Currently, the flood set function can't operate on only one table, but sets both uc_flood and mb_flood together. This patch creates a function that sets the flood state per table. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 54 ++++++++++++++-------- 1 file changed, 34 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index b87ba7d36bc4..2bd5ffe36a8e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -166,9 +166,10 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); } -static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 idx_begin, u16 idx_end, bool uc_set, - bool bm_set) +static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 idx_begin, u16 idx_end, + enum mlxsw_sp_flood_table table, + bool set) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u16 local_port = mlxsw_sp_port->local_port; @@ -186,26 +187,35 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, uc_set); + mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin, + table_type, range, local_port, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + + kfree(sftr_pl); + return err; +} + +static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 idx_begin, u16 idx_end, bool uc_set, + bool bm_set) +{ + int err; + + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_UC, uc_set); if (err) - goto buffer_out; + return err; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, - table_type, range, local_port, bm_set); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_BM, bm_set); if (err) goto err_flood_bm_set; - goto buffer_out; + return 0; err_flood_bm_set: - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, !uc_set); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); -buffer_out: - kfree(sftr_pl); + __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_UC, !uc_set); return err; } @@ -220,13 +230,16 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid; u16 vfid = mlxsw_sp_fid_to_vfid(fid); - return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid, - set, true); + return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid, + vfid, + MLXSW_SP_FLOOD_TABLE_UC, + set); } for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set, - true); + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, + MLXSW_SP_FLOOD_TABLE_UC, + set); if (err) { last_visited_vid = vid; goto err_port_flood_set; @@ -237,7 +250,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, err_port_flood_set: for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) - __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true); + __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, + MLXSW_SP_FLOOD_TABLE_UC, !set); netdev_err(dev, "Failed to configure unicast flooding\n"); return err; } -- cgit v1.2.3 From 69be01f374e4c09aae29be6e15a538ff0c282bb6 Mon 
Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:44 +0100 Subject: mlxsw: spectrum: Make port flood update more generic Currently, there is a per port flood update function only for the UC table. Make the function more generic by changing the table type to be an input. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 2bd5ffe36a8e..b1e2ec121886 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -219,8 +219,9 @@ err_flood_bm_set: return err; } -static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - bool set) +static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_sp_flood_table table, + bool set) { struct net_device *dev = mlxsw_sp_port->dev; u16 vid, last_visited_vid; @@ -231,15 +232,12 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vfid = mlxsw_sp_fid_to_vfid(fid); return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid, - vfid, - MLXSW_SP_FLOOD_TABLE_UC, - set); + vfid, table, set); } for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, - MLXSW_SP_FLOOD_TABLE_UC, - set); + table, set); if (err) { last_visited_vid = vid; goto err_port_flood_set; @@ -250,8 +248,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, err_port_flood_set: for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) - __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, - MLXSW_SP_FLOOD_TABLE_UC, !set); + __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table, + !set); netdev_err(dev, "Failed to configure unicast flooding\n"); return err; } @@ -311,8 +309,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; if ((uc_flood ^ brport_flags) & BR_FLOOD) { - err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, - !mlxsw_sp_port->uc_flood); + err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_UC, + !mlxsw_sp_port->uc_flood); if (err) return err; } @@ -332,8 +331,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, err_port_learning_set: if ((uc_flood ^ brport_flags) & BR_FLOOD) - mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, - mlxsw_sp_port->uc_flood); + mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_UC, + mlxsw_sp_port->uc_flood); return err; } -- cgit v1.2.3 From 63fe813c6008b5a10a06bffd64cee8da2984e716 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:45 +0100 Subject: mlxsw: spectrum: Change max vfid A user that wants many bridges will use 1.Q bridge which are scalable. One can have as many 1.Q bridges as vfids. This patch sets their number to 1k, which is a reasonably large number. This change is done here because the next patches will add a new flood table, and without it, it will increase the overall size of the flood tables dramatically. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 145897c5a779..49fe75face76 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -55,7 +55,7 @@ #include "core_acl_flex_actions.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID -#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */ +#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */ #define MLXSW_SP_RFID_BASE 15360 #define MLXSW_SP_INVALID_RIF 0xffff -- cgit v1.2.3 From 71c365bdc4396893798c8e1c9247663096ff4829 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:46 +0100 Subject: mlxsw: spectrum: Separate bc and mc floods Break the bm (broadcast-multicast) into two tables, one for broadcast (and link local multicast that behaves like bc) and one for unknown multicasts. Add a bool into mlxsw_sp_port named mc_flood that reflect the value this port should have in the mc flood table (currently, always 1); Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 23 ++++++++++++++++------ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 +++- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 21 ++++++++++++++------ 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 8de5ed526904..1d9a8c5948e7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3073,10 +3073,17 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, else table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; - if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) + switch (type) { + case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST: flood_table = MLXSW_SP_FLOOD_TABLE_UC; - else - flood_table = MLXSW_SP_FLOOD_TABLE_BM; + break; + case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4: + case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6: + flood_table = MLXSW_SP_FLOOD_TABLE_MC; + break; + default: + flood_table = MLXSW_SP_FLOOD_TABLE_BC; + } mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, flood_table); @@ -3270,9 +3277,9 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_flood_tables = 1, .used_flood_mode = 1, .flood_mode = 3, - .max_fid_offset_flood_tables = 2, + .max_fid_offset_flood_tables = 3, .fid_offset_flood_table_size = VLAN_N_VID - 1, - .max_fid_flood_tables = 2, + .max_fid_flood_tables = 3, .fid_flood_table_size = MLXSW_SP_VFID_MAX, .used_max_ib_mc = 1, .max_ib_mc = 0, @@ -3689,7 +3696,7 @@ static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, table_type = mlxsw_sp_flood_table_type_get(fid); index = mlxsw_sp_flood_table_index_get(fid); - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type, + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type, 1, MLXSW_PORT_ROUTER_PORT, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); @@ -4065,6 +4072,7 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->learning = 1; mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; + mlxsw_sp_port->mc_flood = 1; mlxsw_sp_port->bridged = 1; return 0; @@ -4081,6 +4089,7 @@ static void 
mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; + mlxsw_sp_port->mc_flood = 0; mlxsw_sp_port->bridged = 0; /* Add implicit VLAN interface in the device, so that untagged @@ -4743,6 +4752,7 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, mlxsw_sp_vport->learning = 1; mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; + mlxsw_sp_vport->mc_flood = 1; mlxsw_sp_vport->bridged = 1; return 0; @@ -4763,6 +4773,7 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) mlxsw_sp_vport->learning = 0; mlxsw_sp_vport->learning_sync = 0; mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->mc_flood = 0; mlxsw_sp_vport->bridged = 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 49fe75face76..7b8d1cfe5cf4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -342,6 +342,7 @@ struct mlxsw_sp_port { u8 learning:1, learning_sync:1, uc_flood:1, + mc_flood:1, bridged:1, lagged:1, split:1; @@ -509,7 +510,8 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, - MLXSW_SP_FLOOD_TABLE_BM, + MLXSW_SP_FLOOD_TABLE_BC, + MLXSW_SP_FLOOD_TABLE_MC, }; int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index b1e2ec121886..fe3cf568e1d5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -197,7 +197,7 @@ static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 idx_begin, u16 idx_end, bool uc_set, - bool bm_set) + bool bc_set, bool mc_set) { int err; @@ -207,12 +207,19 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, return err; err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, - MLXSW_SP_FLOOD_TABLE_BM, bm_set); + MLXSW_SP_FLOOD_TABLE_BC, bc_set); if (err) goto err_flood_bm_set; + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_MC, mc_set); + if (err) + goto err_flood_mc_set; return 0; +err_flood_mc_set: + __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_BC, !bc_set); err_flood_bm_set: __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, MLXSW_SP_FLOOD_TABLE_UC, !uc_set); @@ -263,7 +270,8 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, * the start of the vFIDs range. 
*/ vfid = mlxsw_sp_fid_to_vfid(fid); - return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set); + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set, + set); } static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -568,7 +576,8 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, } err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, - mlxsw_sp_port->uc_flood, true); + mlxsw_sp_port->uc_flood, true, + mlxsw_sp_port->mc_flood); if (err) goto err_port_flood_set; @@ -584,7 +593,7 @@ err_port_fid_map: for (fid--; fid >= fid_begin; fid--) mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, - false); + false, false); err_port_flood_set: fid = fid_end; err_port_fid_join: @@ -602,7 +611,7 @@ static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, - false); + false, false); for (fid = fid_begin; fid <= fid_end; fid++) __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); -- cgit v1.2.3 From 8ecd4591e7612a7ea5c67589cf1a7f18bd17ba14 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:47 +0100 Subject: mlxsw: spectrum: Add an option to flood mc by mc_router_port The decision whether to flood a multicast packet to a port dependent on three flags: mc_disabled, mc_router_port, mc_flood. If mc_disabled is on, the port will be flooded according to mc_flood, otherwise, according to mc_router_port. To accomplish that, add those flags into the mlxsw_sp_port struct and update the mc flood table accordingly. Update mc_router_port by switchdev attribute SWITCHDEV_ATTR_ID_PORT_MC_ROUTER_PORT. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. 
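The flooding rule spelled out in the message above reduces to a single ternary, which is what the driver computes for the MC flood table in the diff that follows. A tiny stand-alone restatement of it; the helper name and the sample values are illustrative only, not taken from the driver.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper restating the rule from the commit message: with
 * multicast snooping disabled the generic mc_flood flag applies, otherwise
 * only ports marked as multicast routers receive the flooded traffic.
 */
static bool mc_flood_table_state(bool mc_disabled, bool mc_flood, bool mc_router)
{
        return mc_disabled ? mc_flood : mc_router;
}

int main(void)
{
        /* Mirrors "mc_set = ...->mc_disabled ? mc_flood : mc_router" below. */
        printf("%d\n", mc_flood_table_state(true, true, false));   /* 1: snooping off, flood */
        printf("%d\n", mc_flood_table_state(false, true, false));  /* 0: snooping on, not a router port */
        return 0;
}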
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6 ++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 34 ++++++++++++++++++++-- 3 files changed, 41 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1d9a8c5948e7..37c018bea5e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4073,6 +4073,8 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; mlxsw_sp_port->mc_flood = 1; + mlxsw_sp_port->mc_router = 0; + mlxsw_sp_port->mc_disabled = 1; mlxsw_sp_port->bridged = 1; return 0; @@ -4090,6 +4092,7 @@ static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; mlxsw_sp_port->mc_flood = 0; + mlxsw_sp_port->mc_router = 0; mlxsw_sp_port->bridged = 0; /* Add implicit VLAN interface in the device, so that untagged @@ -4753,6 +4756,8 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; mlxsw_sp_vport->mc_flood = 1; + mlxsw_sp_vport->mc_router = 0; + mlxsw_sp_vport->mc_disabled = 1; mlxsw_sp_vport->bridged = 1; return 0; @@ -4774,6 +4779,7 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) mlxsw_sp_vport->learning_sync = 0; mlxsw_sp_vport->uc_flood = 0; mlxsw_sp_vport->mc_flood = 0; + mlxsw_sp_vport->mc_router = 0; mlxsw_sp_vport->bridged = 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 7b8d1cfe5cf4..13ec85e7c392 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -339,10 +339,12 @@ struct mlxsw_sp_port { struct mlxsw_sp *mlxsw_sp; u8 local_port; u8 stp_state; - u8 learning:1, + u16 learning:1, learning_sync:1, uc_flood:1, mc_flood:1, + mc_router:1, + mc_disabled:1, bridged:1, lagged:1, split:1; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index fe3cf568e1d5..53702464db2e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -264,14 +264,20 @@ err_port_flood_set: int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, bool set) { + bool mc_set = set; u16 vfid; /* In case of vFIDs, index into the flooding table is relative to * the start of the vFIDs range. */ vfid = mlxsw_sp_fid_to_vfid(fid); + + if (set) + mc_set = mlxsw_sp_vport->mc_disabled ? 
+ mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router; + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set, - set); + mc_set); } static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -393,6 +399,22 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + bool is_port_mc_router) +{ + if (switchdev_trans_ph_prepare(trans)) + return 0; + + mlxsw_sp_port->mc_router = is_port_mc_router; + if (!mlxsw_sp_port->mc_disabled) + return mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_MC, + is_port_mc_router); + + return 0; +} + static int mlxsw_sp_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -422,6 +444,10 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->orig_dev, attr->u.vlan_filtering); break; + case SWITCHDEV_ATTR_ID_PORT_MROUTER: + err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans, + attr->u.mrouter); + break; default: err = -EOPNOTSUPP; break; @@ -567,6 +593,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid, static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid_begin, u16 fid_end) { + bool mc_flood; int fid, err; for (fid = fid_begin; fid <= fid_end; fid++) { @@ -575,9 +602,12 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, goto err_port_fid_join; } + mc_flood = mlxsw_sp_port->mc_disabled ? + mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, mlxsw_sp_port->uc_flood, true, - mlxsw_sp_port->mc_flood); + mc_flood); if (err) goto err_port_flood_set; -- cgit v1.2.3 From 1e5d94327dfb0d3b877bb2a44328c0978734cfc2 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:48 +0100 Subject: mlxsw: spectrum: Extend port_orig_get for bridge devices The function mlxsw_sp_port_orig_get returns the vport from the physical port if needed, based on the original device. This patch addresses the case where the original device is a bridge. If it is vlan unaware bridge, it returns the matching vport. If it is vlan aware bridge, there is no matching vport, and it returns the original port. Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 53702464db2e..9540f202c521 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -71,8 +71,21 @@ mlxsw_sp_port_orig_get(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *fid; u16 vid; + if (netif_is_bridge_master(dev)) { + fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp, + dev); + if (fid) { + mlxsw_sp_vport = + mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, + fid->fid); + WARN_ON(!mlxsw_sp_vport); + return mlxsw_sp_vport; + } + } + if (!is_vlan_dev(dev)) return mlxsw_sp_port; -- cgit v1.2.3 From 90e0f0c1b4f2baa3639f957bfe904c9eda0d4db4 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 9 Feb 2017 14:54:49 +0100 Subject: mlxsw: spectrum: Update mc_disabled flag by switchdev attr Add a function to update mc_disabled from switchdev attr SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 28 ++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 9540f202c521..598727d578c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -274,6 +274,30 @@ err_port_flood_set: return err; } +static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + bool mc_disabled) +{ + int set; + int err = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) { + set = mc_disabled ? + mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; + err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_MC, + set); + } + + if (!err) + mlxsw_sp_port->mc_disabled = mc_disabled; + + return err; +} + int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, bool set) { @@ -461,6 +485,10 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans, attr->u.mrouter); break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, + attr->u.mc_disabled); + break; default: err = -EOPNOTSUPP; break; -- cgit v1.2.3 From ea6da4fd388a1eeab30d64da3eab3c5338714c74 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 7 Feb 2017 09:56:06 +0200 Subject: net/skbuff: Introduce skb_mac_offset() Introduce skb_mac_offset() that could be used to get mac header offset. Signed-off-by: Amir Vadai Reviewed-by: Or Gerlitz Signed-off-by: David S. 
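skb_mac_offset() returns a signed int because the MAC header position is recorded before the header is pulled: once skb->data has advanced to the network header, mac_header lies behind it and the difference goes negative. A toy user-space model of that bookkeeping, just to make the sign visible; the struct and the 14-byte pull are illustrative, not kernel code.

#include <stdio.h>

/* Toy model of sk_buff offset bookkeeping (illustrative only): 'data' moves
 * forward as headers are pulled, while 'mac' stays where the MAC header was
 * recorded, so mac - data can legitimately be negative.
 */
struct toy_skb {
        unsigned char buf[128];
        unsigned char *data;    /* current payload start, moves on pull */
        unsigned char *mac;     /* recorded start of the L2 header */
};

static int toy_mac_offset(const struct toy_skb *skb)
{
        return (int)(skb->mac - skb->data);     /* same arithmetic as skb_mac_offset() */
}

int main(void)
{
        struct toy_skb skb;

        skb.data = skb.buf;
        skb.mac = skb.buf;
        printf("at L2: mac offset = %d\n", toy_mac_offset(&skb));  /* 0 */
        skb.data += 14;         /* "pull" a 14-byte Ethernet header */
        printf("at L3: mac offset = %d\n", toy_mac_offset(&skb));  /* -14 */
        return 0;
}

The act_pedit change later in this series relies on exactly this helper when a key is declared relative to the Ethernet header.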
Miller --- include/linux/skbuff.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f1adddc1c5ac..69ccd2636911 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2184,6 +2184,11 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb) return skb->head + skb->mac_header; } +static inline int skb_mac_offset(const struct sk_buff *skb) +{ + return skb_mac_header(skb) - skb->data; +} + static inline int skb_mac_header_was_set(const struct sk_buff *skb) { return skb->mac_header != (typeof(skb->mac_header))~0U; -- cgit v1.2.3 From 71d0ed7079dffbc5cd0941d77d9b84e04109c9bb Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 7 Feb 2017 09:56:07 +0200 Subject: net/act_pedit: Support using offset relative to the conventional network headers Extend pedit to enable the user setting offset relative to network headers. This change would enable to work with more complex header schemes (vs the simple IPv4 case) where setting a fixed offset relative to the network header is not enough. After this patch, the action has information about the exact header type and field inside this header. This information could be used later on for hardware offloading of pedit. Backward compatibility was being kept: 1. Old kernel <-> new userspace 2. New kernel <-> old userspace 3. add rule using new userspace <-> dump using old userspace 4. add rule using old userspace <-> dump using new userspace When using the extended api, new netlink attributes are being used. This way, operation will fail in (1) and (3) - and no malformed rule be added or dumped. Of course, new user space that doesn't need the new functionality can use the old netlink attributes and operation will succeed. Since action can support both api's, (2) should work, and it is easy to write the new user space to have (4) work. The action is having a strict check that only header types and commands it can handle are accepted. This way future additions will be much easier. Usage example: $ tc filter add dev enp0s9 protocol ip parent ffff: \ flower \ ip_proto tcp \ dst_port 80 \ action pedit munge tcp dport set 8080 pipe \ action mirred egress redirect dev veth0 Will forward tcp port whose original dest port is 80, while modifying the destination port to 8080. Signed-off-by: Amir Vadai Reviewed-by: Or Gerlitz Signed-off-by: David S. 
Miller --- include/net/tc_act/tc_pedit.h | 5 + include/uapi/linux/tc_act/tc_pedit.h | 23 ++++ net/sched/act_pedit.c | 196 ++++++++++++++++++++++++++++++++--- 3 files changed, 208 insertions(+), 16 deletions(-) diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index 29e38d6823df..e076f22035a5 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -3,11 +3,16 @@ #include +struct tcf_pedit_key_ex { + enum pedit_header_type htype; +}; + struct tcf_pedit { struct tc_action common; unsigned char tcfp_nkeys; unsigned char tcfp_flags; struct tc_pedit_key *tcfp_keys; + struct tcf_pedit_key_ex *tcfp_keys_ex; }; #define to_pedit(a) ((struct tcf_pedit *)a) diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h index 6389959a5157..22f19eeda997 100644 --- a/include/uapi/linux/tc_act/tc_pedit.h +++ b/include/uapi/linux/tc_act/tc_pedit.h @@ -11,10 +11,33 @@ enum { TCA_PEDIT_TM, TCA_PEDIT_PARMS, TCA_PEDIT_PAD, + TCA_PEDIT_PARMS_EX, + TCA_PEDIT_KEYS_EX, + TCA_PEDIT_KEY_EX, __TCA_PEDIT_MAX }; #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1) +enum { + TCA_PEDIT_KEY_EX_HTYPE = 1, + __TCA_PEDIT_KEY_EX_MAX +}; +#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1) + + /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It + * means no specific header type - offset is relative to the network layer + */ +enum pedit_header_type { + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0, + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3, + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4, + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, + __PEDIT_HDR_TYPE_MAX, +}; +#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1) + struct tc_pedit_key { __u32 mask; /* AND */ __u32 val; /*XOR */ diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index b27c4daec88f..fdd012bd3602 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -22,6 +22,7 @@ #include #include #include +#include #define PEDIT_TAB_MASK 15 @@ -30,18 +31,112 @@ static struct tc_action_ops act_pedit_ops; static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { [TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) }, + [TCA_PEDIT_KEYS_EX] = { .type = NLA_NESTED }, }; +static const struct nla_policy pedit_key_ex_policy[TCA_PEDIT_KEY_EX_MAX + 1] = { + [TCA_PEDIT_KEY_EX_HTYPE] = { .type = NLA_U16 }, +}; + +static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, + u8 n) +{ + struct tcf_pedit_key_ex *keys_ex; + struct tcf_pedit_key_ex *k; + const struct nlattr *ka; + int err = -EINVAL; + int rem; + + if (!nla || !n) + return NULL; + + keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL); + if (!keys_ex) + return ERR_PTR(-ENOMEM); + + k = keys_ex; + + nla_for_each_nested(ka, nla, rem) { + struct nlattr *tb[TCA_PEDIT_KEY_EX_MAX + 1]; + + if (!n) { + err = -EINVAL; + goto err_out; + } + n--; + + if (nla_type(ka) != TCA_PEDIT_KEY_EX) { + err = -EINVAL; + goto err_out; + } + + err = nla_parse_nested(tb, TCA_PEDIT_KEY_EX_MAX, ka, + pedit_key_ex_policy); + if (err) + goto err_out; + + if (!tb[TCA_PEDIT_KEY_EX_HTYPE]) { + err = -EINVAL; + goto err_out; + } + + k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]); + + if (k->htype > TCA_PEDIT_HDR_TYPE_MAX) { + err = -EINVAL; + goto err_out; + } + + k++; + } + + if (n) + goto err_out; + + return keys_ex; + +err_out: + kfree(keys_ex); + return ERR_PTR(err); +} + +static int tcf_pedit_key_ex_dump(struct sk_buff *skb, + struct tcf_pedit_key_ex *keys_ex, int n) +{ + 
struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); + + for (; n > 0; n--) { + struct nlattr *key_start; + + key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); + + if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype)) { + nlmsg_trim(skb, keys_start); + return -EINVAL; + } + + nla_nest_end(skb, key_start); + + keys_ex++; + } + + nla_nest_end(skb, keys_start); + + return 0; +} + static int tcf_pedit_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind) { struct tc_action_net *tn = net_generic(net, pedit_net_id); struct nlattr *tb[TCA_PEDIT_MAX + 1]; + struct nlattr *pattr; struct tc_pedit *parm; int ret = 0, err; struct tcf_pedit *p; struct tc_pedit_key *keys = NULL; + struct tcf_pedit_key_ex *keys_ex; int ksize; if (nla == NULL) @@ -51,13 +146,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (err < 0) return err; - if (tb[TCA_PEDIT_PARMS] == NULL) + pattr = tb[TCA_PEDIT_PARMS]; + if (!pattr) + pattr = tb[TCA_PEDIT_PARMS_EX]; + if (!pattr) return -EINVAL; - parm = nla_data(tb[TCA_PEDIT_PARMS]); + + parm = nla_data(pattr); ksize = parm->nkeys * sizeof(struct tc_pedit_key); - if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize) + if (nla_len(pattr) < sizeof(*parm) + ksize) return -EINVAL; + keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys); + if (IS_ERR(keys_ex)) + return PTR_ERR(keys_ex); + if (!tcf_hash_check(tn, parm->index, a, bind)) { if (!parm->nkeys) return -EINVAL; @@ -69,6 +172,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) { tcf_hash_cleanup(*a, est); + kfree(keys_ex); return -ENOMEM; } ret = ACT_P_CREATED; @@ -81,8 +185,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, p = to_pedit(*a); if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { keys = kmalloc(ksize, GFP_KERNEL); - if (keys == NULL) + if (!keys) { + kfree(keys_ex); return -ENOMEM; + } } } @@ -95,6 +201,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, p->tcfp_nkeys = parm->nkeys; } memcpy(p->tcfp_keys, parm->keys, ksize); + + kfree(p->tcfp_keys_ex); + p->tcfp_keys_ex = keys_ex; + spin_unlock_bh(&p->tcf_lock); if (ret == ACT_P_CREATED) tcf_hash_insert(tn, *a); @@ -106,6 +216,7 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind) struct tcf_pedit *p = to_pedit(a); struct tc_pedit_key *keys = p->tcfp_keys; kfree(keys); + kfree(p->tcfp_keys_ex); } static bool offset_valid(struct sk_buff *skb, int offset) @@ -119,38 +230,84 @@ static bool offset_valid(struct sk_buff *skb, int offset) return true; } +static int pedit_skb_hdr_offset(struct sk_buff *skb, + enum pedit_header_type htype, int *hoffset) +{ + int ret = -EINVAL; + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + if (skb_mac_header_was_set(skb)) { + *hoffset = skb_mac_offset(skb); + ret = 0; + } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK: + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + *hoffset = skb_network_offset(skb); + ret = 0; + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + if (skb_transport_header_was_set(skb)) { + *hoffset = skb_transport_offset(skb); + ret = 0; + } + break; + default: + ret = -EINVAL; + break; + }; + + return ret; +} + static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_pedit *p = to_pedit(a); int i; - unsigned int off; if (skb_unclone(skb, GFP_ATOMIC)) return 
p->tcf_action; - off = skb_network_offset(skb); - spin_lock(&p->tcf_lock); tcf_lastuse_update(&p->tcf_tm); if (p->tcfp_nkeys > 0) { struct tc_pedit_key *tkey = p->tcfp_keys; + struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex; + enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { u32 *ptr, _data; int offset = tkey->off; + int hoffset; + int rc; + + if (tkey_ex) { + htype = tkey_ex->htype; + tkey_ex++; + } + + rc = pedit_skb_hdr_offset(skb, htype, &hoffset); + if (rc) { + pr_info("tc filter pedit bad header type specified (0x%x)\n", + htype); + goto bad; + } if (tkey->offmask) { char *d, _d; - if (!offset_valid(skb, off + tkey->at)) { + if (!offset_valid(skb, hoffset + tkey->at)) { pr_info("tc filter pedit 'at' offset %d out of bounds\n", - off + tkey->at); + hoffset + tkey->at); goto bad; } - d = skb_header_pointer(skb, off + tkey->at, 1, + d = skb_header_pointer(skb, hoffset + tkey->at, 1, &_d); if (!d) goto bad; @@ -163,19 +320,19 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, goto bad; } - if (!offset_valid(skb, off + offset)) { + if (!offset_valid(skb, hoffset + offset)) { pr_info("tc filter pedit offset %d out of bounds\n", - offset); + hoffset + offset); goto bad; } - ptr = skb_header_pointer(skb, off + offset, 4, &_data); + ptr = skb_header_pointer(skb, hoffset + offset, 4, &_data); if (!ptr) goto bad; /* just do it, baby */ *ptr = ((*ptr & tkey->mask) ^ tkey->val); if (ptr == &_data) - skb_store_bits(skb, off + offset, ptr, 4); + skb_store_bits(skb, hoffset + offset, ptr, 4); } goto done; @@ -215,8 +372,15 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, opt->refcnt = p->tcf_refcnt - ref; opt->bindcnt = p->tcf_bindcnt - bind; - if (nla_put(skb, TCA_PEDIT_PARMS, s, opt)) - goto nla_put_failure; + if (p->tcfp_keys_ex) { + tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); + + if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) + goto nla_put_failure; + } else { + if (nla_put(skb, TCA_PEDIT_PARMS, s, opt)) + goto nla_put_failure; + } tcf_tm_dump(&t, &p->tcf_tm); if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD)) -- cgit v1.2.3 From 853a14ba4682f820266469979c9297debc05f60c Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 7 Feb 2017 09:56:08 +0200 Subject: net/act_pedit: Introduce 'add' operation This command could be useful to inc/dec fields. For example, to forward any TCP packet and decrease its TTL: $ tc filter add dev enp0s9 protocol ip parent ffff: \ flower ip_proto tcp \ action pedit munge ip ttl add 0xff pipe \ action mirred egress redirect dev veth0 In the example above, adding 0xff to this u8 field is actually decreasing it by one, since the operation is masked. Signed-off-by: Amir Vadai Reviewed-by: Or Gerlitz Signed-off-by: David S. 
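The remark above, that adding 0xff to a masked u8 field decrements it, can be checked in isolation: for the new ADD command the patch below computes val = (*ptr + tkey->val) & ~tkey->mask and then splices it back with *ptr = (*ptr & tkey->mask) ^ val, the same splice the existing SET path uses. A stand-alone sketch of that read-modify-write on a made-up 32-bit word; the field placement and values are illustrative, not the real IPv4 TTL handling.

#include <stdio.h>
#include <stdint.h>

/* Reproduces the CMD_ADD arithmetic from the patch on one 32-bit word.
 * 'mask' has 1-bits over the part of the word to preserve and 0-bits over
 * the field being edited, matching the AND/XOR meaning of tc_pedit_key.
 */
static uint32_t pedit_add(uint32_t word, uint32_t mask, uint32_t addend)
{
        uint32_t val = (word + addend) & ~mask; /* new field bits, carries truncated */

        return (word & mask) ^ val;             /* splice them back into the word */
}

int main(void)
{
        /* Pretend the edited field is the low byte, currently 0x40, and the
         * top three bytes must stay untouched.
         */
        uint32_t word = 0xabcd0040;
        uint32_t mask = 0xffffff00;             /* keep the top three bytes */

        word = pedit_add(word, mask, 0xff);     /* "add 0xff" == decrement by one */
        printf("0x%08x\n", word);               /* prints 0xabcd003f */
        return 0;
}

With the low byte at 0x40 and an addend of 0xff the masked result is 0x3f: the add wraps within the field and behaves as a decrement, while the bytes covered by the mask are untouched.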
Miller --- include/net/tc_act/tc_pedit.h | 1 + include/uapi/linux/tc_act/tc_pedit.h | 8 ++++++++ net/sched/act_pedit.c | 30 ++++++++++++++++++++++++++---- 3 files changed, 35 insertions(+), 4 deletions(-) diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index e076f22035a5..dfbd6ee0bc7c 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -5,6 +5,7 @@ struct tcf_pedit_key_ex { enum pedit_header_type htype; + enum pedit_cmd cmd; }; struct tcf_pedit { diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h index 22f19eeda997..143d2b31a316 100644 --- a/include/uapi/linux/tc_act/tc_pedit.h +++ b/include/uapi/linux/tc_act/tc_pedit.h @@ -20,6 +20,7 @@ enum { enum { TCA_PEDIT_KEY_EX_HTYPE = 1, + TCA_PEDIT_KEY_EX_CMD = 2, __TCA_PEDIT_KEY_EX_MAX }; #define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1) @@ -38,6 +39,13 @@ enum pedit_header_type { }; #define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1) +enum pedit_cmd { + TCA_PEDIT_KEY_EX_CMD_SET = 0, + TCA_PEDIT_KEY_EX_CMD_ADD = 1, + __PEDIT_CMD_MAX, +}; +#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1) + struct tc_pedit_key { __u32 mask; /* AND */ __u32 val; /*XOR */ diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index fdd012bd3602..c1310472f620 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -36,6 +36,7 @@ static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { static const struct nla_policy pedit_key_ex_policy[TCA_PEDIT_KEY_EX_MAX + 1] = { [TCA_PEDIT_KEY_EX_HTYPE] = { .type = NLA_U16 }, + [TCA_PEDIT_KEY_EX_CMD] = { .type = NLA_U16 }, }; static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, @@ -75,14 +76,17 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, if (err) goto err_out; - if (!tb[TCA_PEDIT_KEY_EX_HTYPE]) { + if (!tb[TCA_PEDIT_KEY_EX_HTYPE] || + !tb[TCA_PEDIT_KEY_EX_CMD]) { err = -EINVAL; goto err_out; } k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]); + k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]); - if (k->htype > TCA_PEDIT_HDR_TYPE_MAX) { + if (k->htype > TCA_PEDIT_HDR_TYPE_MAX || + k->cmd > TCA_PEDIT_CMD_MAX) { err = -EINVAL; goto err_out; } @@ -110,7 +114,8 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); - if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype)) { + if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || + nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { nlmsg_trim(skb, keys_start); return -EINVAL; } @@ -280,15 +285,19 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, struct tc_pedit_key *tkey = p->tcfp_keys; struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex; enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; + enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET; for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { u32 *ptr, _data; int offset = tkey->off; int hoffset; + u32 val; int rc; if (tkey_ex) { htype = tkey_ex->htype; + cmd = tkey_ex->cmd; + tkey_ex++; } @@ -330,7 +339,20 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, if (!ptr) goto bad; /* just do it, baby */ - *ptr = ((*ptr & tkey->mask) ^ tkey->val); + switch (cmd) { + case TCA_PEDIT_KEY_EX_CMD_SET: + val = tkey->val; + break; + case TCA_PEDIT_KEY_EX_CMD_ADD: + val = (*ptr + tkey->val) & ~tkey->mask; + break; + default: + pr_info("tc filter pedit bad command (%d)\n", + cmd); + goto bad; + } + + *ptr = ((*ptr & tkey->mask) ^ val); if (ptr == 
&_data) skb_store_bits(skb, hoffset + offset, ptr, 4); } -- cgit v1.2.3 From 722c9a0cebb88ac1a982285f15d5fd44f4140c66 Mon Sep 17 00:00:00 2001 From: tcharding Date: Thu, 9 Feb 2017 17:56:04 +1100 Subject: net: Fix checkpatch WARNING: please, no space before tabs This patch fixes multiple occurrences of space before tabs warnings. More lines of code were moved than required to keep kernel-doc comments uniform. Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- net/core/dev.c | 142 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 0921609dfa81..b7c795017299 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1,5 +1,5 @@ /* - * NET3 Protocol independent device support routines. + * NET3 Protocol independent device support routines. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -7,7 +7,7 @@ * 2 of the License, or (at your option) any later version. * * Derived from the non IP parts of dev.c 1.0.19 - * Authors: Ross Biro + * Authors: Ross Biro * Fred N. van Kempen, * Mark Evans, * @@ -21,9 +21,9 @@ * * Changes: * D.J. Barrow : Fixed bug where dev->refcnt gets set - * to 2 if register_netdev gets called - * before net_dev_init & also removed a - * few lines of code in the process. + * to 2 if register_netdev gets called + * before net_dev_init & also removed a + * few lines of code in the process. * Alan Cox : device private ioctl copies fields back. * Alan Cox : Transmit queue code does relevant * stunts to keep the queue safe. @@ -36,7 +36,7 @@ * Alan Cox : 100 backlog just doesn't cut it when * you start doing multicast video 8) * Alan Cox : Rewrote net_bh and list manager. - * Alan Cox : Fix ETH_P_ALL echoback lengths. + * Alan Cox : Fix ETH_P_ALL echoback lengths. * Alan Cox : Took out transmit every packet pass * Saved a few bytes in the ioctl handler * Alan Cox : Network driver sets packet type before @@ -46,7 +46,7 @@ * Richard Kooijman: Timestamp fixes. * Alan Cox : Wrong field in SIOCGIFDSTADDR * Alan Cox : Device lock protection. - * Alan Cox : Fixed nasty side effect of device close + * Alan Cox : Fixed nasty side effect of device close * changes. * Rudi Cilibrasi : Pass the right thing to * set_mac_address() @@ -67,8 +67,8 @@ * Paul Rusty Russell : SIOCSIFNAME * Pekka Riikonen : Netdev boot-time settings code * Andrew Morton : Make unregister_netdevice wait - * indefinitely on dev->refcnt - * J Hadi Salim : - Backlog queue sampling + * indefinitely on dev->refcnt + * J Hadi Salim : - Backlog queue sampling * - netif_rx() feedback */ @@ -574,13 +574,13 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map) } /** - * netdev_boot_setup_check - check boot time settings - * @dev: the netdevice + * netdev_boot_setup_check - check boot time settings + * @dev: the netdevice * - * Check boot time settings for the device. - * The found settings are set for the device to be used - * later in the device probing. - * Returns 0 if no settings found, 1 if they are. + * Check boot time settings for the device. + * The found settings are set for the device to be used + * later in the device probing. + * Returns 0 if no settings found, 1 if they are. 
*/ int netdev_boot_setup_check(struct net_device *dev) { @@ -590,10 +590,10 @@ int netdev_boot_setup_check(struct net_device *dev) for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && !strcmp(dev->name, s[i].name)) { - dev->irq = s[i].map.irq; - dev->base_addr = s[i].map.base_addr; - dev->mem_start = s[i].map.mem_start; - dev->mem_end = s[i].map.mem_end; + dev->irq = s[i].map.irq; + dev->base_addr = s[i].map.base_addr; + dev->mem_start = s[i].map.mem_start; + dev->mem_end = s[i].map.mem_end; return 1; } } @@ -603,14 +603,14 @@ EXPORT_SYMBOL(netdev_boot_setup_check); /** - * netdev_boot_base - get address from boot time settings - * @prefix: prefix for network device - * @unit: id for network device + * netdev_boot_base - get address from boot time settings + * @prefix: prefix for network device + * @unit: id for network device * - * Check boot time settings for the base address of device. - * The found settings are set for the device to be used - * later in the device probing. - * Returns 0 if no settings found. + * Check boot time settings for the base address of device. + * The found settings are set for the device to be used + * later in the device probing. + * Returns 0 if no settings found. */ unsigned long netdev_boot_base(const char *prefix, int unit) { @@ -737,15 +737,15 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name) EXPORT_SYMBOL(__dev_get_by_name); /** - * dev_get_by_name_rcu - find a device by its name - * @net: the applicable net namespace - * @name: name to find + * dev_get_by_name_rcu - find a device by its name + * @net: the applicable net namespace + * @name: name to find * - * Find an interface by name. - * If the name is found a pointer to the device is returned. - * If the name is not found then %NULL is returned. - * The reference counters are not incremented so the caller must be - * careful with locks. The caller must hold RCU lock. + * Find an interface by name. + * If the name is found a pointer to the device is returned. + * If the name is not found then %NULL is returned. + * The reference counters are not incremented so the caller must be + * careful with locks. The caller must hold RCU lock. */ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) @@ -1289,8 +1289,8 @@ void netdev_state_change(struct net_device *dev) EXPORT_SYMBOL(netdev_state_change); /** - * netdev_notify_peers - notify network peers about existence of @dev - * @dev: network device + * netdev_notify_peers - notify network peers about existence of @dev + * @dev: network device * * Generate traffic such that interested network peers are aware of * @dev, such as by generating a gratuitous ARP. This may be used when @@ -1518,17 +1518,17 @@ static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, static int dev_boot_phase = 1; /** - * register_netdevice_notifier - register a network notifier block - * @nb: notifier + * register_netdevice_notifier - register a network notifier block + * @nb: notifier * - * Register a notifier to be called when network device events occur. - * The notifier passed is linked into the kernel structures and must - * not be reused until it has been unregistered. A negative errno code - * is returned on a failure. + * Register a notifier to be called when network device events occur. + * The notifier passed is linked into the kernel structures and must + * not be reused until it has been unregistered. A negative errno code + * is returned on a failure. 
* - * When registered all registration and up events are replayed - * to the new notifier to allow device to have a race free - * view of the network device list. + * When registered all registration and up events are replayed + * to the new notifier to allow device to have a race free + * view of the network device list. */ int register_netdevice_notifier(struct notifier_block *nb) @@ -1585,17 +1585,17 @@ outroll: EXPORT_SYMBOL(register_netdevice_notifier); /** - * unregister_netdevice_notifier - unregister a network notifier block - * @nb: notifier + * unregister_netdevice_notifier - unregister a network notifier block + * @nb: notifier * - * Unregister a notifier previously registered by - * register_netdevice_notifier(). The notifier is unlinked into the - * kernel structures and may then be reused. A negative errno code - * is returned on a failure. + * Unregister a notifier previously registered by + * register_netdevice_notifier(). The notifier is unlinked into the + * kernel structures and may then be reused. A negative errno code + * is returned on a failure. * - * After unregistering unregister and down device events are synthesized - * for all devices on the device list to the removed notifier to remove - * the need for special case cleanup code. + * After unregistering unregister and down device events are synthesized + * for all devices on the device list to the removed notifier to remove + * the need for special case cleanup code. */ int unregister_netdevice_notifier(struct notifier_block *nb) @@ -7544,17 +7544,17 @@ void netdev_freemem(struct net_device *dev) } /** - * alloc_netdev_mqs - allocate network device - * @sizeof_priv: size of private data to allocate space for - * @name: device name format string - * @name_assign_type: origin of device name - * @setup: callback to initialize device - * @txqs: the number of TX subqueues to allocate - * @rxqs: the number of RX subqueues to allocate - * - * Allocates a struct net_device with private data area for driver use - * and performs basic initialization. Also allocates subqueue structs - * for each queue on the device. + * alloc_netdev_mqs - allocate network device + * @sizeof_priv: size of private data to allocate space for + * @name: device name format string + * @name_assign_type: origin of device name + * @setup: callback to initialize device + * @txqs: the number of TX subqueues to allocate + * @rxqs: the number of RX subqueues to allocate + * + * Allocates a struct net_device with private data area for driver use + * and performs basic initialization. Also allocates subqueue structs + * for each queue on the device. */ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned char name_assign_type, @@ -7666,13 +7666,13 @@ free_dev: EXPORT_SYMBOL(alloc_netdev_mqs); /** - * free_netdev - free network device - * @dev: device + * free_netdev - free network device + * @dev: device * - * This function does the last stage of destroying an allocated device - * interface. The reference to the device object is released. - * If this is the last reference then it will be freed. - * Must be called in process context. + * This function does the last stage of destroying an allocated device + * interface. The reference to the device object is released. If this + * is the last reference then it will be freed.Must be called in process + * context. 
*/ void free_netdev(struct net_device *dev) { -- cgit v1.2.3 From 643aa9cba0b688ffde28ac39aebec6f56fc2a8af Mon Sep 17 00:00:00 2001 From: tcharding Date: Thu, 9 Feb 2017 17:56:05 +1100 Subject: net: Fix checkpatch whitespace errors This patch fixes two trivial whitespace errors. Brace should be on the previous line and trailing statements should be on next line. Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- net/core/dev.c | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index b7c795017299..60a0be743cdb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -192,7 +192,8 @@ static seqcount_t devnet_rename_seq; static inline void dev_base_seq_inc(struct net *net) { - while (++net->dev_base_seq == 0); + while (++net->dev_base_seq == 0) + ; } static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) @@ -274,8 +275,8 @@ EXPORT_PER_CPU_SYMBOL(softnet_data); * register_netdevice() inits txq->_xmit_lock and sets lockdep class * according to dev->type */ -static const unsigned short netdev_lock_type[] = - {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, +static const unsigned short netdev_lock_type[] = { + ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, @@ -291,22 +292,22 @@ static const unsigned short netdev_lock_type[] = ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; -static const char *const netdev_lock_name[] = - {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", - "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", - "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", - "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", - "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", - "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", - "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", - "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", - "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", - "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", - "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", - "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", - "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", - "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", - "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; +static const char *const netdev_lock_name[] = { + "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", + "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", + "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", + "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", + "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", + "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", + "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", + "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", + "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", + "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", + "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", + "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", + "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", + 
"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", + "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; -- cgit v1.2.3 From eb13da1a103a808c05267816fa4d30d603bfda5e Mon Sep 17 00:00:00 2001 From: tcharding Date: Thu, 9 Feb 2017 17:56:06 +1100 Subject: net: Fix checkpatch block comments warnings Fix multiple occurrences of checkpatch warning. WARNING: Block comments use * on subsequent lines. Also make comment blocks more uniform. Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- net/core/dev.c | 65 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 60a0be743cdb..e583820c21be 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -353,10 +353,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev) #endif /******************************************************************************* + * + * Protocol management and registration routines + * + *******************************************************************************/ - Protocol management and registration routines - -*******************************************************************************/ /* * Add a protocol ID to the list. Now that the input handler is @@ -539,10 +540,10 @@ void dev_remove_offload(struct packet_offload *po) EXPORT_SYMBOL(dev_remove_offload); /****************************************************************************** - - Device Boot-time Settings Routines - -*******************************************************************************/ + * + * Device Boot-time Settings Routines + * + ******************************************************************************/ /* Boot time configuration table */ static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; @@ -664,10 +665,10 @@ int __init netdev_boot_setup(char *str) __setup("netdev=", netdev_boot_setup); /******************************************************************************* - - Device Interface Subroutines - -*******************************************************************************/ + * + * Device Interface Subroutines + * + *******************************************************************************/ /** * dev_get_iflink - get 'iflink' value of a interface @@ -3326,16 +3327,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) } /* The device has no queue. Common case for software devices: - loopback, all the sorts of tunnels... + * loopback, all the sorts of tunnels... - Really, it is unlikely that netif_tx_lock protection is necessary - here. (f.e. loopback and IP tunnels are clean ignoring statistics - counters.) - However, it is possible, that they rely on protection - made by us here. + * Really, it is unlikely that netif_tx_lock protection is necessary + * here. (f.e. loopback and IP tunnels are clean ignoring statistics + * counters.) + * However, it is possible, that they rely on protection + * made by us here. - Check this and shot the lock. It is not prone from deadlocks. - Either shot noqueue qdisc, it is even simpler 8) + * Check this and shot the lock. It is not prone from deadlocks. 
+ *Either shot noqueue qdisc, it is even simpler 8) */ if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ @@ -3397,9 +3398,9 @@ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) EXPORT_SYMBOL(dev_queue_xmit_accel); -/*======================================================================= - Receiver routines - =======================================================================*/ +/************************************************************************* + * Receiver routines + *************************************************************************/ int netdev_max_backlog __read_mostly = 1000; EXPORT_SYMBOL(netdev_max_backlog); @@ -6359,8 +6360,8 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags) } /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI - is important. Some (broken) drivers set IFF_PROMISC, when - IFF_ALLMULTI is requested not asking us and not reporting. + * is important. Some (broken) drivers set IFF_PROMISC, when + * IFF_ALLMULTI is requested not asking us and not reporting. */ if ((flags ^ dev->gflags) & IFF_ALLMULTI) { int inc = (flags & IFF_ALLMULTI) ? 1 : -1; @@ -6724,8 +6725,8 @@ static void rollback_registered_many(struct list_head *head) /* Notify protocols, that we are about to destroy - this device. They should clean all the things. - */ + * this device. They should clean all the things. + */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); if (!dev->rtnl_link_ops || @@ -7855,12 +7856,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char dev_shutdown(dev); /* Notify protocols, that we are about to destroy - this device. They should clean all the things. - - Note that dev->reg_state stays at NETREG_REGISTERED. - This is wanted because this way 8021q and macvlan know - the device is just moving and can keep their slaves up. - */ + * this device. They should clean all the things. + * + * Note that dev->reg_state stays at NETREG_REGISTERED. + * This is wanted because this way 8021q and macvlan know + * the device is just moving and can keep their slaves up. + */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); rcu_barrier(); call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); -- cgit v1.2.3 From f4563a75fb93d6a756416d97e13f0773ac373a24 Mon Sep 17 00:00:00 2001 From: tcharding Date: Thu, 9 Feb 2017 17:56:07 +1100 Subject: net: Fix checkpatch, Missing a blank line after declarations This patch fixes multiple occurrences of checkpatch WARNING: Missing a blank line after declarations. Signed-off-by: Tobin C. Harding Signed-off-by: David S. 
Miller --- net/core/dev.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index e583820c21be..363c44b9be63 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2498,6 +2498,7 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, if (dev->num_tc) { u8 tc = netdev_get_prio_tc_map(dev, skb->priority); + qoffset = dev->tc_to_txq[tc].offset; qcount = dev->tc_to_txq[tc].count; } @@ -2719,9 +2720,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_HIGHMEM int i; + if (!(dev->features & NETIF_F_HIGHDMA)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + if (PageHighMem(skb_frag_page(frag))) return 1; } @@ -2735,6 +2738,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t addr = page_to_phys(skb_frag_page(frag)); + if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) return 1; } @@ -3210,6 +3214,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) if (queue_index < 0 || skb->ooo_okay || queue_index >= dev->real_num_tx_queues) { int new_index = get_xps_queue(dev, skb); + if (new_index < 0) new_index = skb_tx_hash(dev, skb); @@ -3239,6 +3244,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, if (dev->real_num_tx_queues != 1) { const struct net_device_ops *ops = dev->netdev_ops; + if (ops->ndo_select_queue) queue_index = ops->ndo_select_queue(dev, skb, accel_priv, __netdev_pick_tx); @@ -3768,6 +3774,7 @@ static int netif_rx_internal(struct sk_buff *skb) #endif { unsigned int qtail; + ret = enqueue_to_backlog(skb, get_cpu(), &qtail); put_cpu(); } @@ -3827,6 +3834,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h) while (clist) { struct sk_buff *skb = clist; + clist = clist->next; WARN_ON(atomic_read(&skb->users)); @@ -5663,6 +5671,7 @@ static int netdev_adjacent_sysfs_add(struct net_device *dev, struct list_head *dev_list) { char linkname[IFNAMSIZ+7]; + sprintf(linkname, dev_list == &dev->adj_list.upper ? "upper_%s" : "lower_%s", adj_dev->name); return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), @@ -5673,6 +5682,7 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev, struct list_head *dev_list) { char linkname[IFNAMSIZ+7]; + sprintf(linkname, dev_list == &dev->adj_list.upper ? "upper_%s" : "lower_%s", name); sysfs_remove_link(&(dev->dev.kobj), linkname); @@ -5942,6 +5952,7 @@ void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev) { struct netdev_notifier_changeupper_info changeupper_info; + ASSERT_RTNL(); changeupper_info.upper_dev = upper_dev; @@ -6659,6 +6670,7 @@ EXPORT_SYMBOL(dev_change_xdp_fd); static int dev_new_index(struct net *net) { int ifindex = net->ifindex; + for (;;) { if (++ifindex <= 0) ifindex = 1; @@ -7072,6 +7084,7 @@ void netif_tx_stop_all_queues(struct net_device *dev) for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_stop_queue(txq); } } -- cgit v1.2.3 From dc371700d43e45ed9bf380746e293f063113c2b1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 14:42:03 +0100 Subject: spectrum: flower: Treat ETH_P_ALL as a special case and translate for HW HW does not understand ETH_P_ALL. So treat this special case differently and translate to 0/0 key/mask. That will allow HW to match all ethertypes. 
Fixes: 7aa0f5aa9030 ("mlxsw: spectrum: Implement TC flower offload") Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 35b147a47eb5..22ab42925377 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -197,11 +197,18 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_BASIC, f->mask); - ip_proto = key->ip_proto; + u16 n_proto_key = ntohs(key->n_proto); + u16 n_proto_mask = ntohs(mask->n_proto); + + if (n_proto_key == ETH_P_ALL) { + n_proto_key = 0; + n_proto_mask = 0; + } mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_ETHERTYPE, - ntohs(key->n_proto), - ntohs(mask->n_proto)); + n_proto_key, n_proto_mask); + + ip_proto = key->ip_proto; mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_PROTO, key->ip_proto, mask->ip_proto); -- cgit v1.2.3 From f39b2dde4886450b965ccd0a6defe8bc33fcc938 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:02:54 -0800 Subject: net: sunrpc: fix build errors when linux/phy*.h is removed from net/dsa.h Removing linux/phy.h from net/dsa.h reveals a build error in the sunrpc code: net/sunrpc/xprtrdma/svc_rdma_backchannel.c: In function 'xprt_rdma_bc_put': net/sunrpc/xprtrdma/svc_rdma_backchannel.c:277:2: error: implicit declaration of function 'module_put' [-Werror=implicit-function-declaration] net/sunrpc/xprtrdma/svc_rdma_backchannel.c: In function 'xprt_setup_rdma_bc': net/sunrpc/xprtrdma/svc_rdma_backchannel.c:348:7: error: implicit declaration of function 'try_module_get' [-Werror=implicit-function-declaration] Fix this by adding linux/module.h to svc_rdma_backchannel.c Signed-off-by: Russell King Acked-by: Anna Schumaker Signed-off-by: David S. Miller --- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 288e35c2d8f4..cb1e48e54eb1 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -4,6 +4,7 @@ * Support for backward direction RPCs on RPC/RDMA (server-side). */ +#include #include #include "xprt_rdma.h" -- cgit v1.2.3 From 43cc277a93a2b74430e2ae03e92bd9bc4f303814 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:02:55 -0800 Subject: net: cgroups: fix build errors when linux/phy*.h is removed from net/dsa.h net/core/netprio_cgroup.c:303:16: error: expected declaration specifiers or '...' before string constant MODULE_LICENSE("GPL v2"); ^~~~~~~~ Add linux/module.h to fix this. Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- net/core/netprio_cgroup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 2ec86fc552df..756637dc7a57 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3 From fc182b8512aa9b71c6cb43be4953c0ad9ebea908 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:02:56 -0800 Subject: net: macb: fix build errors when linux/phy*.h is removed from net/dsa.h drivers/net/ethernet/cadence/macb.h:862:33: sparse: expected ; at end of declaration drivers/net/ethernet/cadence/macb.h:862:33: sparse: Expected } at end of struct-union-enum-specifier drivers/net/ethernet/cadence/macb.h:862:33: sparse: got phy_interface drivers/net/ethernet/cadence/macb.h:877:1: sparse: Expected ; at the end of type declaration drivers/net/ethernet/cadence/macb.h:877:1: sparse: got } In file included from drivers/net/ethernet/cadence/macb_pci.c:29:0: drivers/net/ethernet/cadence/macb.h:862:2: error: unknown type name 'phy_interface_t' phy_interface_t phy_interface; ^~~~~~~~~~~~~~~ Add linux/phy.h to macb.h Signed-off-by: Russell King Acked-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index a2cf91223003..234a49eaccfd 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -10,6 +10,8 @@ #ifndef _MACB_H #define _MACB_H +#include + #define MACB_GREGS_NBR 16 #define MACB_GREGS_VERSION 2 #define MACB_MAX_QUEUES 8 -- cgit v1.2.3 From 8c56ea410efb537e5b4d10365e14b140f110314c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:02:57 -0800 Subject: net: lan78xx: fix build errors when linux/phy*.h is removed from net/dsa.h drivers/net/usb/lan78xx.c:394:33: sparse: expected ; at end of declaration drivers/net/usb/lan78xx.c:394:33: sparse: Expected } at end of struct-union-enum-specifier drivers/net/usb/lan78xx.c:394:33: sparse: got interface drivers/net/usb/lan78xx.c:403:1: sparse: Expected ; at the end of type declaration drivers/net/usb/lan78xx.c:403:1: sparse: got } Add linux/phy.h to lan78xx.c Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/usb/lan78xx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 08f8703e4d54..9889a70ff4f6 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "lan78xx.h" #define DRIVER_AUTHOR "WOOJUNG HUH " -- cgit v1.2.3 From 13bf77604e163b957948dea464752cf3cd4bbc52 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:02:58 -0800 Subject: net: bgmac: fix build errors when linux/phy*.h is removed from net/dsa.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/net/ethernet/broadcom/bgmac.c:1015:17: error: dereferencing pointer to incomplete type 'struct mii_bus' drivers/net/ethernet/broadcom/bgmac.c:1185:2: error: implicit declaration of function 'phy_start' [-Werror=implicit-function-declaration] drivers/net/ethernet/broadcom/bgmac.c:1198:2: error: implicit declaration of function 'phy_stop' [-Werror=implicit-function-declaration] drivers/net/ethernet/broadcom/bgmac.c:1239:9: error: implicit declaration of function 'phy_mii_ioctl' [-Werror=implicit-function-declaration] drivers/net/ethernet/broadcom/bgmac.c:1389:28: error: 'phy_ethtool_get_link_ksettings' undeclared here (not in a function) drivers/net/ethernet/broadcom/bgmac.c:1390:28: error: 'phy_ethtool_set_link_ksettings' undeclared here (not in a function) drivers/net/ethernet/broadcom/bgmac.c:1403:13: error: dereferencing pointer to incomplete type 'struct phy_device' drivers/net/ethernet/broadcom/bgmac.c:1417:3: error: implicit declaration of function 'phy_print_status' [-Werror=implicit-function-declaration] drivers/net/ethernet/broadcom/bgmac.c:1424:26: error: storage size of 'fphy_status' isn't known drivers/net/ethernet/broadcom/bgmac.c:1424:9: error: variable 'fphy_status' has initializer but incomplete type drivers/net/ethernet/broadcom/bgmac.c:1425:11: warning: excess elements in struct initializer drivers/net/ethernet/broadcom/bgmac.c:1425:3: error: unknown field 'link' specified in initializer drivers/net/ethernet/broadcom/bgmac.c:1426:12: note: in expansion of macro 'SPEED_1000' drivers/net/ethernet/broadcom/bgmac.c:1426:3: error: unknown field 'speed' specified in initializer drivers/net/ethernet/broadcom/bgmac.c:1427:13: note: in expansion of macro 'DUPLEX_FULL' drivers/net/ethernet/broadcom/bgmac.c:1427:3: error: unknown field 'duplex' specified in initializer drivers/net/ethernet/broadcom/bgmac.c:1432:12: error: implicit declaration of function 'fixed_phy_register' [-Werror=implicit-function-declaration] drivers/net/ethernet/broadcom/bgmac.c:1432:31: error: 'PHY_POLL' undeclared (first use in this function) drivers/net/ethernet/broadcom/bgmac.c:1438:8: error: implicit declaration of function 'phy_connect_direct' [-Werror=implicit-function-declaration] drivers/net/ethernet/broadcom/bgmac.c:1439:6: error: 'PHY_INTERFACE_MODE_MII' undeclared (first use in this function) drivers/net/ethernet/broadcom/bgmac.c:1521:2: error: implicit declaration of function 'phy_disconnect' [-Werror=implicit-function-declaration] drivers/net/ethernet/broadcom/bgmac.c:1541:15: error: expected declaration specifiers or '...' before string constant Add linux/phy.h to bgmac.c Signed-off-by: Russell King Acked-by: Rafał Miłecki Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bgmac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index fe88126b1e0c..20fe2520da42 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include "bgmac.h" static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask, -- cgit v1.2.3 From f225e4e69770a79663c62c8d6629fdf3fcbc360a Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:02:59 -0800 Subject: net: fman: fix build errors when linux/phy*.h is removed from net/dsa.h drivers/net/ethernet/freescale/fman/fman_memac.c:519:21: error: dereferencing pointer to incomplete type 'struct fixed_phy_status' Add linux/phy_fixed.h to fman_memac.c Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman_memac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 71a5ded9d1de..cd6a53eaf161 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -38,6 +38,7 @@ #include #include #include +#include #include /* PCS registers */ -- cgit v1.2.3 From 9303ab2b3402b60f6c39abfdbfa4ce00fce8bee4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:03:00 -0800 Subject: net: mvneta: fix build errors when linux/phy*.h is removed from net/dsa.h drivers/net/ethernet/marvell/mvneta.c:2694:26: error: storage size of 'status' isn't known drivers/net/ethernet/marvell/mvneta.c:2695:26: error: storage size of 'changed' isn't known drivers/net/ethernet/marvell/mvneta.c:2695:9: error: variable 'changed' has initializer but incomplete type drivers/net/ethernet/marvell/mvneta.c:2709:2: error: implicit declaration of function 'fixed_phy_update_state' [-Werror=implicit-function-declaration] Add linux/phy_fixed.h to mvneta.c Signed-off-by: Russell King Acked-by: Thomas Petazzoni Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 0f4d1697be46..fdf71720e707 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From da5df6205b9c493b9367dab057b0f80da46a0fdb Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:03:01 -0800 Subject: iscsi: fix build errors when linux/phy*.h is removed from net/dsa.h drivers/target/iscsi/iscsi_target_login.c:1135:7: error: implicit declaration of function 'try_module_get' [-Werror=implicit-function-declaration] Add linux/module.h to iscsi_target_login.c. Signed-off-by: Russell King Reviewed-by: Bart Van Assche Acked-by: Nicholas Bellinger Signed-off-by: David S. 
Miller --- drivers/target/iscsi/iscsi_target_login.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 450f51deb2a2..eab274d17b5c 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -17,6 +17,7 @@ ******************************************************************************/ #include +#include #include #include #include -- cgit v1.2.3 From 7ca2ea8253f1fbb69c437b66255a2331ff2e14c2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:03:02 -0800 Subject: MIPS: Octeon: Remove unnecessary MODULE_*() octeon-platform.c can not be built as a module for two reasons: (a) the Makefile doesn't allow it: obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o (b) the multiple *_initcall() statements, each of which are translated to a module_init() call when attempting a module build, become aliases to init_module(). Having more than one alias will cause a build error. Hence, rather than adding a linux/module.h include, remove the redundant MODULE_*() from this file. Acked-by: David Daney Signed-off-by: Russell King Signed-off-by: David S. Miller --- arch/mips/cavium-octeon/octeon-platform.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 37a932d9148c..8297ce714c5e 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -1060,7 +1060,3 @@ static int __init octeon_publish_devices(void) return of_platform_bus_probe(NULL, octeon_ids, NULL); } arch_initcall(octeon_publish_devices); - -MODULE_AUTHOR("David Daney "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Platform driver for Octeon SOC"); -- cgit v1.2.3 From e3bfc6e7baaac3992f62754228128427ac955e3a Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:03:03 -0800 Subject: net: liquidio: fix build errors when linux/phy*.h is removed from net/dsa.h drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:30: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:30: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:30: error: type defaults to 'int' in declaration of 'MODULE_AUTHOR' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:30: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:31: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:31: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:31: error: type defaults to 'int' in declaration of 'MODULE_DESCRIPTION' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:31: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:32: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:32: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:32: error: type defaults to 'int' in declaration of 'MODULE_LICENSE' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:32: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:33: error: expected declaration specifiers or '...' 
before string constant drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:33: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:33: error: type defaults to 'int' in declaration of 'MODULE_VERSION' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:33: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:36: error: expected ')' before 'int' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:37: error: expected ')' before string constant drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:325: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:325: error: type defaults to 'int' in declaration of 'MODULE_DEVICE_TABLE' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:325: warning: parameter names (without types) in function declaration drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:3250: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:3250: error: type defaults to 'int' in declaration of 'module_init' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:3250: warning: parameter names (without types) in function declaration drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:3251: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:3251: error: type defaults to 'int' in declaration of 'module_exit' drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:3251: warning: parameter names (without types) in function declaration drivers/net/ethernet/cavium/liquidio/lio_main.c:36: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:36: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:36: error: type defaults to 'int' in declaration of 'MODULE_AUTHOR' drivers/net/ethernet/cavium/liquidio/lio_main.c:36: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:37: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:37: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:37: error: type defaults to 'int' in declaration of 'MODULE_DESCRIPTION' drivers/net/ethernet/cavium/liquidio/lio_main.c:37: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:38: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:38: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:38: error: type defaults to 'int' in declaration of 'MODULE_LICENSE' drivers/net/ethernet/cavium/liquidio/lio_main.c:38: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:39: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:39: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:39: error: type defaults to 'int' in declaration of 'MODULE_VERSION' drivers/net/ethernet/cavium/liquidio/lio_main.c:39: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:40: error: expected declaration specifiers or '...' 
before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:40: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:40: error: type defaults to 'int' in declaration of 'MODULE_FIRMWARE' drivers/net/ethernet/cavium/liquidio/lio_main.c:40: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:41: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:41: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:41: error: type defaults to 'int' in declaration of 'MODULE_FIRMWARE' drivers/net/ethernet/cavium/liquidio/lio_main.c:41: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:42: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:42: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:42: error: type defaults to 'int' in declaration of 'MODULE_FIRMWARE' drivers/net/ethernet/cavium/liquidio/lio_main.c:42: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:43: error: expected declaration specifiers or '...' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:43: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:43: error: type defaults to 'int' in declaration of 'MODULE_FIRMWARE' drivers/net/ethernet/cavium/liquidio/lio_main.c:43: error: function declaration isn't a prototype drivers/net/ethernet/cavium/liquidio/lio_main.c:46: error: expected ')' before 'int' drivers/net/ethernet/cavium/liquidio/lio_main.c:48: error: expected ')' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:53: error: expected ')' before 'int' drivers/net/ethernet/cavium/liquidio/lio_main.c:54: error: expected ')' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:57: error: expected ')' before 'sizeof' drivers/net/ethernet/cavium/liquidio/lio_main.c:58: error: expected ')' before string constant drivers/net/ethernet/cavium/liquidio/lio_main.c:498: warning: data definitionhas no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:498: error: type defaults to 'int' in declaration of 'MODULE_DEVICE_TABLE' drivers/net/ethernet/cavium/liquidio/lio_main.c:498: warning: parameter names (without types) in function declaration drivers/net/ethernet/cavium/liquidio/lio_main.c: In function 'octeon_recv_vf_drv_notice': drivers/net/ethernet/cavium/liquidio/lio_main.c:4393: error: implicit declaration of function 'try_module_get' drivers/net/ethernet/cavium/liquidio/lio_main.c:4400: error: implicit declaration of function 'module_put' drivers/net/ethernet/cavium/liquidio/lio_main.c: At top level: drivers/net/ethernet/cavium/liquidio/lio_main.c:4670: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:4670: error: type defaults to 'int' in declaration of 'module_init' drivers/net/ethernet/cavium/liquidio/lio_main.c:4670: warning: parameter names (without types) in function declaration drivers/net/ethernet/cavium/liquidio/lio_main.c:4671: warning: data definition has no type or storage class drivers/net/ethernet/cavium/liquidio/lio_main.c:4671: error: type defaults to 'int' in declaration of 'module_exit' 
drivers/net/ethernet/cavium/liquidio/lio_main.c:4671: warning: parameter names (without types) in function declaration Add linux/module.h to both these files. drivers/net/ethernet/cavium/liquidio/octeon_console.c:40:31: error: expected ')' before 'int' drivers/net/ethernet/cavium/liquidio/octeon_console.c:42:4: error: expected ')' before string constant Add linux/moduleparam.h to this file. Signed-off-by: Russell King Acked-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 1 + drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 1 + drivers/net/ethernet/cavium/liquidio/octeon_console.c | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index a3c7b999e526..be9c0e3f5ade 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -15,6 +15,7 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details. ***********************************************************************/ +#include #include #include #include diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 0536cb9f6182..9d5e03502c76 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -15,6 +15,7 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details. ***********************************************************************/ +#include #include #include #include "liquidio_common.h" diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 42b673dce533..53f38d05f7c2 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -18,6 +18,7 @@ /** * @file octeon_console.c */ +#include #include #include #include -- cgit v1.2.3 From 7ecb6227ca339f93cda447d586d119df5687ddf2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:03:04 -0800 Subject: net: ath5k: fix build errors when linux/phy*.h is removed from net/dsa.h Fix these errors reported by the 0-day builder by replacing the linux/export.h include with linux/module.h. 
In file included from include/linux/platform_device.h:14:0, from drivers/net/wireless/ath/ath5k/ahb.c:20: include/linux/device.h:1463:1: warning: data definition has no type or storage class module_init(__driver##_init); \ ^ include/linux/platform_device.h:228:2: note: in expansion of macro 'module_driver' module_driver(__platform_driver, platform_driver_register, \ ^~~~~~~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: note: in expansion of macro 'module_platform_driver' module_platform_driver(ath_ahb_driver); ^~~~~~~~~~~~~~~~~~~~~~ include/linux/device.h:1463:1: error: type defaults to 'int' in declaration of 'module_init' [-Werror=implicit-int] module_init(__driver##_init); \ ^ include/linux/platform_device.h:228:2: note: in expansion of macro 'module_driver' module_driver(__platform_driver, platform_driver_register, \ ^~~~~~~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: note: in expansion of macro 'module_platform_driver' module_platform_driver(ath_ahb_driver); ^~~~~~~~~~~~~~~~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: warning: parameter names (without types) in function declaration In file included from include/linux/platform_device.h:14:0, from drivers/net/wireless/ath/ath5k/ahb.c:20: include/linux/device.h:1468:1: warning: data definition has no type or storage class module_exit(__driver##_exit); ^ include/linux/platform_device.h:228:2: note: in expansion of macro 'module_driver' module_driver(__platform_driver, platform_driver_register, \ ^~~~~~~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: note: in expansion of macro 'module_platform_driver' module_platform_driver(ath_ahb_driver); ^~~~~~~~~~~~~~~~~~~~~~ include/linux/device.h:1468:1: error: type defaults to 'int' in declaration of 'module_exit' [-Werror=implicit-int] module_exit(__driver##_exit); ^ include/linux/platform_device.h:228:2: note: in expansion of macro 'module_driver' module_driver(__platform_driver, platform_driver_register, \ ^~~~~~~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: note: in expansion of macro 'module_platform_driver' module_platform_driver(ath_ahb_driver); ^~~~~~~~~~~~~~~~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: warning: parameter names (without types) in function declaration In file included from include/linux/platform_device.h:14:0, from drivers/net/wireless/ath/ath5k/ahb.c:20: drivers/net/wireless/ath/ath5k/ahb.c:233:24: warning: 'ath_ahb_driver_exit' defined but not used [-Wunused-function] module_platform_driver(ath_ahb_driver); ^ include/linux/device.h:1464:20: note: in definition of macro 'module_driver' static void __exit __driver##_exit(void) \ ^~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: note: in expansion of macro 'module_platform_driver' module_platform_driver(ath_ahb_driver); ^~~~~~~~~~~~~~~~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:24: warning: 'ath_ahb_driver_init' defined but not used [-Wunused-function] module_platform_driver(ath_ahb_driver); ^ include/linux/device.h:1459:19: note: in definition of macro 'module_driver' static int __init __driver##_init(void) \ ^~~~~~~~ drivers/net/wireless/ath/ath5k/ahb.c:233:1: note: in expansion of macro 'module_platform_driver' module_platform_driver(ath_ahb_driver); ^~~~~~~~~~~~~~~~~~~~~~ Signed-off-by: Russell King Acked-by: Kalle Valo Signed-off-by: David S. 
Miller --- drivers/net/wireless/ath/ath5k/ahb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index 2ca88b593e4c..c0794f5988b3 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -16,10 +16,10 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include #include #include #include -#include #include #include "ath5k.h" #include "debug.h" -- cgit v1.2.3 From 4d56a29f17508b2eb8bee66b8f0e3679201fa807 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 7 Feb 2017 15:03:05 -0800 Subject: net: dsa: remove unnecessary phy*.h includes Including phy.h and phy_fixed.h into net/dsa.h causes phy*.h to be an unnecessary dependency for quite a large amount of the kernel. There's very little which actually requires definitions from phy.h in net/dsa.h - the include itself only wants the declaration of a couple of structures and IFNAMSIZ. Add linux/if.h for IFNAMSIZ, declarations for the structures, phy.h to mv88e6xxx.h as it needs it for phy_interface_t, and remove both phy.h and phy_fixed.h from net/dsa.h. This patch reduces from around 800 files rebuilt to around 40 - even with ccache, the time difference is noticable. Tested-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 1 + include/net/dsa.h | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index d6b335cd8c09..ac54f40813f7 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -15,6 +15,7 @@ #include #include #include +#include #ifndef UINT64_MAX #define UINT64_MAX (u64)(~((u64)0)) diff --git a/include/net/dsa.h b/include/net/dsa.h index b49b2004891e..4e13e695f025 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -11,17 +11,18 @@ #ifndef __LINUX_NET_DSA_H #define __LINUX_NET_DSA_H +#include #include #include #include #include #include #include -#include -#include #include struct tc_action; +struct phy_device; +struct fixed_phy_status; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, -- cgit v1.2.3 From 1deeaa0b86973bef6629396cc0f5f092872bb6de Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 9 Feb 2017 16:17:40 +0200 Subject: net: ethernet: ti: netcp_core: remove netif_trans_update No need to update jiffies in txq->trans_start twice and only for tx 0, it's supposed to be done in netdev_start_xmit() and per tx queue. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index ebab1473f366..7c7ae0890e90 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1316,8 +1316,6 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (ret) goto drop; - netif_trans_update(ndev); - /* Check Tx pool count & stop subqueue if needed */ desc_count = knav_pool_count(netcp->tx_pool); if (desc_count < netcp->tx_pause_threshold) { -- cgit v1.2.3 From adf200f31c000d707e4afe238ed1d1199e0cce7c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 15:54:33 +0100 Subject: devlink: fix the name of eswitch commands The eswitch_[gs]et command is supposed to be similar to port_[gs]et command - for multiple eswitch attributes. However, when it was introduced by 08f4b5918b2d ("net/devlink: Add E-Switch mode control") it was wrongly named with the word "mode" in it. So fix this now, make the oririnal enum value existing but obsolete. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/devlink.h | 10 ++++++++-- net/core/devlink.c | 18 +++++++++--------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 9014c33d4e77..0f1f3a12e23c 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -57,8 +57,14 @@ enum devlink_command { DEVLINK_CMD_SB_OCC_SNAPSHOT, DEVLINK_CMD_SB_OCC_MAX_CLEAR, - DEVLINK_CMD_ESWITCH_MODE_GET, - DEVLINK_CMD_ESWITCH_MODE_SET, + DEVLINK_CMD_ESWITCH_GET, +#define DEVLINK_CMD_ESWITCH_MODE_GET /* obsolete, never use this! */ \ + DEVLINK_CMD_ESWITCH_GET + + DEVLINK_CMD_ESWITCH_SET, +#define DEVLINK_CMD_ESWITCH_MODE_SET /* obsolete, never use this! 
*/ \ + DEVLINK_CMD_ESWITCH_SET + /* add new commands above here */ __DEVLINK_CMD_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index 2b5bf9efa720..7aa8e5369dc5 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1435,8 +1435,8 @@ out: return err; } -static int devlink_nl_cmd_eswitch_mode_get_doit(struct sk_buff *skb, - struct genl_info *info) +static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, + struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; const struct devlink_ops *ops = devlink->ops; @@ -1450,7 +1450,7 @@ static int devlink_nl_cmd_eswitch_mode_get_doit(struct sk_buff *skb, if (!msg) return -ENOMEM; - err = devlink_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_MODE_GET, + err = devlink_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET, info->snd_portid, info->snd_seq, 0); if (err) { @@ -1461,8 +1461,8 @@ static int devlink_nl_cmd_eswitch_mode_get_doit(struct sk_buff *skb, return genlmsg_reply(msg, info); } -static int devlink_nl_cmd_eswitch_mode_set_doit(struct sk_buff *skb, - struct genl_info *info) +static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, + struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; const struct devlink_ops *ops = devlink->ops; @@ -1629,15 +1629,15 @@ static const struct genl_ops devlink_nl_ops[] = { DEVLINK_NL_FLAG_LOCK_PORTS, }, { - .cmd = DEVLINK_CMD_ESWITCH_MODE_GET, - .doit = devlink_nl_cmd_eswitch_mode_get_doit, + .cmd = DEVLINK_CMD_ESWITCH_GET, + .doit = devlink_nl_cmd_eswitch_get_doit, .policy = devlink_nl_policy, .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { - .cmd = DEVLINK_CMD_ESWITCH_MODE_SET, - .doit = devlink_nl_cmd_eswitch_mode_set_doit, + .cmd = DEVLINK_CMD_ESWITCH_SET, + .doit = devlink_nl_cmd_eswitch_set_doit, .policy = devlink_nl_policy, .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, -- cgit v1.2.3 From 21e3d2dd4a19f842e7d134c341eb584970ff3b32 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 15:54:34 +0100 Subject: devlink: rename devlink_eswitch_fill to devlink_nl_eswitch_fill Be aligned with the rest of the file and name the helper function accordingly. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/core/devlink.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 7aa8e5369dc5..f36192406f9a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1392,9 +1392,9 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb, return -EOPNOTSUPP; } -static int devlink_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, - enum devlink_command cmd, u32 portid, - u32 seq, int flags) +static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, + enum devlink_command cmd, u32 portid, + u32 seq, int flags) { const struct devlink_ops *ops = devlink->ops; void *hdr; @@ -1450,8 +1450,8 @@ static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, if (!msg) return -ENOMEM; - err = devlink_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET, - info->snd_portid, info->snd_seq, 0); + err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET, + info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); -- cgit v1.2.3 From 1a6aa36b6f92b1a2f2e6789f6785372d4d6ddca9 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 15:54:35 +0100 Subject: devlink: use nla_put_failure goto label instead of out Be aligned with the rest of the code and use label named nla_put_failure. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/devlink.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index f36192406f9a..7f88cc879d43 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1408,29 +1408,29 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, err = devlink_nl_put_handle(msg, devlink); if (err) - goto out; + goto nla_put_failure; err = ops->eswitch_mode_get(devlink, &mode); if (err) - goto out; + goto nla_put_failure; err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode); if (err) - goto out; + goto nla_put_failure; if (ops->eswitch_inline_mode_get) { err = ops->eswitch_inline_mode_get(devlink, &inline_mode); if (err) - goto out; + goto nla_put_failure; err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE, inline_mode); if (err) - goto out; + goto nla_put_failure; } genlmsg_end(msg, hdr); return 0; -out: +nla_put_failure: genlmsg_cancel(msg, hdr); return err; } -- cgit v1.2.3 From 4456f61cfd2a589c4368fe0b9080b646b9bd470d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Feb 2017 15:54:36 +0100 Subject: devlink: allow to fillup eswitch attrs even if mode_get op does not exist Even when mode_get op is not present, other eswitch attrs need to be filled-up. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/core/devlink.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 7f88cc879d43..e9c1e6acfb6d 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1410,12 +1410,14 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, if (err) goto nla_put_failure; - err = ops->eswitch_mode_get(devlink, &mode); - if (err) - goto nla_put_failure; - err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode); - if (err) - goto nla_put_failure; + if (ops->eswitch_mode_get) { + err = ops->eswitch_mode_get(devlink, &mode); + if (err) + goto nla_put_failure; + err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode); + if (err) + goto nla_put_failure; + } if (ops->eswitch_inline_mode_get) { err = ops->eswitch_inline_mode_get(devlink, &inline_mode); @@ -1443,7 +1445,7 @@ static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct sk_buff *msg; int err; - if (!ops || !ops->eswitch_mode_get) + if (!ops) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); -- cgit v1.2.3 From 1697599ee301a52cded6499a09bd609f7f63fd06 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:27 -0800 Subject: bitfield.h: add FIELD_FIT() helper Add a helper for checking at runtime that a value will fit inside a specified field/mask. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_bpf.h | 2 -- include/linux/bitfield.h | 13 +++++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h index 76a19f1796af..9513c80f7be5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h @@ -39,8 +39,6 @@ #include #include -#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask))) - /* For branch fixup logic use up-most byte of branch instruction as scratch * area. Remember to clear this before sending instructions to HW! */ diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index f6505d83069d..8b9d6fff002d 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -62,6 +62,19 @@ (1ULL << __bf_shf(_mask))); \ }) +/** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. + */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) + /** * FIELD_PREP() - prepare a bitfield element * @_mask: shifted mask defining the field's length and position -- cgit v1.2.3 From 2633beb99b81f4dd005f99aabd98c8afa056c528 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:28 -0800 Subject: nfp: rename the driver and add new main file Support for the PF driver is about to be added and will share much of the code. When the VF driver was added we planned to maintain the PF driver as a separate module but have decided that for our simple use case just maintaining a single module is more reasonable. Rename the driver to just "nfp" and update the Kconfig. While at it remove latent references to NFP3200. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/Kconfig | 18 ++--- drivers/net/ethernet/netronome/Makefile | 2 +- drivers/net/ethernet/netronome/nfp/Makefile | 9 ++- drivers/net/ethernet/netronome/nfp/nfp_main.c | 85 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_main.h | 48 ++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_net.h | 9 +-- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 8 +- .../net/ethernet/netronome/nfp/nfp_netvf_main.c | 38 +--------- 8 files changed, 160 insertions(+), 57 deletions(-) create mode 100644 drivers/net/ethernet/netronome/nfp/nfp_main.c create mode 100644 drivers/net/ethernet/netronome/nfp/nfp_main.h diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index 9508ad782c30..967d7ca8c28c 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -15,21 +15,21 @@ config NET_VENDOR_NETRONOME if NET_VENDOR_NETRONOME -config NFP_NETVF - tristate "Netronome(R) NFP4000/NFP6000 VF NIC driver" +config NFP + tristate "Netronome(R) NFP4000/NFP6000 NIC driver" depends on PCI && PCI_MSI depends on VXLAN || VXLAN=n ---help--- - This driver supports SR-IOV virtual functions of - the Netronome(R) NFP4000/NFP6000 cards working as - a advanced Ethernet NIC. + This driver supports the Netronome(R) NFP4000/NFP6000 based + cards working as a advanced Ethernet NIC. It works with both + SR-IOV physical and virtual functions. -config NFP_NET_DEBUG - bool "Debug support for Netronome(R) NFP3200/NFP6000 NIC drivers" - depends on NFP_NET || NFP_NETVF +config NFP_DEBUG + bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers" + depends on NFP ---help--- Enable extra sanity checks and debugfs support in - Netronome(R) NFP3200/NFP6000 NIC PF and VF drivers. + Netronome(R) NFP4000/NFP6000 NIC drivers. Note: selecting this option may adversely impact performance. diff --git a/drivers/net/ethernet/netronome/Makefile b/drivers/net/ethernet/netronome/Makefile index dcb7b383f634..7fb3b84b5556 100644 --- a/drivers/net/ethernet/netronome/Makefile +++ b/drivers/net/ethernet/netronome/Makefile @@ -2,4 +2,4 @@ # Makefile for the Netronome network device drivers # -obj-$(CONFIG_NFP_NETVF) += nfp/ +obj-$(CONFIG_NFP) += nfp/ diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 0efb2ba9a558..1103cb498323 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -1,15 +1,16 @@ -obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o +obj-$(CONFIG_NFP) += nfp.o -nfp_netvf-objs := \ +nfp-objs := \ + nfp_main.o \ nfp_net_common.o \ nfp_net_ethtool.o \ nfp_net_offload.o \ nfp_netvf_main.o ifeq ($(CONFIG_BPF_SYSCALL),y) -nfp_netvf-objs += \ +nfp-objs += \ nfp_bpf_verifier.o \ nfp_bpf_jit.o endif -nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o +nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c new file mode 100644 index 000000000000..cc6d75fff005 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. 
+ * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_main.c + * Authors: Jakub Kicinski + * Alejandro Lucero + * Jason McMullan + * Rolf Neugebauer + */ + +#include +#include +#include +#include +#include + +#include "nfp_main.h" +#include "nfp_net.h" + +static const char nfp_driver_name[] = "nfp"; +const char nfp_driver_version[] = VERMAGIC_STRING; + +static int __init nfp_main_init(void) +{ + int err; + + pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n", + nfp_driver_name); + + nfp_net_debugfs_create(); + + err = pci_register_driver(&nfp_netvf_pci_driver); + if (err) + goto err_destroy_debugfs; + + return err; + +err_destroy_debugfs: + nfp_net_debugfs_destroy(); + return err; +} + +static void __exit nfp_main_exit(void) +{ + pci_unregister_driver(&nfp_netvf_pci_driver); + nfp_net_debugfs_destroy(); +} + +module_init(nfp_main_init); +module_exit(nfp_main_exit); + +MODULE_AUTHOR("Netronome Systems "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver."); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h new file mode 100644 index 000000000000..371ae2731909 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_main.h + * Author: Jason McMullan + */ + +#ifndef NFP_MAIN_H +#define NFP_MAIN_H + +#include +#include +#include + +extern struct pci_driver nfp_netvf_pci_driver; + +#endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 2115f446031e..f05f750c2ea0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -770,8 +770,7 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q) } /* Globals */ -extern const char nfp_net_driver_name[]; -extern const char nfp_net_driver_version[]; +extern const char nfp_driver_version[]; /* Prototypes */ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, @@ -795,7 +794,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); -#ifdef CONFIG_NFP_NET_DEBUG +#ifdef CONFIG_NFP_DEBUG void nfp_net_debugfs_create(void); void nfp_net_debugfs_destroy(void); void nfp_net_debugfs_adapter_add(struct nfp_net *nn); @@ -816,7 +815,7 @@ static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn) static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn) { } -#endif /* CONFIG_NFP_NET_DEBUG */ +#endif /* CONFIG_NFP_DEBUG */ void nfp_net_filter_stats_timer(unsigned long data); int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1b26e9646574..255f30252550 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -132,9 +132,9 @@ static void nfp_net_get_drvinfo(struct net_device *netdev, { struct nfp_net *nn = netdev_priv(netdev); - strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, nfp_net_driver_version, - sizeof(drvinfo->version)); + strlcpy(drvinfo->driver, nn->pdev->driver->name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d.%d", diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index d065235034d4..5bacc48e6627 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -46,8 +46,8 @@ #include "nfp_net_ctrl.h" #include "nfp_net.h" -const char nfp_net_driver_name[] = "nfp_netvf"; -const char nfp_net_driver_version[] = "0.1"; +static const char nfp_net_driver_name[] = "nfp_netvf"; + #define PCI_DEVICE_NFP6000VF 0x6003 static const struct pci_device_id nfp_netvf_pci_device_ids[] = { { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF, @@ -315,39 +315,9 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static struct pci_driver nfp_netvf_pci_driver = { +struct pci_driver nfp_netvf_pci_driver = { .name = nfp_net_driver_name, .id_table = nfp_netvf_pci_device_ids, .probe = nfp_netvf_pci_probe, .remove = nfp_netvf_pci_remove, }; - -static int __init nfp_netvf_init(void) -{ - int err; - - pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n", - nfp_net_driver_name); - - nfp_net_debugfs_create(); - err = pci_register_driver(&nfp_netvf_pci_driver); - if (err) { - nfp_net_debugfs_destroy(); - return err; - } - - return 0; -} - -static void __exit nfp_netvf_exit(void) -{ - pci_unregister_driver(&nfp_netvf_pci_driver); - nfp_net_debugfs_destroy(); -} - -module_init(nfp_netvf_init); -module_exit(nfp_netvf_exit); - -MODULE_AUTHOR("Netronome Systems "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("NFP VF network device driver"); -- cgit v1.2.3 From 4cb584e0ee7df70fd0376aee60cf701855ea8c81 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:29 -0800 Subject: nfp: add CPP access core Command Push Pull is the name of NFP's network on a chip. PCIe PF can access the interconnect through a number of mappings controlled via Base Access Registers. BARs allow the PF to issue pretty much any command or address any memory on the chip. Add appropriate logic and a handful of helpers for simple operations like reading scalars from memories. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 4 + .../netronome/nfp/nfpcore/nfp6000/nfp6000.h | 88 + .../netronome/nfp/nfpcore/nfp6000/nfp_xpb.h | 57 + .../ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 1364 ++++++++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h | 46 + .../net/ethernet/netronome/nfp/nfpcore/nfp_arm.h | 246 +++ .../net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 431 +++++ .../ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 1706 ++++++++++++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp_cpplib.c | 281 ++++ .../ethernet/netronome/nfp/nfpcore/nfp_target.c | 764 +++++++++ 10 files changed, 4987 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 1103cb498323..fb9dadf9236d 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -1,6 +1,10 @@ obj-$(CONFIG_NFP) += nfp.o nfp-objs := \ + nfpcore/nfp6000_pcie.o \ + nfpcore/nfp_cppcore.o \ + nfpcore/nfp_cpplib.o \ + nfpcore/nfp_target.o \ nfp_main.o \ nfp_net_common.o \ nfp_net_ethtool.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h new file mode 100644 index 000000000000..0e497a6154db --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef NFP6000_NFP6000_H +#define NFP6000_NFP6000_H + +#include +#include + +/* CPP Target IDs */ +#define NFP_CPP_TARGET_INVALID 0 +#define NFP_CPP_TARGET_NBI 1 +#define NFP_CPP_TARGET_QDR 2 +#define NFP_CPP_TARGET_ILA 6 +#define NFP_CPP_TARGET_MU 7 +#define NFP_CPP_TARGET_PCIE 9 +#define NFP_CPP_TARGET_ARM 10 +#define NFP_CPP_TARGET_CRYPTO 12 +#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ +#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ +#define NFP_CPP_TARGET_CT_XPB 14 +#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 +#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH + +#define NFP_ISL_EMEM0 24 + +#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL +#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL + +#define PUSHPULL(_pull, _push) ((_pull) << 4 | (_push) << 0) +#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0) +#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4) + +static inline int pushpull_width(int pp) +{ + pp &= 0xf; + + if (pp == 0) + return -EINVAL; + return 2 << pp; +} + +static inline int nfp_cppat_mu_locality_lsb(int mode, bool addr40) +{ + switch (mode) { + case 0 ... 3: + return addr40 ? 38 : 30; + default: + return -EINVAL; + } +} + +int nfp_target_pushpull(u32 cpp_id, u64 address); +int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, + u32 *cpp_target_id, u64 *cpp_target_address, + const u32 *imb_table); + +#endif /* NFP6000_NFP6000_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h new file mode 100644 index 000000000000..40fb19939505 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_xpb.h + * Author: Jason McMullan + */ + +#ifndef NFP6000_XPB_H +#define NFP6000_XPB_H + +/* For use with NFP6000 Databook "XPB Addressing" section + */ +#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24) + +#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000) + +#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F) + +/* For use with NFP6000 Databook "XPB Island and Device IDs" chapter + */ +#define NFP_XPB_DEVICE(island, slave, device) \ + (NFP_XPB_OVERLAY(island) | \ + (((slave) & 3) << 22) | \ + (((device) & 0x3f) << 16)) + +#endif /* NFP6000_XPB_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c new file mode 100644 index 000000000000..165e085c2a24 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -0,0 +1,1364 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp6000_pcie.c + * Authors: Jakub Kicinski + * Jason McMullan + * Rolf Neugebauer + * + * Multiplexes the NFP BARs between NFP internal resources and + * implements the PCIe specific interface for generic CPP bus access. + * + * The BARs are managed with refcounts and are allocated/acquired + * using target, token and offset/size matching. The generic CPP bus + * abstraction builds upon this BAR interface. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nfp_cpp.h" + +#include "nfp6000/nfp6000.h" + +#include "nfp6000_pcie.h" + +#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) +#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \ + (0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x) (((_x) & 0x3) << 30) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x) (((_x) >> 30) & 0x3) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x) (((_x) & 0x3) << 28) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x) (((_x) >> 28) & 0x3) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x) (((_x) & 0xffffff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x) (((_x) >> 0) & 0xffffff) +#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \ + (0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x) (((_x) & 0x7f) << 24) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x) (((_x) >> 24) & 0x7f) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x) (((_x) & 0x3ff) << 14) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x) (((_x) >> 14) & 0x3ff) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x) (((_x) & 0x3fff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x) (((_x) >> 0) & 0x3fff) +#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \ + (0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x) (((_x) & 0xf) << 28) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x) (((_x) >> 28) & 0xf) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x) (((_x) & 0x1f) << 23) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x) (((_x) >> 23) & 0x1f) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x) (((_x) & 0x1f) << 18) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x) (((_x) >> 18) & 0x1f) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x) (((_x) & 0xff) << 10) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x) (((_x) >> 10) & 0xff) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x) (((_x) & 0x3ff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff) + +#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x) (((_x) & 0x1f) << 16) +#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f) +#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x) (((_x) & 0xffff) << 0) +#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x) (((_x) >> 0) & 0xffff) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x) (((_x) & 0x3) << 27) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x) (((_x) >> 27) & 0x3) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT 0 +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT 1 +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE 3 +#define NFP_PCIE_BAR_PCIE2CPP_MapType(_x) (((_x) & 0x7) << 29) +#define NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x) (((_x) >> 29) & 0x7) +#define NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED 0 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_BULK 1 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET 2 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL 3 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0 4 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1 5 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2 6 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3 7 +#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x) (((_x) & 0xf) << 23) +#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf) +#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x) (((_x) & 0x3) << 21) +#define 
NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3) +#define NFP_PCIE_EM 0x020000 +#define NFP_PCIE_SRAM 0x000000 + +#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2)) +#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) +#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \ + (0x400 + ((bar) * 8 + (slot)) * 4) + +#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \ + (((bar) * 8 + (slot)) * 4) + +/* The number of explicit BARs to reserve. + * Minimum is 0, maximum is 4 on the NFP6000. + */ +#define NFP_PCIE_EXPLICIT_BARS 2 + +struct nfp6000_pcie; +struct nfp6000_area_priv; + +/** + * struct nfp_bar - describes BAR configuration and usage + * @nfp: backlink to owner + * @barcfg: cached contents of BAR config CSR + * @base: the BAR's base CPP offset + * @mask: mask for the BAR aperture (read only) + * @bitsize: bitsize of BAR aperture (read only) + * @index: index of the BAR + * @refcnt: number of current users + * @iomem: mapped IO memory + * @resource: iomem resource window + */ +struct nfp_bar { + struct nfp6000_pcie *nfp; + u32 barcfg; + u64 base; /* CPP address base */ + u64 mask; /* Bit mask of the bar */ + u32 bitsize; /* Bit size of the bar */ + int index; + atomic_t refcnt; + + void __iomem *iomem; + struct resource *resource; +}; + +#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8) + +struct nfp6000_pcie { + struct pci_dev *pdev; + struct device *dev; + + /* PCI BAR management */ + spinlock_t bar_lock; /* Protect the PCI2CPP BAR cache */ + int bars; + struct nfp_bar bar[NFP_PCI_BAR_MAX]; + wait_queue_head_t bar_waiters; + + /* Reserved BAR access */ + struct { + void __iomem *csr; + void __iomem *em; + void __iomem *expl[4]; + } iomem; + + /* Explicit IO access */ + struct { + struct mutex mutex; /* Lock access to this explicit group */ + u8 master_id; + u8 signal_ref; + void __iomem *data; + struct { + void __iomem *addr; + int bitsize; + int free[4]; + } group[4]; + } expl; +}; + +static u32 nfp_bar_maptype(struct nfp_bar *bar) +{ + return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg); +} + +static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar) +{ + return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8; +} + +static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar) +{ + return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2) + + nfp_bar_resource_len(bar) * (bar->index & 7); +} + +#define TARGET_WIDTH_32 4 +#define TARGET_WIDTH_64 8 + +static int +compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar, + u32 *bar_config, u64 *bar_base, + int tgt, int act, int tok, u64 offset, size_t size, int width) +{ + int bitsize; + u32 newcfg; + + if (tgt >= NFP_CPP_NUM_TARGETS) + return -EINVAL; + + switch (width) { + case 8: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT); + break; + case 4: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); + break; + case 0: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE); + break; + default: + return -EINVAL; + } + + if (act != NFP_CPP_ACTION_RW && act != 0) { + /* Fixed CPP mapping with specific action */ + u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1); + + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType( + 
NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + offset &= mask; + + bitsize = 40 - 16; + } else { + u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1); + + /* Bulk mapping */ + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_BULK); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + + offset &= mask; + + bitsize = 40 - 21; + } + + if (bar->bitsize < bitsize) + return -EINVAL; + + newcfg |= offset >> bitsize; + + if (bar_base) + *bar_base = offset; + + if (bar_config) + *bar_config = newcfg; + + return 0; +} + +static int +nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg) +{ + int base, slot; + int xbar; + + base = bar->index >> 3; + slot = bar->index & 7; + + if (nfp->iomem.csr) { + xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot); + writel(newcfg, nfp->iomem.csr + xbar); + /* Readback to ensure BAR is flushed */ + readl(nfp->iomem.csr + xbar); + } else { + xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot); + pci_write_config_dword(nfp->pdev, xbar, newcfg); + } + + bar->barcfg = newcfg; + + return 0; +} + +static int +reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar, + int tgt, int act, int tok, u64 offset, size_t size, int width) +{ + u64 newbase; + u32 newcfg; + int err; + + err = compute_bar(nfp, bar, &newcfg, &newbase, + tgt, act, tok, offset, size, width); + if (err) + return err; + + bar->base = newbase; + + return nfp6000_bar_write(nfp, bar, newcfg); +} + +/* Check if BAR can be used with the given parameters. 
*/ +static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok, + u64 offset, size_t size, int width) +{ + int bartgt, baract, bartok; + int barwidth; + u32 maptype; + + maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg); + bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg); + bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg); + baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg); + + barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg); + switch (barwidth) { + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT: + barwidth = 4; + break; + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT: + barwidth = 8; + break; + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE: + barwidth = 0; + break; + default: + barwidth = -1; + break; + } + + switch (maptype) { + case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET: + bartok = -1; + /* FALLTHROUGH */ + case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK: + baract = NFP_CPP_ACTION_RW; + if (act == 0) + act = NFP_CPP_ACTION_RW; + /* FALLTHROUGH */ + case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED: + break; + default: + /* We don't match explicit bars through the area interface */ + return 0; + } + + /* Make sure to match up the width */ + if (barwidth != width) + return 0; + + if ((bartgt < 0 || bartgt == tgt) && + (bartok < 0 || bartok == tok) && + (baract == act) && + bar->base <= offset && + (bar->base + (1 << bar->bitsize)) >= (offset + size)) + return 1; + + /* No match */ + return 0; +} + +static int +find_matching_bar(struct nfp6000_pcie *nfp, + u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width) +{ + int n; + + for (n = 0; n < nfp->bars; n++) { + struct nfp_bar *bar = &nfp->bar[n]; + + if (matching_bar(bar, tgt, act, tok, offset, size, width)) + return n; + } + + return -1; +} + +/* Return EAGAIN if no resource is available */ +static int +find_unused_bar_noblock(struct nfp6000_pcie *nfp, + int tgt, int act, int tok, + u64 offset, size_t size, int width) +{ + int n, invalid = 0; + + for (n = 0; n < nfp->bars; n++) { + struct nfp_bar *bar = &nfp->bar[n]; + int err; + + if (bar->bitsize == 0) { + invalid++; + continue; + } + + if (atomic_read(&bar->refcnt) != 0) + continue; + + /* Just check to see if we can make it fit... */ + err = compute_bar(nfp, bar, NULL, NULL, + tgt, act, tok, offset, size, width); + + if (err < 0) + invalid++; + else + return n; + } + + return (n == invalid) ? 
-EINVAL : -EAGAIN; +} + +static int +find_unused_bar_and_lock(struct nfp6000_pcie *nfp, + int tgt, int act, int tok, + u64 offset, size_t size, int width) +{ + unsigned long flags; + int n; + + spin_lock_irqsave(&nfp->bar_lock, flags); + + n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width); + if (n < 0) + spin_unlock_irqrestore(&nfp->bar_lock, flags); + else + __release(&nfp->bar_lock); + + return n; +} + +static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar) +{ + atomic_inc(&bar->refcnt); +} + +static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar) +{ + if (atomic_dec_and_test(&bar->refcnt)) + wake_up_interruptible(&nfp->bar_waiters); +} + +static int +nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum, + u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width) +{ + return wait_event_interruptible(nfp->bar_waiters, + (*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok, + offset, size, width)) + != -EAGAIN); +} + +static int +nfp_alloc_bar(struct nfp6000_pcie *nfp, + u32 tgt, u32 act, u32 tok, + u64 offset, size_t size, int width, int nonblocking) +{ + unsigned long irqflags; + int barnum, retval; + + if (size > (1 << 24)) + return -EINVAL; + + spin_lock_irqsave(&nfp->bar_lock, irqflags); + barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width); + if (barnum >= 0) { + /* Found a perfect match. */ + nfp_bar_get(nfp, &nfp->bar[barnum]); + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + return barnum; + } + + barnum = find_unused_bar_noblock(nfp, tgt, act, tok, + offset, size, width); + if (barnum < 0) { + if (nonblocking) + goto err_nobar; + + /* Wait until a BAR becomes available. The + * find_unused_bar function will reclaim the bar_lock + * if a free BAR is found. + */ + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok, + offset, size, width); + if (retval) + return retval; + __acquire(&nfp->bar_lock); + } + + nfp_bar_get(nfp, &nfp->bar[barnum]); + retval = reconfigure_bar(nfp, &nfp->bar[barnum], + tgt, act, tok, offset, size, width); + if (retval < 0) { + nfp_bar_put(nfp, &nfp->bar[barnum]); + barnum = retval; + } + +err_nobar: + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + return barnum; +} + +static void disable_bars(struct nfp6000_pcie *nfp); + +static int bar_cmp(const void *aptr, const void *bptr) +{ + const struct nfp_bar *a = aptr, *b = bptr; + + if (a->bitsize == b->bitsize) + return a->index - b->index; + else + return a->bitsize - b->bitsize; +} + +/* Map all PCI bars and fetch the actual BAR configurations from the + * board. We assume that the BAR with the PCIe config block is + * already mapped. 
+ * + * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) + * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA) + * BAR0.2: -- + * BAR0.3: -- + * BAR0.4: Reserved for Explicit 0.0-0.3 access + * BAR0.5: Reserved for Explicit 1.0-1.3 access + * BAR0.6: Reserved for Explicit 2.0-2.3 access + * BAR0.7: Reserved for Explicit 3.0-3.3 access + * + * BAR1.0-BAR1.7: -- + * BAR2.0-BAR2.7: -- + */ +static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) +{ + const u32 barcfg_msix_general = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; + const u32 barcfg_msix_xpb = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | + NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( + NFP_CPP_TARGET_ISLAND_XPB); + const u32 barcfg_explicit[4] = { + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3), + }; + struct nfp_bar *bar; + int i, bars_free; + int expl_groups; + + bar = &nfp->bar[0]; + for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) { + struct resource *res; + + res = &nfp->pdev->resource[(i >> 3) * 2]; + + /* Skip over BARs that are not IORESOURCE_MEM */ + if (!(resource_type(res) & IORESOURCE_MEM)) { + bar--; + continue; + } + + bar->resource = res; + bar->barcfg = 0; + + bar->nfp = nfp; + bar->index = i; + bar->mask = nfp_bar_resource_len(bar) - 1; + bar->bitsize = fls(bar->mask); + bar->base = 0; + bar->iomem = NULL; + } + + nfp->bars = bar - &nfp->bar[0]; + if (nfp->bars < 8) { + dev_err(nfp->dev, "No usable BARs found!\n"); + return -EINVAL; + } + + bars_free = nfp->bars; + + /* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70) + */ + mutex_init(&nfp->expl.mutex); + + nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4) + << 4; + nfp->expl.signal_ref = 0x10; + + /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */ + bar = &nfp->bar[0]; + bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + nfp_bar_resource_len(bar)); + if (bar->iomem) { + dev_info(nfp->dev, + "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n"); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp6000_bar_write(nfp, bar, barcfg_msix_general); + + nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000; + } + + if (nfp->pdev->device == PCI_DEVICE_NFP4000 || + nfp->pdev->device == PCI_DEVICE_NFP6000) { + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); + expl_groups = 4; + } else { + int pf = nfp->pdev->devfn & 7; + + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + expl_groups = 1; + } + nfp->iomem.em = bar->iomem + NFP_PCIE_EM; + + /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ + bar = &nfp->bar[1]; + dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n"); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp6000_bar_write(nfp, bar, barcfg_msix_xpb); + + /* Use BAR0.4..BAR0.7 for EXPL IO */ + for (i = 0; i < 4; i++) { + int j; + + if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) { + nfp->expl.group[i].bitsize = 0; + continue; + } + + bar = &nfp->bar[4 + i]; + bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + nfp_bar_resource_len(bar)); + if (bar->iomem) { + dev_info(nfp->dev, + "BAR0.%d RESERVED: Explicit%d Mapping\n", + 4 + i, i); + 
atomic_inc(&bar->refcnt); + bars_free--; + + nfp->expl.group[i].bitsize = bar->bitsize; + nfp->expl.group[i].addr = bar->iomem; + nfp6000_bar_write(nfp, bar, barcfg_explicit[i]); + + for (j = 0; j < 4; j++) + nfp->expl.group[i].free[j] = true; + } + nfp->iomem.expl[i] = bar->iomem; + } + + /* Sort bars by bit size - use the smallest possible first. */ + sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]), + bar_cmp, NULL); + + dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n", + nfp->bars, bars_free); + + return 0; +} + +static void disable_bars(struct nfp6000_pcie *nfp) +{ + struct nfp_bar *bar = &nfp->bar[0]; + int n; + + for (n = 0; n < nfp->bars; n++, bar++) { + if (bar->iomem) { + iounmap(bar->iomem); + bar->iomem = NULL; + } + } +} + +/* + * Generic CPP bus access interface. + */ + +struct nfp6000_area_priv { + atomic_t refcnt; + + struct nfp_bar *bar; + u32 bar_offset; + + u32 target; + u32 action; + u32 token; + u64 offset; + struct { + int read; + int write; + int bar; + } width; + size_t size; + + void __iomem *iomem; + phys_addr_t phys; + struct resource resource; +}; + +static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest, + unsigned long long address, unsigned long size) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + u32 target = NFP_CPP_ID_TARGET_of(dest); + u32 action = NFP_CPP_ID_ACTION_of(dest); + u32 token = NFP_CPP_ID_TOKEN_of(dest); + int pp; + + pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address); + if (pp < 0) + return pp; + + priv->width.read = PUSH_WIDTH(pp); + priv->width.write = PULL_WIDTH(pp); + if (priv->width.read > 0 && + priv->width.write > 0 && + priv->width.read != priv->width.write) { + return -EINVAL; + } + + if (priv->width.read > 0) + priv->width.bar = priv->width.read; + else + priv->width.bar = priv->width.write; + + atomic_set(&priv->refcnt, 0); + priv->bar = NULL; + + priv->target = target; + priv->action = action; + priv->token = token; + priv->offset = address; + priv->size = size; + memset(&priv->resource, 0, sizeof(priv->resource)); + + return 0; +} + +static void nfp6000_area_cleanup(struct nfp_cpp_area *area) +{ +} + +static void priv_area_get(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + atomic_inc(&priv->refcnt); +} + +static int priv_area_put(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + if (WARN_ON(!atomic_read(&priv->refcnt))) + return 0; + + return atomic_dec_and_test(&priv->refcnt); +} + +static int nfp6000_area_acquire(struct nfp_cpp_area *area) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + int barnum, err; + + if (priv->bar) { + /* Already allocated. */ + priv_area_get(area); + return 0; + } + + barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token, + priv->offset, priv->size, priv->width.bar, 1); + + if (barnum < 0) { + err = barnum; + goto err_alloc_bar; + } + priv->bar = &nfp->bar[barnum]; + + /* Calculate offset into BAR. 
*/ + if (nfp_bar_maptype(priv->bar) == + NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) { + priv->bar_offset = priv->offset & + (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET( + priv->bar, priv->target); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET( + priv->bar, priv->token); + } else { + priv->bar_offset = priv->offset & priv->bar->mask; + } + + /* We don't actually try to acquire the resource area using + * request_resource. This would prevent sharing the mapped + * BAR between multiple CPP areas and prevent us from + * effectively utilizing the limited amount of BAR resources. + */ + priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset; + priv->resource.name = nfp_cpp_area_name(area); + priv->resource.start = priv->phys; + priv->resource.end = priv->resource.start + priv->size - 1; + priv->resource.flags = IORESOURCE_MEM; + + /* If the bar is already mapped in, use its mapping */ + if (priv->bar->iomem) + priv->iomem = priv->bar->iomem + priv->bar_offset; + else + /* Must have been too big. Sub-allocate. */ + priv->iomem = ioremap_nocache(priv->phys, priv->size); + + if (IS_ERR_OR_NULL(priv->iomem)) { + dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n", + (int)priv->size, priv->bar->index); + err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem); + priv->iomem = NULL; + goto err_iomem_remap; + } + + priv_area_get(area); + return 0; + +err_iomem_remap: + nfp_bar_put(nfp, priv->bar); + priv->bar = NULL; +err_alloc_bar: + return err; +} + +static void nfp6000_area_release(struct nfp_cpp_area *area) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + if (!priv_area_put(area)) + return; + + if (!priv->bar->iomem) + iounmap(priv->iomem); + + nfp_bar_put(nfp, priv->bar); + + priv->bar = NULL; + priv->iomem = NULL; +} + +static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->phys; +} + +static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->iomem; +} + +static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area) +{ + /* Use the BAR resource as the resource for the CPP area. + * This enables us to share the BAR among multiple CPP areas + * without resource conflicts. + */ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->bar->resource; +} + +static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + u64 __maybe_unused *wrptr64 = kernel_vaddr; + const u64 __iomem __maybe_unused *rdptr64; + struct nfp6000_area_priv *priv; + u32 *wrptr32 = kernel_vaddr; + const u32 __iomem *rdptr32; + int n, width; + bool is_64; + + priv = nfp_cpp_area_priv(area); + rdptr64 = priv->iomem + offset; + rdptr32 = priv->iomem + offset; + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.read; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? 
Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) + return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area), + NFP_CPP_ID(priv->target, + priv->action, + priv->token), + priv->offset + offset, + kernel_vaddr, length, width); + + is_64 = width == TARGET_WIDTH_64; + + /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW) + is_64 = false; + + if (is_64) { + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; + } else { + if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) + return -EINVAL; + } + + if (WARN_ON(!priv->bar)) + return -EFAULT; + + if (is_64) +#ifndef __raw_readq + return -EINVAL; +#else + for (n = 0; n < length; n += sizeof(u64)) + *wrptr64++ = __raw_readq(rdptr64++); +#endif + else + for (n = 0; n < length; n += sizeof(u32)) + *wrptr32++ = __raw_readl(rdptr32++); + + return n; +} + +static int +nfp6000_area_write(struct nfp_cpp_area *area, + const void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + const u64 __maybe_unused *rdptr64 = kernel_vaddr; + u64 __iomem __maybe_unused *wrptr64; + const u32 *rdptr32 = kernel_vaddr; + struct nfp6000_area_priv *priv; + u32 __iomem *wrptr32; + int n, width; + bool is_64; + + priv = nfp_cpp_area_priv(area); + wrptr64 = priv->iomem + offset; + wrptr32 = priv->iomem + offset; + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.write; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) + return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area), + NFP_CPP_ID(priv->target, + priv->action, + priv->token), + priv->offset + offset, + kernel_vaddr, length, width); + + is_64 = width == TARGET_WIDTH_64; + + /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW) + is_64 = false; + + if (is_64) { + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; + } else { + if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) + return -EINVAL; + } + + if (WARN_ON(!priv->bar)) + return -EFAULT; + + if (is_64) +#ifndef __raw_writeq + return -EINVAL; +#else + for (n = 0; n < length; n += sizeof(u64)) { + __raw_writeq(*rdptr64++, wrptr64++); + wmb(); + } +#endif + else + for (n = 0; n < length; n += sizeof(u32)) { + __raw_writel(*rdptr32++, wrptr32++); + wmb(); + } + + return n; +} + +struct nfp6000_explicit_priv { + struct nfp6000_pcie *nfp; + struct { + int group; + int area; + } bar; + int bitsize; + void __iomem *data; + void __iomem *addr; +}; + +static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl)); + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + int i, j; + + mutex_lock(&nfp->expl.mutex); + for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) { + if (!nfp->expl.group[i].bitsize) + continue; + + for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) { + u16 data_offset; + + if (!nfp->expl.group[i].free[j]) + continue; + + priv->nfp = nfp; + priv->bar.group = i; + priv->bar.area = j; + priv->bitsize = nfp->expl.group[i].bitsize - 2; + + data_offset = (priv->bar.group << 9) + + (priv->bar.area << 7); + priv->data = nfp->expl.data + data_offset; + priv->addr = nfp->expl.group[i].addr + + (priv->bar.area << 
priv->bitsize); + nfp->expl.group[i].free[j] = false; + + mutex_unlock(&nfp->expl.mutex); + return 0; + } + } + mutex_unlock(&nfp->expl.mutex); + + return -EAGAIN; +} + +static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + struct nfp6000_pcie *nfp = priv->nfp; + + mutex_lock(&nfp->expl.mutex); + nfp->expl.group[priv->bar.group].free[priv->bar.area] = true; + mutex_unlock(&nfp->expl.mutex); +} + +static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + const u32 *src = buff; + size_t i; + + for (i = 0; i < len; i += sizeof(u32)) + writel(*(src++), priv->data + i); + + return i; +} + +static int +nfp6000_explicit_do(struct nfp_cpp_explicit *expl, + const struct nfp_cpp_explicit_command *cmd, u64 address) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + u8 signal_master, signal_ref, data_master; + struct nfp6000_pcie *nfp = priv->nfp; + int sigmask = 0; + u16 data_ref; + u32 csr[3]; + + if (cmd->siga_mode) + sigmask |= 1 << cmd->siga; + if (cmd->sigb_mode) + sigmask |= 1 << cmd->sigb; + + signal_master = cmd->signal_master; + if (!signal_master) + signal_master = nfp->expl.master_id; + + signal_ref = cmd->signal_ref; + if (signal_master == nfp->expl.master_id) + signal_ref = nfp->expl.signal_ref + + ((priv->bar.group * 4 + priv->bar.area) << 1); + + data_master = cmd->data_master; + if (!data_master) + data_master = nfp->expl.master_id; + + data_ref = cmd->data_ref; + if (data_master == nfp->expl.master_id) + data_ref = 0x1000 + + (priv->bar.group << 9) + (priv->bar.area << 7); + + csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) | + NFP_PCIE_BAR_EXPLICIT_BAR0_Token( + NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16); + + csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) | + NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) | + NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref); + + csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target( + NFP_CPP_ID_TARGET_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR2_Action( + NFP_CPP_ID_ACTION_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) | + NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) | + NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master); + + if (nfp->iomem.csr) { + writel(csr[0], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group, + priv->bar.area)); + writel(csr[1], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group, + priv->bar.area)); + writel(csr[2], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group, + priv->bar.area)); + /* Readback to ensure BAR is flushed */ + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group, + priv->bar.area)); + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group, + priv->bar.area)); + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group, + priv->bar.area)); + } else { + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR0( + priv->bar.group, priv->bar.area), + csr[0]); + + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR1( + priv->bar.group, priv->bar.area), + csr[1]); + + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR2( + priv->bar.group, priv->bar.area), + csr[2]); + } + + /* Issue the 'kickoff' transaction */ + readb(priv->addr + (address & ((1 << priv->bitsize) - 1))); + + return sigmask; +} 
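/*
 * Minimal sketch of a caller on the explicit path, added here only for
 * illustration. It assumes nothing beyond the nfp_cpp_explicit_read() call
 * shape used in nfp6000_area_read() above and the CPP ID macros from
 * nfp6000/nfp6000.h; the helper name read_mu_scalar() is hypothetical and
 * not part of this patch. It reads a single 32-bit scalar from the MU
 * target, the same route area reads fall back to when an access is not
 * naturally aligned for the BAR width; the explicit machinery (put/do/get
 * above) is slower than a bulk BAR mapping but works for any alignment.
 */
static int read_mu_scalar(struct nfp_cpp *cpp, u64 addr, u32 *val)
{
	/* Target MU, generic read/write action, token 0 */
	u32 cpp_id = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0);

	/* width 4 == TARGET_WIDTH_32: data is pushed back 32 bits at a time,
	 * return value is bytes transferred or a negative errno
	 */
	return nfp_cpp_explicit_read(cpp, cpp_id, addr, val, sizeof(*val), 4);
}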
+ +static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl, + void *buff, size_t len) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + u32 *dst = buff; + size_t i; + + for (i = 0; i < len; i += sizeof(u32)) + *(dst++) = readl(priv->data + i); + + return i; +} + +static int nfp6000_init(struct nfp_cpp *cpp) +{ + nfp_cpp_area_cache_add(cpp, SZ_64K); + nfp_cpp_area_cache_add(cpp, SZ_64K); + nfp_cpp_area_cache_add(cpp, SZ_256K); + + return 0; +} + +static void nfp6000_free(struct nfp_cpp *cpp) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp); + + disable_bars(nfp); + kfree(nfp); +} + +static void nfp6000_read_serial(struct device *dev, u8 *serial) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int pos; + u32 reg; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (!pos) { + memset(serial, 0, NFP_SERIAL_LEN); + return; + } + + pci_read_config_dword(pdev, pos + 4, ®); + put_unaligned_be16(reg >> 16, serial + 4); + pci_read_config_dword(pdev, pos + 8, ®); + put_unaligned_be32(reg, serial); +} + +static u16 nfp6000_get_interface(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int pos; + u32 reg; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff); + + pci_read_config_dword(pdev, pos + 4, ®); + + return reg & 0xffff; +} + +static const struct nfp_cpp_operations nfp6000_pcie_ops = { + .owner = THIS_MODULE, + + .init = nfp6000_init, + .free = nfp6000_free, + + .read_serial = nfp6000_read_serial, + .get_interface = nfp6000_get_interface, + + .area_priv_size = sizeof(struct nfp6000_area_priv), + .area_init = nfp6000_area_init, + .area_cleanup = nfp6000_area_cleanup, + .area_acquire = nfp6000_area_acquire, + .area_release = nfp6000_area_release, + .area_phys = nfp6000_area_phys, + .area_iomem = nfp6000_area_iomem, + .area_resource = nfp6000_area_resource, + .area_read = nfp6000_area_read, + .area_write = nfp6000_area_write, + + .explicit_priv_size = sizeof(struct nfp6000_explicit_priv), + .explicit_acquire = nfp6000_explicit_acquire, + .explicit_release = nfp6000_explicit_release, + .explicit_put = nfp6000_explicit_put, + .explicit_do = nfp6000_explicit_do, + .explicit_get = nfp6000_explicit_get, +}; + +/** + * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device + * @pdev: NFP6000 PCI device + * + * Return: NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev) +{ + struct nfp6000_pcie *nfp; + u16 interface; + int err; + + /* Finished with card initialization. 
*/ + dev_info(&pdev->dev, + "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n"); + + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); + if (!nfp) { + err = -ENOMEM; + goto err_ret; + } + + nfp->dev = &pdev->dev; + nfp->pdev = pdev; + init_waitqueue_head(&nfp->bar_waiters); + spin_lock_init(&nfp->bar_lock); + + interface = nfp6000_get_interface(&pdev->dev); + + if (NFP_CPP_INTERFACE_TYPE_of(interface) != + NFP_CPP_INTERFACE_TYPE_PCI) { + dev_err(&pdev->dev, + "Interface type %d is not the expected %d\n", + NFP_CPP_INTERFACE_TYPE_of(interface), + NFP_CPP_INTERFACE_TYPE_PCI); + err = -ENODEV; + goto err_free_nfp; + } + + if (NFP_CPP_INTERFACE_CHANNEL_of(interface) != + NFP_CPP_INTERFACE_CHANNEL_PEROPENER) { + dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n", + NFP_CPP_INTERFACE_CHANNEL_of(interface), + NFP_CPP_INTERFACE_CHANNEL_PEROPENER); + err = -ENODEV; + goto err_free_nfp; + } + + err = enable_bars(nfp, interface); + if (err) + goto err_free_nfp; + + /* Probe for all the common NFP devices */ + return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp); + +err_free_nfp: + kfree(nfp); +err_ret: + dev_err(&pdev->dev, "NFP6000 PCI setup failed\n"); + return ERR_PTR(err); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h new file mode 100644 index 000000000000..245d8aaaa97d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp6000_pcie.h + * Author: Jason McMullan + */ + +#ifndef NFP6000_PCIE_H +#define NFP6000_PCIE_H + +#include "nfp_cpp.h" + +struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev); + +#endif /* NFP6000_PCIE_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h new file mode 100644 index 000000000000..31fe92247f51 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h @@ -0,0 +1,246 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. 
+ * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_arm.h + * Definitions for ARM-based registers and memory spaces + */ + +#ifndef NFP_ARM_H +#define NFP_ARM_H + +#define NFP_ARM_QUEUE(_q) (0x100000 + (0x800 * ((_q) & 0xff))) +#define NFP_ARM_IM 0x200000 +#define NFP_ARM_EM 0x300000 +#define NFP_ARM_GCSR 0x400000 +#define NFP_ARM_MPCORE 0x800000 +#define NFP_ARM_PL310 0xa00000 +/* Register Type: BulkBARConfig */ +#define NFP_ARM_GCSR_BULK_BAR(_bar) (0x0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_BULK_BAR_TYPE (0x1 << 31) +#define NFP_ARM_GCSR_BULK_BAR_TYPE_BULK (0x0) +#define NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA (0x80000000) +#define NFP_ARM_GCSR_BULK_BAR_TGT(_x) (((_x) & 0xf) << 27) +#define NFP_ARM_GCSR_BULK_BAR_TGT_of(_x) (((_x) >> 27) & 0xf) +#define NFP_ARM_GCSR_BULK_BAR_TOK(_x) (((_x) & 0x3) << 25) +#define NFP_ARM_GCSR_BULK_BAR_TOK_of(_x) (((_x) >> 25) & 0x3) +#define NFP_ARM_GCSR_BULK_BAR_LEN (0x1 << 24) +#define NFP_ARM_GCSR_BULK_BAR_LEN_32BIT (0x0) +#define NFP_ARM_GCSR_BULK_BAR_LEN_64BIT (0x1000000) +#define NFP_ARM_GCSR_BULK_BAR_ADDR(_x) ((_x) & 0x7ff) +#define NFP_ARM_GCSR_BULK_BAR_ADDR_of(_x) ((_x) & 0x7ff) +/* Register Type: ExpansionBARConfig */ +#define NFP_ARM_GCSR_EXPA_BAR(_bar) (0x20 + (0x4 * ((_bar) & 0xf))) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE (0x1 << 31) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA (0x0) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL (0x80000000) +#define NFP_ARM_GCSR_EXPA_BAR_TGT(_x) (((_x) & 0xf) << 27) +#define NFP_ARM_GCSR_EXPA_BAR_TGT_of(_x) (((_x) >> 27) & 0xf) +#define NFP_ARM_GCSR_EXPA_BAR_TOK(_x) (((_x) & 0x3) << 25) +#define NFP_ARM_GCSR_EXPA_BAR_TOK_of(_x) (((_x) >> 25) & 0x3) +#define NFP_ARM_GCSR_EXPA_BAR_LEN (0x1 << 24) +#define NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT (0x0) +#define NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT (0x1000000) +#define NFP_ARM_GCSR_EXPA_BAR_ACT(_x) (((_x) & 0x1f) << 19) +#define NFP_ARM_GCSR_EXPA_BAR_ACT_of(_x) (((_x) >> 19) & 0x1f) +#define NFP_ARM_GCSR_EXPA_BAR_ACT_DERIVED (0) +#define NFP_ARM_GCSR_EXPA_BAR_ADDR(_x) ((_x) & 0x7fff) +#define NFP_ARM_GCSR_EXPA_BAR_ADDR_of(_x) ((_x) & 0x7fff) +/* Register Type: ExplicitBARConfig0_Reg */ +#define NFP_ARM_GCSR_EXPL0_BAR(_bar) (0x60 + (0x4 * ((_bar) & 0x7))) +#define 
NFP_ARM_GCSR_EXPL0_BAR_ADDR(_x) ((_x) & 0x3ffff) +#define NFP_ARM_GCSR_EXPL0_BAR_ADDR_of(_x) ((_x) & 0x3ffff) +/* Register Type: ExplicitBARConfig1_Reg */ +#define NFP_ARM_GCSR_EXPL1_BAR(_bar) (0x80 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL1_BAR_POSTED (0x1 << 31) +#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(_x) (((_x) & 0x7f) << 24) +#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF_of(_x) (((_x) >> 24) & 0x7f) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(_x) (((_x) & 0xff) << 16) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER_of(_x) (((_x) >> 16) & 0xff) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(_x) ((_x) & 0x3fff) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF_of(_x) ((_x) & 0x3fff) +/* Register Type: ExplicitBARConfig2_Reg */ +#define NFP_ARM_GCSR_EXPL2_BAR(_bar) (0xa0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL2_BAR_TGT(_x) (((_x) & 0xf) << 28) +#define NFP_ARM_GCSR_EXPL2_BAR_TGT_of(_x) (((_x) >> 28) & 0xf) +#define NFP_ARM_GCSR_EXPL2_BAR_ACT(_x) (((_x) & 0x1f) << 23) +#define NFP_ARM_GCSR_EXPL2_BAR_ACT_of(_x) (((_x) >> 23) & 0x1f) +#define NFP_ARM_GCSR_EXPL2_BAR_LEN(_x) (((_x) & 0x1f) << 18) +#define NFP_ARM_GCSR_EXPL2_BAR_LEN_of(_x) (((_x) >> 18) & 0x1f) +#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(_x) (((_x) & 0xff) << 10) +#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK_of(_x) (((_x) >> 10) & 0xff) +#define NFP_ARM_GCSR_EXPL2_BAR_TOK(_x) (((_x) & 0x3) << 8) +#define NFP_ARM_GCSR_EXPL2_BAR_TOK_of(_x) (((_x) >> 8) & 0x3) +#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(_x) ((_x) & 0xff) +#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER_of(_x) ((_x) & 0xff) +/* Register Type: PostedCommandSignal */ +#define NFP_ARM_GCSR_EXPL_POST(_bar) (0xc0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B(_x) (((_x) & 0x7f) << 25) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_of(_x) (((_x) >> 25) & 0x7f) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS (0x1 << 24) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL (0x0) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH (0x1000000) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A(_x) (((_x) & 0x7f) << 17) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_of(_x) (((_x) >> 17) & 0x7f) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS (0x1 << 16) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL (0x0) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH (0x10000) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_RCVD (0x1 << 7) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID (0x1 << 6) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_RCVD (0x1 << 5) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID (0x1 << 4) +#define NFP_ARM_GCSR_EXPL_POST_CMD_COMPLETE (0x1) +/* Register Type: MPCoreBaseAddress */ +#define NFP_ARM_GCSR_MPCORE_BASE 0x00e0 +#define NFP_ARM_GCSR_MPCORE_BASE_ADDR(_x) (((_x) & 0x7ffff) << 13) +#define NFP_ARM_GCSR_MPCORE_BASE_ADDR_of(_x) (((_x) >> 13) & 0x7ffff) +/* Register Type: PL310BaseAddress */ +#define NFP_ARM_GCSR_PL310_BASE 0x00e4 +#define NFP_ARM_GCSR_PL310_BASE_ADDR(_x) (((_x) & 0xfffff) << 12) +#define NFP_ARM_GCSR_PL310_BASE_ADDR_of(_x) (((_x) >> 12) & 0xfffff) +/* Register Type: MPCoreConfig */ +#define NFP_ARM_GCSR_MP0_CFG 0x00e8 +#define NFP_ARM_GCSR_MP0_CFG_SPI_BOOT (0x1 << 14) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN(_x) (((_x) & 0x3) << 12) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_of(_x) (((_x) >> 12) & 0x3) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_LITTLE (0) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_BIG (1) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR (0x1 << 8) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_LO (0x0) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_HI (0x100) +#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN(_x) (((_x) & 0xf) << 4) 
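Each multi-bit field in the GCSR definitions above pairs a setter macro with a matching _of() accessor, so a raw register word can be packed and unpacked without open-coded shift/mask arithmetic. Below is a minimal sketch of that pattern using the MPCoreConfig helpers just defined; the two helper functions are hypothetical illustrations only (not part of this patch), and cfg is assumed to have been read from NFP_ARM_GCSR + NFP_ARM_GCSR_MP0_CFG beforehand.

/* Illustrative sketch only: decode and re-pack an MPCoreConfig (MP0_CFG)
 * word with the field helpers defined above.  'cfg' is assumed to come
 * from a prior read of NFP_ARM_GCSR + NFP_ARM_GCSR_MP0_CFG.
 */
static inline bool nfp_arm_mp0_cfg_is_big_endian(u32 cfg)
{
	return NFP_ARM_GCSR_MP0_CFG_ENDIAN_of(cfg) ==
	       NFP_ARM_GCSR_MP0_CFG_ENDIAN_BIG;
}

static inline u32 nfp_arm_mp0_cfg_set_little_endian(u32 cfg)
{
	cfg &= ~NFP_ARM_GCSR_MP0_CFG_ENDIAN(0x3);	/* clear the 2-bit field */
	return cfg | NFP_ARM_GCSR_MP0_CFG_ENDIAN(NFP_ARM_GCSR_MP0_CFG_ENDIAN_LITTLE);
}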
+#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN_of(_x) (((_x) >> 4) & 0xf) +#define NFP_ARM_GCSR_MP0_CFG_ARMID(_x) ((_x) & 0xf) +#define NFP_ARM_GCSR_MP0_CFG_ARMID_of(_x) ((_x) & 0xf) +/* Register Type: MPCoreIDCacheDataError */ +#define NFP_ARM_GCSR_MP0_CACHE_ERR 0x00ec +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D7 (0x1 << 15) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D6 (0x1 << 14) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D5 (0x1 << 13) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D4 (0x1 << 12) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D3 (0x1 << 11) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D2 (0x1 << 10) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D1 (0x1 << 9) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D0 (0x1 << 8) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I7 (0x1 << 7) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I6 (0x1 << 6) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I5 (0x1 << 5) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I4 (0x1 << 4) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I3 (0x1 << 3) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I2 (0x1 << 2) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I1 (0x1 << 1) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I0 (0x1) +/* Register Type: ARMDFT */ +#define NFP_ARM_GCSR_DFT 0x0100 +#define NFP_ARM_GCSR_DFT_DBG_REQ (0x1 << 20) +#define NFP_ARM_GCSR_DFT_DBG_EN (0x1 << 19) +#define NFP_ARM_GCSR_DFT_WFE_EVT_TRG (0x1 << 18) +#define NFP_ARM_GCSR_DFT_ETM_WFI_RDY (0x1 << 17) +#define NFP_ARM_GCSR_DFT_ETM_PWR_ON (0x1 << 16) +#define NFP_ARM_GCSR_DFT_BIST_FAIL_of(_x) (((_x) >> 8) & 0xf) +#define NFP_ARM_GCSR_DFT_BIST_DONE_of(_x) (((_x) >> 4) & 0xf) +#define NFP_ARM_GCSR_DFT_BIST_RUN(_x) ((_x) & 0x7) +#define NFP_ARM_GCSR_DFT_BIST_RUN_of(_x) ((_x) & 0x7) + +/* Gasket CSRs */ +/* NOTE: These cannot be remapped, and are always at this location. + */ +#define NFP_ARM_GCSR_START (0xd6000000 + NFP_ARM_GCSR) +#define NFP_ARM_GCSR_SIZE SZ_64K + +/* BAR CSRs + */ +#define NFP_ARM_GCSR_BULK_BITS 11 +#define NFP_ARM_GCSR_EXPA_BITS 15 +#define NFP_ARM_GCSR_EXPL_BITS 18 + +#define NFP_ARM_GCSR_BULK_SHIFT (40 - 11) +#define NFP_ARM_GCSR_EXPA_SHIFT (40 - 15) +#define NFP_ARM_GCSR_EXPL_SHIFT (40 - 18) + +#define NFP_ARM_GCSR_BULK_SIZE (1 << NFP_ARM_GCSR_BULK_SHIFT) +#define NFP_ARM_GCSR_EXPA_SIZE (1 << NFP_ARM_GCSR_EXPA_SHIFT) +#define NFP_ARM_GCSR_EXPL_SIZE (1 << NFP_ARM_GCSR_EXPL_SHIFT) + +#define NFP_ARM_GCSR_EXPL2_CSR(target, action, length, \ + byte_mask, token, signal_master) \ + (NFP_ARM_GCSR_EXPL2_BAR_TGT(target) | \ + NFP_ARM_GCSR_EXPL2_BAR_ACT(action) | \ + NFP_ARM_GCSR_EXPL2_BAR_LEN(length) | \ + NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(byte_mask) | \ + NFP_ARM_GCSR_EXPL2_BAR_TOK(token) | \ + NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(signal_master)) +#define NFP_ARM_GCSR_EXPL1_CSR(posted, signal_ref, data_master, data_ref) \ + (((posted) ? NFP_ARM_GCSR_EXPL1_BAR_POSTED : 0) | \ + NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(signal_ref) | \ + NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(data_master) | \ + NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(data_ref)) +#define NFP_ARM_GCSR_EXPL0_CSR(address) \ + NFP_ARM_GCSR_EXPL0_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPL_SHIFT) +#define NFP_ARM_GCSR_EXPL_POST_EXPECT_A(sig_ref, is_push, is_required) \ + (NFP_ARM_GCSR_EXPL_POST_SIG_A(sig_ref) | \ + ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH : \ + NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL) | \ + ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID : 0)) +#define NFP_ARM_GCSR_EXPL_POST_EXPECT_B(sig_ref, is_push, is_required) \ + (NFP_ARM_GCSR_EXPL_POST_SIG_B(sig_ref) | \ + ((is_push) ? 
NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH : \ + NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL) | \ + ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID : 0)) + +#define NFP_ARM_GCSR_EXPA_CSR(mode, target, token, is_64, action, address) \ + (((mode) ? NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL : \ + NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA) | \ + NFP_ARM_GCSR_EXPA_BAR_TGT(target) | \ + NFP_ARM_GCSR_EXPA_BAR_TOK(token) | \ + ((is_64) ? NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT : \ + NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT) | \ + NFP_ARM_GCSR_EXPA_BAR_ACT(action) | \ + NFP_ARM_GCSR_EXPA_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPA_SHIFT)) + +#define NFP_ARM_GCSR_BULK_CSR(mode, target, token, is_64, address) \ + (((mode) ? NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA : \ + NFP_ARM_GCSR_BULK_BAR_TYPE_BULK) | \ + NFP_ARM_GCSR_BULK_BAR_TGT(target) | \ + NFP_ARM_GCSR_BULK_BAR_TOK(token) | \ + ((is_64) ? NFP_ARM_GCSR_BULK_BAR_LEN_64BIT : \ + NFP_ARM_GCSR_BULK_BAR_LEN_32BIT) | \ + NFP_ARM_GCSR_BULK_BAR_ADDR((address) >> NFP_ARM_GCSR_BULK_SHIFT)) + + /* MP Core CSRs */ +#define NFP_ARM_MPCORE_SIZE SZ_128K + + /* PL320 CSRs */ +#define NFP_ARM_PCSR_SIZE SZ_64K + +#endif /* NFP_ARM_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h new file mode 100644 index 000000000000..f49f12a15669 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -0,0 +1,431 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_cpp.h + * Interface for low-level NFP CPP access. + * Authors: Jason McMullan + * Rolf Neugebauer + */ +#ifndef __NFP_CPP_H__ +#define __NFP_CPP_H__ + +#include +#include + +#ifndef NFP_SUBSYS +#define NFP_SUBSYS "nfp" +#endif + +#define nfp_err(cpp, fmt, args...) \ + dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_warn(cpp, fmt, args...) \ + dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_info(cpp, fmt, args...) \ + dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_dbg(cpp, fmt, args...) 
\ + dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) + +#define PCI_64BIT_BAR_COUNT 3 + +/* NFP hardware vendor/device ids. + */ +#define PCI_DEVICE_NFP4000 0x4000 +#define PCI_DEVICE_NFP6000 0x6000 + +#define NFP_CPP_NUM_TARGETS 16 + +struct device; + +struct nfp_cpp_area; +struct nfp_cpp; +struct resource; + +/* Wildcard indicating a CPP read or write action + * + * The action used will be either read or write depending on whether a + * read or write instruction/call is performed on the NFP_CPP_ID. It + * is recomended that the RW action is used even if all actions to be + * performed on a NFP_CPP_ID are known to be only reads or writes. + * Doing so will in many cases save NFP CPP internal software + * resources. + */ +#define NFP_CPP_ACTION_RW 32 + +#define NFP_CPP_TARGET_ID_MASK 0x1f + +/** + * NFP_CPP_ID() - pack target, token, and action into a CPP ID. + * @target: NFP CPP target id + * @action: NFP CPP action id + * @token: NFP CPP token id + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP + * functions. Some CPP devices may allow wildcard identifiers to be + * specified. + * + * Return: NFP CPP ID + */ +#define NFP_CPP_ID(target, action, token) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8)) + +/** + * NFP_CPP_ISLAND_ID() - pack target, token, action, and island into a CPP ID. + * @target: NFP CPP target id + * @action: NFP CPP action id + * @token: NFP CPP token id + * @island: NFP CPP island id + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP + * functions. Some CPP devices may allow wildcard identifiers to be + * specified. + * + * Return: NFP CPP ID + */ +#define NFP_CPP_ISLAND_ID(target, action, token, island) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8) | (((island) & 0xff) << 0)) + +/** + * NFP_CPP_ID_TARGET_of() - Return the NFP CPP target of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP target + */ +static inline u8 NFP_CPP_ID_TARGET_of(u32 id) +{ + return (id >> 24) & NFP_CPP_TARGET_ID_MASK; +} + +/** + * NFP_CPP_ID_TOKEN_of() - Return the NFP CPP token of a NFP CPP ID + * @id: NFP CPP ID + * Return: NFP CPP token + */ +static inline u8 NFP_CPP_ID_TOKEN_of(u32 id) +{ + return (id >> 16) & 0xff; +} + +/** + * NFP_CPP_ID_ACTION_of() - Return the NFP CPP action of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP action + */ +static inline u8 NFP_CPP_ID_ACTION_of(u32 id) +{ + return (id >> 8) & 0xff; +} + +/** + * NFP_CPP_ID_ISLAND_of() - Return the NFP CPP island of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP island + */ +static inline u8 NFP_CPP_ID_ISLAND_of(u32 id) +{ + return (id >> 0) & 0xff; +} + +/* NFP Interface types - logical interface for this CPP connection + * 4 bits are reserved for interface type. + */ +#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 +#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 +#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 +#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 +#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 + +/** + * NFP_CPP_INTERFACE() - Construct a 16-bit NFP Interface ID + * @type: NFP Interface Type + * @unit: Unit identifier for the interface type + * @channel: Channel identifier for the interface unit + * + * Interface IDs consists of 4 bits of interface type, + * 4 bits of unit identifier, and 8 bits of channel identifier. 
+ * + * The NFP Interface ID is used in the implementation of + * NFP CPP API mutexes, which use the MU Atomic CompareAndWrite + * operation - hence the limit to 16 bits to be able to + * use the NFP Interface ID as a lock owner. + * + * Return: Interface ID + */ +#define NFP_CPP_INTERFACE(type, unit, channel) \ + ((((type) & 0xf) << 12) | \ + (((unit) & 0xf) << 8) | \ + (((channel) & 0xff) << 0)) + +/** + * NFP_CPP_INTERFACE_TYPE_of() - Get the interface type + * @interface: NFP Interface ID + * Return: NFP Interface ID's type + */ +#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf) + +/** + * NFP_CPP_INTERFACE_UNIT_of() - Get the interface unit + * @interface: NFP Interface ID + * Return: NFP Interface ID's unit + */ +#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf) + +/** + * NFP_CPP_INTERFACE_CHANNEL_of() - Get the interface channel + * @interface: NFP Interface ID + * Return: NFP Interface ID's channel + */ +#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff) + +/* Implemented in nfp_cppcore.c */ +void nfp_cpp_free(struct nfp_cpp *cpp); +u32 nfp_cpp_model(struct nfp_cpp *cpp); +u16 nfp_cpp_interface(struct nfp_cpp *cpp); +int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); + +struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, + u32 cpp_id, + const char *name, + unsigned long long address, + unsigned long size); +struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, + unsigned long size); +void nfp_cpp_area_free(struct nfp_cpp_area *area); +int nfp_cpp_area_acquire(struct nfp_cpp_area *area); +int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area); +void nfp_cpp_area_release(struct nfp_cpp_area *area); +void nfp_cpp_area_release_free(struct nfp_cpp_area *area); +int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *buffer, size_t length); +int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, + const void *buffer, size_t length); +int nfp_cpp_area_check_range(struct nfp_cpp_area *area, + unsigned long long offset, unsigned long size); +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area); +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area); +struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area); +phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area); +void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area); + +int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + u32 *value); +int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + u32 value); +int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + u64 *value); +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + u64 value); +int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + u32 value, size_t length); + +int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_tgt, u32 *value); +int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_tgt, u32 value); +int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, u32 mask, u32 value); + +/* Implemented in nfp_cpplib.c */ +int nfp_cpp_read(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, void *kernel_vaddr, size_t length); +int nfp_cpp_write(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, const void *kernel_vaddr, + size_t length); +int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, + unsigned 
long long address, u32 *value); +int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 value); +int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 *value); +int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 value); + +struct nfp_cpp_mutex; + +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key_id); +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, + u32 key_id); +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + +struct nfp_cpp_explicit; + +struct nfp_cpp_explicit_command { + u32 cpp_id; + u16 data_ref; + u8 data_master; + u8 len; + u8 byte_mask; + u8 signal_master; + u8 signal_ref; + u8 posted; + u8 siga; + u8 sigb; + s8 siga_mode; + s8 sigb_mode; +}; + +#define NFP_SERIAL_LEN 6 + +/** + * struct nfp_cpp_operations - NFP CPP operations structure + * @area_priv_size: Size of the nfp_cpp_area private data + * @owner: Owner module + * @init: Initialize the NFP CPP bus + * @free: Free the bus + * @read_serial: Read serial number to memory provided + * @get_interface: Return CPP interface + * @area_init: Initialize a new NFP CPP area (not serialized) + * @area_cleanup: Clean up a NFP CPP area (not serialized) + * @area_acquire: Acquire the NFP CPP area (serialized) + * @area_release: Release area (serialized) + * @area_resource: Get resource range of area (not serialized) + * @area_phys: Get physical address of area (not serialized) + * @area_iomem: Get iomem of area (not serialized) + * @area_read: Perform a read from a NFP CPP area (serialized) + * @area_write: Perform a write to a NFP CPP area (serialized) + * @explicit_priv_size: Size of an explicit's private area + * @explicit_acquire: Acquire an explicit area + * @explicit_release: Release an explicit area + * @explicit_put: Write data to send + * @explicit_get: Read data received + * @explicit_do: Perform the transaction + */ +struct nfp_cpp_operations { + size_t area_priv_size; + struct module *owner; + + int (*init)(struct nfp_cpp *cpp); + void (*free)(struct nfp_cpp *cpp); + + void (*read_serial)(struct device *dev, u8 *serial); + u16 (*get_interface)(struct device *dev); + + int (*area_init)(struct nfp_cpp_area *area, + u32 dest, unsigned long long address, + unsigned long size); + void (*area_cleanup)(struct nfp_cpp_area *area); + int (*area_acquire)(struct nfp_cpp_area *area); + void (*area_release)(struct nfp_cpp_area *area); + struct resource *(*area_resource)(struct nfp_cpp_area *area); + phys_addr_t (*area_phys)(struct nfp_cpp_area *area); + void __iomem *(*area_iomem)(struct nfp_cpp_area *area); + int (*area_read)(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length); + int (*area_write)(struct nfp_cpp_area *area, const void *kernel_vaddr, + unsigned long offset, unsigned int length); + + size_t explicit_priv_size; + int (*explicit_acquire)(struct nfp_cpp_explicit *expl); + void (*explicit_release)(struct nfp_cpp_explicit *expl); + int (*explicit_put)(struct nfp_cpp_explicit *expl, + const void *buff, size_t len); + int (*explicit_get)(struct nfp_cpp_explicit *expl, + void *buff, size_t len); + int (*explicit_do)(struct nfp_cpp_explicit *expl, + const struct nfp_cpp_explicit_command *cmd, + u64 address); +}; + +struct 
nfp_cpp * +nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, + struct device *parent, void *priv); +void *nfp_cpp_priv(struct nfp_cpp *priv); + +int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size); + +/* The following section contains extensions to the + * NFP CPP API, to be used in a Linux kernel-space context. + */ + +/* Use this channel ID for multiple virtual channel interfaces + * (ie ARM and PCIe) when setting up the interface field. + */ +#define NFP_CPP_INTERFACE_CHANNEL_PEROPENER 255 +struct device *nfp_cpp_device(struct nfp_cpp *cpp); + +/* Return code masks for nfp_cpp_explicit_do() + */ +#define NFP_SIGNAL_MASK_A BIT(0) /* Signal A fired */ +#define NFP_SIGNAL_MASK_B BIT(1) /* Signal B fired */ + +enum nfp_cpp_explicit_signal_mode { + NFP_SIGNAL_NONE = 0, + NFP_SIGNAL_PUSH = 1, + NFP_SIGNAL_PUSH_OPTIONAL = -1, + NFP_SIGNAL_PULL = 2, + NFP_SIGNAL_PULL_OPTIONAL = -2, +}; + +struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp); +int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, u32 cpp_id, + u8 len, u8 mask); +int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl, + u8 data_master, u16 data_ref); +int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl, + u8 signal_master, u8 signal_ref); +int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted, + u8 siga, + enum nfp_cpp_explicit_signal_mode siga_mode, + u8 sigb, + enum nfp_cpp_explicit_signal_mode sigb_mode); +int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len); +int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address); +int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len); +void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl); +struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *expl); +void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit); + +/* Implemented in nfp_cpplib.c */ + +int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model); + +int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, void *buff, size_t len, + int width_read); + +int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, const void *buff, size_t len, + int width_write); + +#endif /* !__NFP_CPP_H__ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c new file mode 100644 index 000000000000..8e3322022322 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -0,0 +1,1706 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_cppcore.c + * Provides low-level access to the NFP's internal CPP bus + * Authors: Jakub Kicinski + * Jason McMullan + * Rolf Neugebauer + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nfp_arm.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define NFP_ARM_GCSR_SOFTMODEL2 0x0000014c +#define NFP_ARM_GCSR_SOFTMODEL3 0x00000150 + +struct nfp_cpp_resource { + struct list_head list; + const char *name; + u32 cpp_id; + u64 start; + u64 end; +}; + +struct nfp_cpp_mutex { + struct list_head list; + struct nfp_cpp *cpp; + int target; + u16 usage; + u16 depth; + unsigned long long address; + u32 key; +}; + +struct nfp_cpp { + struct device dev; + + void *priv; /* Private data of the low-level implementation */ + + u32 model; + u16 interface; + u8 serial[NFP_SERIAL_LEN]; + + const struct nfp_cpp_operations *op; + struct list_head resource_list; /* NFP CPP resource list */ + struct list_head mutex_cache; /* Mutex cache */ + rwlock_t resource_lock; + wait_queue_head_t waitq; + + /* NFP6000 CPP Mapping Table */ + u32 imb_cat_table[16]; + + /* Cached areas for cpp/xpb readl/writel speedups */ + struct mutex area_cache_mutex; /* Lock for the area cache */ + struct list_head area_cache_list; +}; + +/* Element of the area_cache_list */ +struct nfp_cpp_area_cache { + struct list_head entry; + u32 id; + u64 addr; + u32 size; + struct nfp_cpp_area *area; +}; + +struct nfp_cpp_area { + struct nfp_cpp *cpp; + struct kref kref; + atomic_t refcount; + struct mutex mutex; /* Lock for the area's refcount */ + unsigned long long offset; + unsigned long size; + struct nfp_cpp_resource resource; + void __iomem *iomem; + /* Here follows the 'priv' part of nfp_cpp_area. */ +}; + +struct nfp_cpp_explicit { + struct nfp_cpp *cpp; + struct nfp_cpp_explicit_command cmd; + /* Here follows the 'priv' part of nfp_cpp_area. 
*/ +}; + +static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res) +{ + struct nfp_cpp_resource *tmp; + struct list_head *pos; + + list_for_each(pos, head) { + tmp = container_of(pos, struct nfp_cpp_resource, list); + + if (tmp->cpp_id > res->cpp_id) + break; + + if (tmp->cpp_id == res->cpp_id && tmp->start > res->start) + break; + } + + list_add_tail(&res->list, pos); +} + +static void __resource_del(struct nfp_cpp_resource *res) +{ + list_del_init(&res->list); +} + +static void __release_cpp_area(struct kref *kref) +{ + struct nfp_cpp_area *area = + container_of(kref, struct nfp_cpp_area, kref); + struct nfp_cpp *cpp = nfp_cpp_area_cpp(area); + + if (area->cpp->op->area_cleanup) + area->cpp->op->area_cleanup(area); + + write_lock(&cpp->resource_lock); + __resource_del(&area->resource); + write_unlock(&cpp->resource_lock); + kfree(area); +} + +static void nfp_cpp_area_put(struct nfp_cpp_area *area) +{ + kref_put(&area->kref, __release_cpp_area); +} + +static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area) +{ + kref_get(&area->kref); + + return area; +} + +/** + * nfp_cpp_free() - free the CPP handle + * @cpp: CPP handle + */ +void nfp_cpp_free(struct nfp_cpp *cpp) +{ + struct nfp_cpp_area_cache *cache, *ctmp; + struct nfp_cpp_resource *res, *rtmp; + struct nfp_cpp_mutex *mutex, *mtmp; + + /* There should be no mutexes in the cache at this point. */ + WARN_ON(!list_empty(&cpp->mutex_cache)); + /* .. but if there are, unlock them and complain. */ + list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) { + dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n", + mutex->target, (unsigned long long)mutex->address, + mutex->depth, mutex->usage); + + /* Forcing an unlock */ + mutex->depth = 1; + nfp_cpp_mutex_unlock(mutex); + + /* Forcing a free */ + mutex->usage = 1; + nfp_cpp_mutex_free(mutex); + } + + /* Remove all caches */ + list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) { + list_del(&cache->entry); + if (cache->id) + nfp_cpp_area_release(cache->area); + nfp_cpp_area_free(cache->area); + kfree(cache); + } + + /* There should be no dangling areas at this point */ + WARN_ON(!list_empty(&cpp->resource_list)); + + /* .. but if they weren't, try to clean up. */ + list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) { + struct nfp_cpp_area *area = container_of(res, + struct nfp_cpp_area, + resource); + + dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n", + NFP_CPP_ID_TARGET_of(res->cpp_id), + NFP_CPP_ID_ACTION_of(res->cpp_id), + NFP_CPP_ID_TOKEN_of(res->cpp_id), + res->start, res->end, + res->name ? " " : "", + res->name ? 
res->name : ""); + + if (area->cpp->op->area_release) + area->cpp->op->area_release(area); + + __release_cpp_area(&area->kref); + } + + if (cpp->op->free) + cpp->op->free(cpp); + + device_unregister(&cpp->dev); + + kfree(cpp); +} + +/** + * nfp_cpp_model() - Retrieve the Model ID of the NFP + * @cpp: NFP CPP handle + * + * Return: NFP CPP Model ID + */ +u32 nfp_cpp_model(struct nfp_cpp *cpp) +{ + return cpp->model; +} + +/** + * nfp_cpp_interface() - Retrieve the Interface ID of the NFP + * @cpp: NFP CPP handle + * + * Return: NFP CPP Interface ID + */ +u16 nfp_cpp_interface(struct nfp_cpp *cpp) +{ + return cpp->interface; +} + +/** + * nfp_cpp_serial() - Retrieve the Serial ID of the NFP + * @cpp: NFP CPP handle + * @serial: Pointer to NFP serial number + * + * Return: Length of NFP serial number + */ +int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial) +{ + *serial = &cpp->serial[0]; + return sizeof(cpp->serial); +} + +/** + * nfp_cpp_area_alloc_with_name() - allocate a new CPP area + * @cpp: CPP device handle + * @dest: NFP CPP ID + * @name: Name of region + * @address: Address of region + * @size: Size of region + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. + * + * Return: NFP CPP area handle, or NULL + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name, + unsigned long long address, unsigned long size) +{ + struct nfp_cpp_area *area; + u64 tmp64 = address; + int err, name_len; + + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table); + if (err < 0) + return NULL; + + address = tmp64; + + if (!name) + name = "(reserved)"; + + name_len = strlen(name) + 1; + area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len, + GFP_KERNEL); + if (!area) + return NULL; + + area->cpp = cpp; + area->resource.name = (void *)area + sizeof(*area) + + cpp->op->area_priv_size; + memcpy((char *)area->resource.name, name, name_len); + + area->resource.cpp_id = dest; + area->resource.start = address; + area->resource.end = area->resource.start + size - 1; + INIT_LIST_HEAD(&area->resource.list); + + atomic_set(&area->refcount, 0); + kref_init(&area->kref); + mutex_init(&area->mutex); + + if (cpp->op->area_init) { + int err; + + err = cpp->op->area_init(area, dest, address, size); + if (err < 0) { + kfree(area); + return NULL; + } + } + + write_lock(&cpp->resource_lock); + __resource_add(&cpp->resource_list, &area->resource); + write_unlock(&cpp->resource_lock); + + area->offset = address; + area->size = size; + + return area; +} + +/** + * nfp_cpp_area_alloc() - allocate a new CPP area + * @cpp: CPP handle + * @dest: CPP id + * @address: Start address on CPP target + * @size: Size of area in bytes + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. + * + * Return: NFP CPP Area handle, or NULL + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest, + unsigned long long address, unsigned long size) +{ + return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size); +} + +/** + * nfp_cpp_area_free() - free up the CPP area + * @area: CPP area handle + * + * Frees up memory resources held by the CPP area. 
+ */ +void nfp_cpp_area_free(struct nfp_cpp_area *area) +{ + nfp_cpp_area_put(area); +} + +/** + * nfp_cpp_area_acquire() - lock down a CPP area for access + * @area: CPP area handle + * + * Locks down the CPP area for a potential long term activity. Area + * must always be locked down before being accessed. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_acquire(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + if (atomic_inc_return(&area->refcount) == 1) { + int (*a_a)(struct nfp_cpp_area *); + + a_a = area->cpp->op->area_acquire; + if (a_a) { + int err; + + wait_event_interruptible(area->cpp->waitq, + (err = a_a(area)) != -EAGAIN); + if (err < 0) { + atomic_dec(&area->refcount); + mutex_unlock(&area->mutex); + return err; + } + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_get(area); + return 0; +} + +/** + * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access + * @area: CPP area handle + * + * Locks down the CPP area for a potential long term activity. Area + * must always be locked down before being accessed. + * + * NOTE: Returns -EAGAIN is no area is available + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + if (atomic_inc_return(&area->refcount) == 1) { + if (area->cpp->op->area_acquire) { + int err; + + err = area->cpp->op->area_acquire(area); + if (err < 0) { + atomic_dec(&area->refcount); + mutex_unlock(&area->mutex); + return err; + } + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_get(area); + return 0; +} + +/** + * nfp_cpp_area_release() - release a locked down CPP area + * @area: CPP area handle + * + * Releases a previously locked down CPP area. + */ +void nfp_cpp_area_release(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + /* Only call the release on refcount == 0 */ + if (atomic_dec_and_test(&area->refcount)) { + if (area->cpp->op->area_release) { + area->cpp->op->area_release(area); + /* Let anyone waiting for a BAR try to get one.. */ + wake_up_interruptible_all(&area->cpp->waitq); + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_put(area); +} + +/** + * nfp_cpp_area_release_free() - release CPP area and free it + * @area: CPP area handle + * + * Releases CPP area and frees up memory resources held by the it. + */ +void nfp_cpp_area_release_free(struct nfp_cpp_area *area) +{ + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); +} + +/** + * nfp_cpp_area_read() - read data from CPP area + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to put data into + * @length: number of bytes to read + * + * Read data from indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_read(struct nfp_cpp_area *area, + unsigned long offset, void *kernel_vaddr, + size_t length) +{ + return area->cpp->op->area_read(area, kernel_vaddr, offset, length); +} + +/** + * nfp_cpp_area_write() - write data to CPP area + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to read data from + * @length: number of bytes to write + * + * Write data to indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. 
+ * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_write(struct nfp_cpp_area *area, + unsigned long offset, const void *kernel_vaddr, + size_t length) +{ + return area->cpp->op->area_write(area, kernel_vaddr, offset, length); +} + +/** + * nfp_cpp_area_check_range() - check if address range fits in CPP area + * @area: CPP area handle + * @offset: offset into CPP target + * @length: size of address range in bytes + * + * Check if address range fits within CPP area. Return 0 if area + * fits or -EFAULT on error. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_check_range(struct nfp_cpp_area *area, + unsigned long long offset, unsigned long length) +{ + if (offset < area->offset || + offset + length > area->offset + area->size) + return -EFAULT; + + return 0; +} + +/** + * nfp_cpp_area_name() - return name of a CPP area + * @cpp_area: CPP area handle + * + * Return: Name of the area, or NULL + */ +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->resource.name; +} + +/** + * nfp_cpp_area_priv() - return private struct for CPP area + * @cpp_area: CPP area handle + * + * Return: Private data for the CPP area + */ +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area) +{ + return &cpp_area[1]; +} + +/** + * nfp_cpp_area_cpp() - return CPP handle for CPP area + * @cpp_area: CPP area handle + * + * Return: NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->cpp; +} + +/** + * nfp_cpp_area_resource() - get resource + * @area: CPP area handle + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: struct resource pointer, or NULL + */ +struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area) +{ + struct resource *res = NULL; + + if (area->cpp->op->area_resource) + res = area->cpp->op->area_resource(area); + + return res; +} + +/** + * nfp_cpp_area_phys() - get physical address of CPP area + * @area: CPP area handle + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: phy_addr_t of the area, or NULL + */ +phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area) +{ + phys_addr_t addr = ~0; + + if (area->cpp->op->area_phys) + addr = area->cpp->op->area_phys(area); + + return addr; +} + +/** + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style + * operations. + * + * NOTE: Area must have been locked down with an 'acquire'. 
+ * + * Return: __iomem pointer to the area, or NULL + */ +void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area) +{ + void __iomem *iomem = NULL; + + if (area->cpp->op->area_iomem) + iomem = area->cpp->op->area_iomem(area); + + return iomem; +} + +/** + * nfp_cpp_area_readl() - Read a u32 word from an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_readl(struct nfp_cpp_area *area, + unsigned long offset, u32 *value) +{ + u8 tmp[4]; + int err; + + err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = get_unaligned_le32(tmp); + + return err; +} + +/** + * nfp_cpp_area_writel() - Write a u32 word to an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_writel(struct nfp_cpp_area *area, + unsigned long offset, u32 value) +{ + u8 tmp[4]; + + put_unaligned_le32(value, tmp); + + return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); +} + +/** + * nfp_cpp_area_readq() - Read a u64 word from an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_readq(struct nfp_cpp_area *area, + unsigned long offset, u64 *value) +{ + u8 tmp[8]; + int err; + + err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = get_unaligned_le64(tmp); + + return err; +} + +/** + * nfp_cpp_area_writeq() - Write a u64 word to an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, + unsigned long offset, u64 value) +{ + u8 tmp[8]; + + put_unaligned_le64(value, tmp); + + return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); +} + +/** + * nfp_cpp_area_fill() - fill a CPP area with a value + * @area: CPP area + * @offset: offset into CPP area + * @value: value to fill with + * @length: length of area to fill + * + * Fill indicated area with given value. + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_fill(struct nfp_cpp_area *area, + unsigned long offset, u32 value, size_t length) +{ + u8 tmp[4]; + size_t i; + int k; + + put_unaligned_le32(value, tmp); + + if (offset % sizeof(tmp) || length % sizeof(tmp)) + return -EINVAL; + + for (i = 0; i < length; i += sizeof(tmp)) { + k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp)); + if (k < 0) + return k; + } + + return i; +} + +/** + * nfp_cpp_area_cache_add() - Permanently reserve and area for the hot cache + * @cpp: NFP CPP handle + * @size: Size of the area - MUST BE A POWER OF 2. + */ +int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + + /* Allocate an area - we use the MU target's base as a placeholder, + * as all supported chips have a MU. 
+ */ + area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0), + 0, size); + if (!area) + return -ENOMEM; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); + if (!cache) + return -ENOMEM; + + cache->id = 0; + cache->addr = 0; + cache->size = size; + cache->area = area; + mutex_lock(&cpp->area_cache_mutex); + list_add_tail(&cache->entry, &cpp->area_cache_list); + mutex_unlock(&cpp->area_cache_mutex); + + return 0; +} + +static struct nfp_cpp_area_cache * +area_cache_get(struct nfp_cpp *cpp, u32 id, + u64 addr, unsigned long *offset, size_t length) +{ + struct nfp_cpp_area_cache *cache; + int err; + + /* Early exit when length == 0, which prevents + * the need for special case code below when + * checking against available cache size. + */ + if (length == 0) + return NULL; + + if (list_empty(&cpp->area_cache_list) || id == 0) + return NULL; + + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table); + if (err < 0) + return NULL; + + addr += *offset; + + mutex_lock(&cpp->area_cache_mutex); + + /* See if we have a match */ + list_for_each_entry(cache, &cpp->area_cache_list, entry) { + if (id == cache->id && + addr >= cache->addr && + addr + length <= cache->addr + cache->size) + goto exit; + } + + /* No matches - inspect the tail of the LRU */ + cache = list_entry(cpp->area_cache_list.prev, + struct nfp_cpp_area_cache, entry); + + /* Can we fit in the cache entry? */ + if (round_down(addr + length - 1, cache->size) != + round_down(addr, cache->size)) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + + /* If id != 0, we will need to release it */ + if (cache->id) { + nfp_cpp_area_release(cache->area); + cache->id = 0; + cache->addr = 0; + } + + /* Adjust the start address to be cache size aligned */ + cache->id = id; + cache->addr = addr & ~(u64)(cache->size - 1); + + /* Re-init to the new ID and address */ + if (cpp->op->area_init) { + err = cpp->op->area_init(cache->area, + id, cache->addr, cache->size); + if (err < 0) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + } + + /* Attempt to acquire */ + err = nfp_cpp_area_acquire(cache->area); + if (err < 0) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + +exit: + /* Adjust offset */ + *offset = addr - cache->addr; + return cache; +} + +static void +area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache) +{ + if (!cache) + return; + + /* Move to front of LRU */ + list_del(&cache->entry); + list_add(&cache->entry, &cpp->area_cache_list); + + mutex_unlock(&cpp->area_cache_mutex); +} + +/** + * nfp_cpp_read() - read from CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer for result + * @length: number of bytes to read + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + unsigned long offset = 0; + int err; + + cache = area_cache_get(cpp, destination, address, &offset, length); + if (cache) { + area = cache->area; + } else { + area = nfp_cpp_area_alloc(cpp, destination, address, length); + if (!area) + return -ENOMEM; + + err = nfp_cpp_area_acquire(area); + if (err) + goto out; + } + + err = nfp_cpp_area_read(area, offset, kernel_vaddr, length); +out: + if (cache) + area_cache_put(cpp, cache); + else + nfp_cpp_area_release_free(area); + + return err; +} + +/** + * nfp_cpp_write() 
- write to CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer to read from + * @length: number of bytes to write + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, + const void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + unsigned long offset = 0; + int err; + + cache = area_cache_get(cpp, destination, address, &offset, length); + if (cache) { + area = cache->area; + } else { + area = nfp_cpp_area_alloc(cpp, destination, address, length); + if (!area) + return -ENOMEM; + + err = nfp_cpp_area_acquire(area); + if (err) + goto out; + } + + err = nfp_cpp_area_write(area, offset, kernel_vaddr, length); + +out: + if (cache) + area_cache_put(cpp, cache); + else + nfp_cpp_area_release_free(area); + + return err; +} + +/* Return the correct CPP address, and fixup xpb_addr as needed. */ +static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr) +{ + int island; + u32 xpb; + + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + /* Ensure that non-local XPB accesses go + * out through the global XPBM bus. + */ + island = (*xpb_addr >> 24) & 0x3f; + if (!island) + return xpb; + + if (island != 1) { + *xpb_addr |= 1 << 30; + return xpb; + } + + /* Accesses to the ARM Island overlay uses Island 0 / Global Bit */ + *xpb_addr &= ~0x7f000000; + if (*xpb_addr < 0x60000) { + *xpb_addr |= 1 << 30; + } else { + /* And only non-ARM interfaces use the island id = 1 */ + if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) + != NFP_CPP_INTERFACE_TYPE_ARM) + *xpb_addr |= 1 << 24; + } + + return xpb; +} + +/** + * nfp_xpb_readl() - Read a u32 word from a XPB location + * @cpp: CPP device handle + * @xpb_addr: Address for operation + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value) +{ + u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value); +} + +/** + * nfp_xpb_writel() - Write a u32 word to a XPB location + * @cpp: CPP device handle + * @xpb_addr: Address for operation + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value) +{ + u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value); +} + +/** + * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus + * @cpp: NFP CPP device handle + * @xpb_tgt: XPB target and address + * @mask: mask of bits to alter + * @value: value to modify + * + * KERNEL: This operation is safe to call in interrupt or softirq context. 
+ * + * Return: length of the io, or -ERRNO + */ +int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, + u32 mask, u32 value) +{ + int err; + u32 tmp; + + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + return err; + + tmp &= ~mask; + tmp |= mask & value; + return nfp_xpb_writel(cpp, xpb_tgt, tmp); +} + +/* Lockdep markers */ +static struct lock_class_key nfp_cpp_resource_lock_key; + +static void nfp_cpp_dev_release(struct device *dev) +{ + /* Nothing to do here - it just makes the kernel happy */ +} + +/** + * nfp_cpp_from_operations() - Create a NFP CPP handle + * from an operations structure + * @ops: NFP CPP operations structure + * @parent: Parent device + * @priv: Private data of low-level implementation + * + * NOTE: On failure, cpp_ops->free will be called! + * + * Return: NFP CPP handle on success, ERR_PTR on failure + */ +struct nfp_cpp * +nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, + struct device *parent, void *priv) +{ + const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0); + struct nfp_cpp *cpp; + u32 mask[2]; + u32 xpbaddr; + size_t tgt; + int err; + + cpp = kzalloc(sizeof(*cpp), GFP_KERNEL); + if (!cpp) { + err = -ENOMEM; + goto err_malloc; + } + + cpp->op = ops; + cpp->priv = priv; + cpp->interface = ops->get_interface(parent); + if (ops->read_serial) + ops->read_serial(parent, cpp->serial); + rwlock_init(&cpp->resource_lock); + init_waitqueue_head(&cpp->waitq); + lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); + INIT_LIST_HEAD(&cpp->mutex_cache); + INIT_LIST_HEAD(&cpp->resource_list); + INIT_LIST_HEAD(&cpp->area_cache_list); + mutex_init(&cpp->area_cache_mutex); + cpp->dev.init_name = "cpp"; + cpp->dev.parent = parent; + cpp->dev.release = nfp_cpp_dev_release; + err = device_register(&cpp->dev); + if (err < 0) { + put_device(&cpp->dev); + goto err_dev; + } + + dev_set_drvdata(&cpp->dev, cpp); + + /* NOTE: cpp_lock is NOT locked for op->init, + * since it may call NFP CPP API operations + */ + if (cpp->op->init) { + err = cpp->op->init(cpp); + if (err < 0) { + dev_err(parent, + "NFP interface initialization failed\n"); + goto err_out; + } + } + + err = nfp_cpp_model_autodetect(cpp, &cpp->model); + if (err < 0) { + dev_err(parent, "NFP model detection failed\n"); + goto err_out; + } + + for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) { + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + (tgt * 4); + err = nfp_xpb_readl(cpp, xpbaddr, + &cpp->imb_cat_table[tgt]); + if (err < 0) { + dev_err(parent, + "Can't read CPP mapping from device\n"); + goto err_out; + } + } + + nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2, + &mask[0]); + nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3, + &mask[1]); + + dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n", + nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp)); + + return cpp; + +err_out: + device_unregister(&cpp->dev); +err_dev: + kfree(cpp); +err_malloc: + return ERR_PTR(err); +} + +/** + * nfp_cpp_priv() - Get the operations private data of a CPP handle + * @cpp: CPP handle + * + * Return: Private data for the NFP CPP handle + */ +void *nfp_cpp_priv(struct nfp_cpp *cpp) +{ + return cpp->priv; +} + +/** + * nfp_cpp_device() - Get the Linux device handle of a CPP handle + * @cpp: CPP handle + * + * Return: Device for the NFP CPP bus + */ +struct device *nfp_cpp_device(struct nfp_cpp *cpp) +{ + return &cpp->dev; +} + +#define NFP_EXPL_OP(func, expl, args...) 
\ + ({ \ + struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \ + int err = -ENODEV; \ + \ + if (cpp->op->func) \ + err = cpp->op->func(expl, ##args); \ + err; \ + }) + +#define NFP_EXPL_OP_NR(func, expl, args...) \ + ({ \ + struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \ + \ + if (cpp->op->func) \ + cpp->op->func(expl, ##args); \ + \ + }) + +/** + * nfp_cpp_explicit_acquire() - Acquire explicit access handle + * @cpp: NFP CPP handle + * + * The 'data_ref' and 'signal_ref' values are useful when + * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values. + * + * Return: NFP CPP explicit handle + */ +struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp) +{ + struct nfp_cpp_explicit *expl; + int err; + + expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL); + if (!expl) + return NULL; + + expl->cpp = cpp; + err = NFP_EXPL_OP(explicit_acquire, expl); + if (err < 0) { + kfree(expl); + return NULL; + } + + return expl; +} + +/** + * nfp_cpp_explicit_set_target() - Set target fields for explicit + * @expl: Explicit handle + * @cpp_id: CPP ID field + * @len: CPP Length field + * @mask: CPP Mask field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, + u32 cpp_id, u8 len, u8 mask) +{ + expl->cmd.cpp_id = cpp_id; + expl->cmd.len = len; + expl->cmd.byte_mask = mask; + + return 0; +} + +/** + * nfp_cpp_explicit_set_data() - Set data fields for explicit + * @expl: Explicit handle + * @data_master: CPP Data Master field + * @data_ref: CPP Data Ref field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl, + u8 data_master, u16 data_ref) +{ + expl->cmd.data_master = data_master; + expl->cmd.data_ref = data_ref; + + return 0; +} + +/** + * nfp_cpp_explicit_set_signal() - Set signal fields for explicit + * @expl: Explicit handle + * @signal_master: CPP Signal Master field + * @signal_ref: CPP Signal Ref field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl, + u8 signal_master, u8 signal_ref) +{ + expl->cmd.signal_master = signal_master; + expl->cmd.signal_ref = signal_ref; + + return 0; +} + +/** + * nfp_cpp_explicit_set_posted() - Set completion fields for explicit + * @expl: Explicit handle + * @posted: True for signaled completion, false otherwise + * @siga: CPP Signal A field + * @siga_mode: CPP Signal A Mode field + * @sigb: CPP Signal B field + * @sigb_mode: CPP Signal B Mode field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted, + u8 siga, + enum nfp_cpp_explicit_signal_mode siga_mode, + u8 sigb, + enum nfp_cpp_explicit_signal_mode sigb_mode) +{ + expl->cmd.posted = posted; + expl->cmd.siga = siga; + expl->cmd.sigb = sigb; + expl->cmd.siga_mode = siga_mode; + expl->cmd.sigb_mode = sigb_mode; + + return 0; +} + +/** + * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access + * @expl: NFP CPP Explicit handle + * @buff: Data to have the target pull in the transaction + * @len: Length of data, in bytes + * + * The 'len' parameter must be less than or equal to 128 bytes. + * + * If this function is called before the configuration + * registers are set, it will return -EINVAL. 
+ * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len) +{ + return NFP_EXPL_OP(explicit_put, expl, buff, len); +} + +/** + * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete + * @expl: NFP CPP Explicit handle + * @address: Address to send in the explicit transaction + * + * If this function is called before the configuration + * registers are set, it will return -1, with an errno of EINVAL. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address) +{ + return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address); +} + +/** + * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access + * @expl: NFP CPP Explicit handle + * @buff: Data that the target pushed in the transaction + * @len: Length of data, in bytes + * + * The 'len' parameter must be less than or equal to 128 bytes. + * + * If this function is called before all three configuration + * registers are set, it will return -1, with an errno of EINVAL. + * + * If this function is called before nfp_cpp_explicit_do() + * has completed, it will return -1, with an errno of EBUSY. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len) +{ + return NFP_EXPL_OP(explicit_get, expl, buff, len); +} + +/** + * nfp_cpp_explicit_release() - Release explicit access handle + * @expl: NFP CPP Explicit handle + * + */ +void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl) +{ + NFP_EXPL_OP_NR(explicit_release, expl); + kfree(expl); +} + +/** + * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit + * @cpp_explicit: CPP explicit handle + * + * Return: NFP CPP handle of the explicit + */ +struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit) +{ + return cpp_explicit->cpp; +} + +/** + * nfp_cpp_explicit_priv() - return private struct for CPP explicit + * @cpp_explicit: CPP explicit handle + * + * Return: private data of the explicit, or NULL + */ +void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit) +{ + return &cpp_explicit[1]; +} + +/* THIS FUNCTION IS NOT EXPORTED */ +static u32 nfp_mutex_locked(u16 interface) +{ + return (u32)interface << 16 | 0x000f; +} + +static u32 nfp_mutex_unlocked(u16 interface) +{ + return (u32)interface << 16 | 0x0000; +} + +static bool nfp_mutex_is_locked(u32 val) +{ + return (val & 0xffff) == 0x000f; +} + +static bool nfp_mutex_is_unlocked(u32 val) +{ + return (val & 0xffff) == 0000; +} + +/* If you need more than 65536 recursive locks, please rethink your code. */ +#define MUTEX_DEPTH_MAX 0xffff + +static int +nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address) +{ + /* Not permitted on invalid interfaces */ + if (NFP_CPP_INTERFACE_TYPE_of(interface) == + NFP_CPP_INTERFACE_TYPE_INVALID) + return -EINVAL; + + /* Address must be 64-bit aligned */ + if (address & 7) + return -EINVAL; + + if (*target != NFP_CPP_TARGET_MU) + return -EINVAL; + + return 0; +} + +/** + * nfp_cpp_mutex_init() - Initialize a mutex location + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: Unique 32-bit value for this mutex + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). 
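
Concretely, judging from the nfp_mutex_locked()/nfp_mutex_unlocked() helpers above, the 64-bit cell initialized here ends up laid out as follows (offsets relative to @address):

	address + 0:	lock word -- owner interface ID in bits 31:16,
			0x000f in the low bits when locked,
			0x0000 when unlocked
	address + 4:	the caller-supplied 32-bit @key
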
+ * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, + int target, unsigned long long address, u32 key) +{ + const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + u16 interface = nfp_cpp_interface(cpp); + int err; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address + 4, key); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface)); + if (err) + return err; + + return 0; +} + +/** + * nfp_cpp_mutex_alloc() - Create a mutex handle + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: 32-bit unique key (must match the key at this location) + * + * The CPP target:address must point to a 64-bit aligned location, and + * reserve 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the + * MU Atomic Engine's CmpAndSwap32 command are supported. + * + * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + */ +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + u16 interface = nfp_cpp_interface(cpp); + struct nfp_cpp_mutex *mutex; + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return NULL; + + /* Look for mutex on cache list */ + list_for_each_entry(mutex, &cpp->mutex_cache, list) { + if (mutex->target == target && mutex->address == address) { + mutex->usage++; + return mutex; + } + } + + err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); + if (err < 0) + return NULL; + + if (tmp != key) + return NULL; + + mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return NULL; + + mutex->cpp = cpp; + mutex->target = target; + mutex->address = address; + mutex->key = key; + mutex->depth = 0; + mutex->usage = 1; + + /* Add mutex to cache list */ + list_add(&mutex->list, &cpp->mutex_cache); + + return mutex; +} + +/** + * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state + * @mutex: NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) +{ + if (--mutex->usage) + return; + + /* Remove mutex from cache */ + list_del(&mutex->list); + kfree(mutex); +} + +/** + * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) +{ + unsigned long warn_at = jiffies + 15 * HZ; + unsigned int timeout_ms = 1; + int err; + + /* We can't use a waitqueue here, because the unlocker + * might be on a separate CPU. + * + * So just wait for now. 
+ */ + for (;;) { + err = nfp_cpp_mutex_trylock(mutex); + if (err != -EBUSY) + break; + + err = msleep_interruptible(timeout_ms); + if (err != 0) + return -ERESTARTSYS; + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + 60 * HZ; + dev_warn(mutex->cpp->dev.parent, + "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n", + mutex->usage, mutex->depth, + mutex->target, mutex->address, mutex->key); + } + } + + return err; +} + +/** + * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value; + u16 interface; + int err; + + interface = nfp_cpp_interface(cpp); + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + if (value != nfp_mutex_locked(interface)) + return -EACCES; + + err = nfp_cpp_writel(cpp, muw, mutex->address, + nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + mutex->depth = 0; + return 0; +} + +/** + * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle + * @mutex: NFP CPP Mutex handle + * + * Return: 0 if the lock succeeded, -errno on failure + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value, tmp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == MUTEX_DEPTH_MAX) + return -E2BIG; + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + /* Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = nfp_mutex_locked(nfp_cpp_interface(cpp)); + + /* We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + return err; + + /* Was it unlocked? */ + if (nfp_mutex_is_unlocked(tmp)) { + /* The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + */ + + /* Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. 
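
A worked example may help (the interface values are made up): if the word at mutex->address was 0x12340000 -- unlocked, last touched by interface 0x1234 -- the test_set_imm read above returns 0x12340000 and leaves the word as 0x1234000f, so this branch is taken and the word is rewritten below to nfp_mutex_locked() for our own interface. Had another interface already owned the lock, the returned value would have been 0xXXXX000f and the function would fall through to return -EBUSY.
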
+ */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + return err; + + mutex->depth = 1; + return 0; + } + + /* Already locked by us? Success! */ + if (tmp == value) { + mutex->depth = 1; + return 0; + } + + return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c new file mode 100644 index 000000000000..0ba0379b8f75 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -0,0 +1,281 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_cpplib.c + * Library of functions to access the NFP's CPP bus + * Authors: Jakub Kicinski + * Jason McMullan + * Rolf Neugebauer + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp6000/nfp_xpb.h" + +/* NFP6000 PL */ +#define NFP_PL_DEVICE_ID 0x00000004 +#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0) + +#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 + +/** + * nfp_cpp_readl() - Read a u32 word from a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 *value) +{ + u8 tmp[4]; + int err; + + err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + *value = get_unaligned_le32(tmp); + + return err; +} + +/** + * nfp_cpp_writel() - Write a u32 word to a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 value) +{ + u8 tmp[4]; + + put_unaligned_le32(value, tmp); + return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); +} + +/** + * nfp_cpp_readq() - Read a u64 word from a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 *value) +{ + u8 tmp[8]; + int err; + + err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + *value = get_unaligned_le64(tmp); + + return err; +} + +/** + * nfp_cpp_writeq() - Write a u64 word to a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 value) +{ + u8 tmp[8]; + + put_unaligned_le64(value, tmp); + return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); +} + +/* NOTE: This code should not use nfp_xpb_* functions, + * as those are model-specific + */ +int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model) +{ + const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); + u32 reg; + int err; + + err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model); + if (err < 0) + return err; + + /* The PL's PluDeviceID revision code is authoratative */ + *model &= ~0xff; + err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, + ®); + if (err < 0) + return err; + + *model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10; + + return 0; +} + +static u8 nfp_bytemask(int width, u64 addr) +{ + if (width == 8) + return 0xff; + else if (width == 4) + return 0x0f << (addr & 4); + else if (width == 2) + return 0x03 << (addr & 6); + else if (width == 1) + return 0x01 << (addr & 7); + else + return 0; +} + +int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, void *buff, size_t len, int width_read) +{ + struct nfp_cpp_explicit *expl; + char *tmp = buff; + int err, i, incr; + u8 byte_mask; + + if (len & (width_read - 1)) + return -EINVAL; + + expl = nfp_cpp_explicit_acquire(cpp); + if (!expl) + return -EBUSY; + + incr = min_t(int, 16 
* width_read, 128); + incr = min_t(int, incr, len); + + /* Translate a NFP_CPP_ACTION_RW to action 0 */ + if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW) + cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 0, + NFP_CPP_ID_TOKEN_of(cpp_id)); + + byte_mask = nfp_bytemask(width_read, addr); + + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_read - 1, byte_mask); + nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH, + 0, NFP_SIGNAL_NONE); + + for (i = 0; i < len; i += incr, addr += incr, tmp += incr) { + if (i + incr > len) { + incr = len - i; + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_read - 1, + 0xff); + } + + err = nfp_cpp_explicit_do(expl, addr); + if (err < 0) + goto exit_release; + + err = nfp_cpp_explicit_get(expl, tmp, incr); + if (err < 0) + goto exit_release; + } + err = len; +exit_release: + nfp_cpp_explicit_release(expl); + + return err; +} + +int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, + const void *buff, size_t len, int width_write) +{ + struct nfp_cpp_explicit *expl; + const char *tmp = buff; + int err, i, incr; + u8 byte_mask; + + if (len & (width_write - 1)) + return -EINVAL; + + expl = nfp_cpp_explicit_acquire(cpp); + if (!expl) + return -EBUSY; + + incr = min_t(int, 16 * width_write, 128); + incr = min_t(int, incr, len); + + /* Translate a NFP_CPP_ACTION_RW to action 1 */ + if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW) + cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 1, + NFP_CPP_ID_TOKEN_of(cpp_id)); + + byte_mask = nfp_bytemask(width_write, addr); + + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_write - 1, byte_mask); + nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PULL, + 0, NFP_SIGNAL_NONE); + + for (i = 0; i < len; i += incr, addr += incr, tmp += incr) { + if (i + incr > len) { + incr = len - i; + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_write - 1, + 0xff); + } + + err = nfp_cpp_explicit_put(expl, tmp, incr); + if (err < 0) + goto exit_release; + + err = nfp_cpp_explicit_do(expl, addr); + if (err < 0) + goto exit_release; + } + err = len; +exit_release: + nfp_cpp_explicit_release(expl); + + return err; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c new file mode 100644 index 000000000000..4ea1e585d945 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c @@ -0,0 +1,764 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_target.c + * CPP Access Width Decoder + * Authors: Jakub Kicinski + * Jason McMullan + * Francois H. Theron + */ + +#include + +#include "nfp_cpp.h" + +#include "nfp6000/nfp6000.h" + +#define P32 1 +#define P64 2 + +/* This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. + */ + +#define AT(_action, _token, _pull, _push) \ + case NFP_CPP_ID(0, (_action), (_token)): \ + return PUSHPULL((_pull), (_push)) + +static int target_rw(u32 cpp_id, int pp, int start, int len) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, pp); + AT(1, 0, pp, 0); + AT(NFP_CPP_ACTION_RW, 0, pp, pp); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_dma(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiDma */ + AT(1, 0, P64, 0); /* WriteNbiDma */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_stats(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P32); /* ReadNbiStats */ + AT(1, 0, P32, 0); /* WriteNbiStats */ + AT(NFP_CPP_ACTION_RW, 0, P32, P32); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_tm(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiTM */ + AT(1, 0, P64, 0); /* WriteNbiTM */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_ppc(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiPreclassifier */ + AT(1, 0, P64, 0); /* WriteNbiPreclassifier */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi(u32 cpp_id, u64 address) +{ + u64 rel_addr = address & 0x3fFFFF; + + if (rel_addr < (1 << 20)) + return nfp6000_nbi_dma(cpp_id); + if (rel_addr < (2 << 20)) + return nfp6000_nbi_stats(cpp_id); + if (rel_addr < (3 << 20)) + return nfp6000_nbi_tm(cpp_id); + return nfp6000_nbi_ppc(cpp_id); +} + +/* This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. 
+ */ +static int nfp6000_mu_common(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(NFP_CPP_ACTION_RW, 0, P64, P64); /* read_be/write_be */ + AT(NFP_CPP_ACTION_RW, 1, P64, P64); /* read_le/write_le */ + AT(NFP_CPP_ACTION_RW, 2, P64, P64); /* read_swap_be/write_swap_be */ + AT(NFP_CPP_ACTION_RW, 3, P64, P64); /* read_swap_le/write_swap_le */ + AT(0, 0, 0, P64); /* read_be */ + AT(0, 1, 0, P64); /* read_le */ + AT(0, 2, 0, P64); /* read_swap_be */ + AT(0, 3, 0, P64); /* read_swap_le */ + AT(1, 0, P64, 0); /* write_be */ + AT(1, 1, P64, 0); /* write_le */ + AT(1, 2, P64, 0); /* write_swap_be */ + AT(1, 3, P64, 0); /* write_swap_le */ + AT(3, 0, 0, P32); /* atomic_read */ + AT(3, 2, P32, 0); /* mask_compare_write */ + AT(4, 0, P32, 0); /* atomic_write */ + AT(4, 2, 0, 0); /* atomic_write_imm */ + AT(4, 3, 0, P32); /* swap_imm */ + AT(5, 0, P32, 0); /* set */ + AT(5, 3, 0, P32); /* test_set_imm */ + AT(6, 0, P32, 0); /* clr */ + AT(6, 3, 0, P32); /* test_clr_imm */ + AT(7, 0, P32, 0); /* add */ + AT(7, 3, 0, P32); /* test_add_imm */ + AT(8, 0, P32, 0); /* addsat */ + AT(8, 3, 0, P32); /* test_subsat_imm */ + AT(9, 0, P32, 0); /* sub */ + AT(9, 3, 0, P32); /* test_sub_imm */ + AT(10, 0, P32, 0); /* subsat */ + AT(10, 3, 0, P32); /* test_subsat_imm */ + AT(13, 0, 0, P32); /* microq128_get */ + AT(13, 1, 0, P32); /* microq128_pop */ + AT(13, 2, P32, 0); /* microq128_put */ + AT(15, 0, P32, 0); /* xor */ + AT(15, 3, 0, P32); /* test_xor_imm */ + AT(28, 0, 0, P32); /* read32_be */ + AT(28, 1, 0, P32); /* read32_le */ + AT(28, 2, 0, P32); /* read32_swap_be */ + AT(28, 3, 0, P32); /* read32_swap_le */ + AT(31, 0, P32, 0); /* write32_be */ + AT(31, 1, P32, 0); /* write32_le */ + AT(31, 2, P32, 0); /* write32_swap_be */ + AT(31, 3, P32, 0); /* write32_swap_le */ + default: + return -EINVAL; + } +} + +static int nfp6000_mu_ctm(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(16, 1, 0, P32); /* packet_read_packet_status */ + AT(17, 1, 0, P32); /* packet_credit_get */ + AT(17, 3, 0, P64); /* packet_add_thread */ + AT(18, 2, 0, P64); /* packet_free_and_return_pointer */ + AT(18, 3, 0, P64); /* packet_return_pointer */ + AT(21, 0, 0, P64); /* pe_dma_to_memory_indirect */ + AT(21, 1, 0, P64); /* pe_dma_to_memory_indirect_swap */ + AT(21, 2, 0, P64); /* pe_dma_to_memory_indirect_free */ + AT(21, 3, 0, P64); /* pe_dma_to_memory_indirect_free_swap */ + default: + return nfp6000_mu_common(cpp_id); + } +} + +static int nfp6000_mu_emu(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(18, 0, 0, P32); /* read_queue */ + AT(18, 1, 0, P32); /* read_queue_ring */ + AT(18, 2, P32, 0); /* write_queue */ + AT(18, 3, P32, 0); /* write_queue_ring */ + AT(20, 2, P32, 0); /* journal */ + AT(21, 0, 0, P32); /* get */ + AT(21, 1, 0, P32); /* get_eop */ + AT(21, 2, 0, P32); /* get_freely */ + AT(22, 0, 0, P32); /* pop */ + AT(22, 1, 0, P32); /* pop_eop */ + AT(22, 2, 0, P32); /* pop_freely */ + default: + return nfp6000_mu_common(cpp_id); + } +} + +static int nfp6000_mu_imu(u32 cpp_id) +{ + return nfp6000_mu_common(cpp_id); +} + +static int nfp6000_mu(u32 cpp_id, u64 address) +{ + int pp; + + if (address < 0x2000000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x8000000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0x9800000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x9C00000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0xA000000000ULL) + pp = nfp6000_mu_imu(cpp_id); + else + pp = nfp6000_mu_ctm(cpp_id); + + return pp; +} + 
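
A note on the encoding used throughout this file: each AT() entry expands to PUSHPULL(pull, push), a helper that lives in nfp_cpp.h rather than in this hunk. Assuming it simply packs the two width codes into one nibble each (P32 = 1 for 32-bit, P64 = 2 for 64-bit), a caller can turn the return value of nfp_target_pushpull() below back into byte widths with something like the sketch here -- illustrative only, the authoritative definitions are in nfp_cpp.h:

	#define PUSHPULL(_pull, _push)	((_pull) << 4 | (_push) << 0)

	static int pushpull_width(int pp)
	{
		pp &= 0xf;

		if (pp == 0)
			return -EINVAL;
		return 2 << pp;		/* P32 -> 4 bytes, P64 -> 8 bytes */
	}

	#define PUSH_WIDTH(_pushpull)	pushpull_width((_pushpull) >> 0)
	#define PULL_WIDTH(_pushpull)	pushpull_width((_pushpull) >> 4)
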
+static int nfp6000_ila(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 1, 0, P32); /* read_check_error */ + AT(2, 0, 0, P32); /* read_int */ + AT(3, 0, P32, 0); /* write_int */ + default: + return target_rw(cpp_id, P32, 48, 4); + } +} + +static int nfp6000_pci(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(2, 0, 0, P32); + AT(3, 0, P32, 0); + default: + return target_rw(cpp_id, P32, 4, 4); + } +} + +static int nfp6000_crypto(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(2, 0, P64, 0); + default: + return target_rw(cpp_id, P64, 12, 4); + } +} + +static int nfp6000_cap_xpb(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 1, 0, P32); /* RingGet */ + AT(0, 2, P32, 0); /* Interthread Signal */ + AT(1, 1, P32, 0); /* RingPut */ + AT(1, 2, P32, 0); /* CTNNWr */ + AT(2, 0, 0, P32); /* ReflectRd, signal none */ + AT(2, 1, 0, P32); /* ReflectRd, signal self */ + AT(2, 2, 0, P32); /* ReflectRd, signal remote */ + AT(2, 3, 0, P32); /* ReflectRd, signal both */ + AT(3, 0, P32, 0); /* ReflectWr, signal none */ + AT(3, 1, P32, 0); /* ReflectWr, signal self */ + AT(3, 2, P32, 0); /* ReflectWr, signal remote */ + AT(3, 3, P32, 0); /* ReflectWr, signal both */ + AT(NFP_CPP_ACTION_RW, 1, P32, P32); + default: + return target_rw(cpp_id, P32, 1, 63); + } +} + +static int nfp6000_cls(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 3, P32, 0); /* xor */ + AT(2, 0, P32, 0); /* set */ + AT(2, 1, P32, 0); /* clr */ + AT(4, 0, P32, 0); /* add */ + AT(4, 1, P32, 0); /* add64 */ + AT(6, 0, P32, 0); /* sub */ + AT(6, 1, P32, 0); /* sub64 */ + AT(6, 2, P32, 0); /* subsat */ + AT(8, 2, P32, 0); /* hash_mask */ + AT(8, 3, P32, 0); /* hash_clear */ + AT(9, 0, 0, P32); /* ring_get */ + AT(9, 1, 0, P32); /* ring_pop */ + AT(9, 2, 0, P32); /* ring_get_freely */ + AT(9, 3, 0, P32); /* ring_pop_freely */ + AT(10, 0, P32, 0); /* ring_put */ + AT(10, 2, P32, 0); /* ring_journal */ + AT(14, 0, P32, 0); /* reflect_write_sig_local */ + AT(15, 1, 0, P32); /* reflect_read_sig_local */ + AT(17, 2, P32, 0); /* statisic */ + AT(24, 0, 0, P32); /* ring_read */ + AT(24, 1, P32, 0); /* ring_write */ + AT(25, 0, 0, P32); /* ring_workq_add_thread */ + AT(25, 1, P32, 0); /* ring_workq_add_work */ + default: + return target_rw(cpp_id, P32, 0, 64); + } +} + +int nfp_target_pushpull(u32 cpp_id, u64 address) +{ + switch (NFP_CPP_ID_TARGET_of(cpp_id)) { + case NFP_CPP_TARGET_NBI: + return nfp6000_nbi(cpp_id, address); + case NFP_CPP_TARGET_QDR: + return target_rw(cpp_id, P32, 24, 4); + case NFP_CPP_TARGET_ILA: + return nfp6000_ila(cpp_id); + case NFP_CPP_TARGET_MU: + return nfp6000_mu(cpp_id, address); + case NFP_CPP_TARGET_PCIE: + return nfp6000_pci(cpp_id); + case NFP_CPP_TARGET_ARM: + if (address < 0x10000) + return target_rw(cpp_id, P64, 1, 1); + else + return target_rw(cpp_id, P32, 1, 1); + case NFP_CPP_TARGET_CRYPTO: + return nfp6000_crypto(cpp_id); + case NFP_CPP_TARGET_CT_XPB: + return nfp6000_cap_xpb(cpp_id); + case NFP_CPP_TARGET_CLS: + return nfp6000_cls(cpp_id); + case 0: + return target_rw(cpp_id, P32, 4, 4); + default: + return -EINVAL; + } +} + +#undef AT +#undef P32 +#undef P64 + +/* All magic NFP-6xxx IMB 'mode' numbers here are from: + * Databook (1 August 2013) + * - System Overview and Connectivity + * -- Internal Connectivity + * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus + * ---- CPP addressing + * ----- Table 3.6. 
CPP Address Translation Mode Commands + */ + +#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2 + +static int nfp_decode_basic(u64 addr, int *dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + + /* This function doesn't handle MU or CTXBP */ + if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB) + return -EINVAL; + + switch (mode) { + case 0: + /* For VQDR, in this mode for 32-bit addressing + * it would be islands 0, 16, 32 and 48 depending on channel + * and upper address bits. + * Since those are not all valid islands, most decode + * cases would result in bad island IDs, but we do them + * anyway since this is decoding an address that is already + * assumed to be used as-is to get to sram. + */ + iid_lsb = addr40 ? 34 : 26; + *dest_island = (addr >> iid_lsb) & 0x3F; + return 0; + case 1: + /* For VQDR 32-bit, this would decode as: + * Channel 0: island#0 + * Channel 1: island#0 + * Channel 2: island#1 + * Channel 3: island#1 + * That would be valid as long as both islands + * have VQDR. Let's allow this. + */ + idx_lsb = addr40 ? 39 : 31; + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + /* For VQDR 32-bit: + * Channel 0: (island#0 | 0) + * Channel 1: (island#0 | 1) + * Channel 2: (island#1 | 0) + * Channel 3: (island#1 | 1) + * + * Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 1; + + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + /* In this mode the data address starts to affect the island ID + * so rather not allow it. In some really specific case + * one could use this to send the upper half of the + * VQDR channel to another MU, but this is getting very + * specific. + * However, as above for mode 0, this is the decoder + * and the caller should validate the resulting IID. + * This blindly does what the silicon would do. + */ + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 2; + + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + return -EINVAL; + } +} + +static int nfp_encode_basic_qdr(u64 addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int v, ret; + + /* Full Island ID and channel bits overlap? */ + ret = nfp_decode_basic(addr, &v, cpp_tgt, mode, addr40, isld1, isld0); + if (ret) + return ret; + + /* The current address won't go where expected? */ + if (dest_island != -1 && dest_island != v) + return -EINVAL; + + /* If dest_island was -1, we don't care where it goes. */ + return 0; +} + +/* Try each option, take first one that fits. + * Not sure if we would want to do some smarter + * searching and prefer 0 or non-0 island IDs. 
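
To make the mode 2/3 cases below concrete, a worked example with made-up island numbers: mode 2, addr40 set, isld0 = 32, isld1 = 36. Then idx_lsb = 39 and iid_lsb = 38, and encoding dest_island = 37 matches isld1 | 1, so the search picks i = 1, v = 1 and sets both addr<39> and addr<38> (after first clearing that bit range in the address).
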
+ */ +static int nfp_encode_basic_search(u64 *addr, int dest_island, int *isld, + int iid_lsb, int idx_lsb, int v_max) +{ + int i, v; + + for (i = 0; i < 2; i++) + for (v = 0; v < v_max; v++) { + if (dest_island != (isld[i] | v)) + continue; + + *addr &= ~GENMASK_ULL(idx_lsb, iid_lsb); + *addr |= ((u64)i << idx_lsb); + *addr |= ((u64)v << iid_lsb); + return 0; + } + + return -ENODEV; +} + +/* For VQDR, we may not modify the Channel bits, which might overlap + * with the Index bit. When it does, we need to ensure that isld0 == isld1. + */ +static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + int isld[2]; + u64 v64; + + isld[0] = isld0; + isld[1] = isld1; + + /* This function doesn't handle MU or CTXBP */ + if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB) + return -EINVAL; + + switch (mode) { + case 0: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* In this specific mode we'd rather not modify + * the address but we can verify if the existing + * contents will point to a valid island. + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + iid_lsb = addr40 ? 34 : 26; + /* <39:34> or <31:26> */ + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= ((u64)dest_island << iid_lsb) & v64; + return 0; + case 1: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + idx_lsb = addr40 ? 39 : 31; + if (dest_island == isld0) { + /* Only need to clear the Index bit */ + *addr &= ~BIT_ULL(idx_lsb); + return 0; + } + + if (dest_island == isld1) { + /* Only need to set the Index bit */ + *addr |= BIT_ULL(idx_lsb); + return 0; + } + + return -ENODEV; + case 2: + /* iid<0> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* Special case where we allow channel bits to + * be set before hand and with them select an island. + * So we need to confirm that it's at least plausible. + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + /* Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 1; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 2); + case 3: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* iid<0> = addr<29> = data + * iid<1> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 2; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 4); + default: + return -EINVAL; + } +} + +static int nfp_encode_mu(u64 *addr, int dest_island, int mode, + bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb, locality_lsb; + int isld[2]; + u64 v64; + int da; + + isld[0] = isld0; + isld[1] = isld1; + locality_lsb = nfp_cppat_mu_locality_lsb(mode, addr40); + + if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = addr40 ? 
32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + case 1: + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + idx_lsb = addr40 ? 37 : 29; + if (dest_island == isld0) { + *addr &= ~BIT_ULL(idx_lsb); + return 0; + } + + if (dest_island == isld1) { + *addr |= BIT_ULL(idx_lsb); + return 0; + } + + return -ENODEV; + case 2: + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + /* Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = addr40 ? 37 : 29; + iid_lsb = idx_lsb - 1; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 2); + case 3: + /* Only the EMU will use 40 bit addressing. Silently + * set the direct locality bit for everyone else. + * The SDK toolchain uses dest_island <= 0 to test + * for atypical address encodings to support access + * to local-island CTM with a 32-but address (high-locality + * is effewctively ignored and just used for + * routing to island #0). + */ + if (dest_island > 0 && (dest_island < 24 || dest_island > 26)) { + *addr |= ((u64)_NIC_NFP6000_MU_LOCALITY_DIRECT) + << locality_lsb; + da = 1; + } + + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = addr40 ? 37 : 29; + iid_lsb = idx_lsb - 2; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 4); + default: + return -EINVAL; + } +} + +static int nfp_cppat_addr_encode(u64 *addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP_CPP_TARGET_NBI: + case NFP_CPP_TARGET_QDR: + case NFP_CPP_TARGET_ILA: + case NFP_CPP_TARGET_PCIE: + case NFP_CPP_TARGET_ARM: + case NFP_CPP_TARGET_CRYPTO: + case NFP_CPP_TARGET_CLS: + return nfp_encode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP_CPP_TARGET_MU: + return nfp_encode_mu(addr, dest_island, mode, + addr40, isld1, isld0); + + case NFP_CPP_TARGET_CT_XPB: + if (mode != 1 || addr40) + return -EINVAL; + *addr &= ~GENMASK_ULL(29, 24); + *addr |= ((u64)dest_island << 24) & GENMASK_ULL(29, 24); + return 0; + default: + return -EINVAL; + } +} + +int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, + u32 *cpp_target_id, u64 *cpp_target_address, + const u32 *imb_table) +{ + const int island = NFP_CPP_ID_ISLAND_of(cpp_island_id); + const int target = NFP_CPP_ID_TARGET_of(cpp_island_id); + u32 imb; + int err; + + if (target < 0 || target >= 16) + return -EINVAL; + + if (island == 0) { + /* Already translated */ + *cpp_target_id = cpp_island_id; + *cpp_target_address = cpp_island_address; + return 0; + } + + /* CPP + Island only allowed on systems with IMB tables */ + if (!imb_table) + return -EINVAL; + + imb = imb_table[target]; + + *cpp_target_address = cpp_island_address; + err = nfp_cppat_addr_encode(cpp_target_address, island, target, + ((imb >> 13) & 7), ((imb >> 12) & 1), + ((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f)); + if (err) + return err; + + *cpp_target_id = NFP_CPP_ID(target, + NFP_CPP_ID_ACTION_of(cpp_island_id), + 
NFP_CPP_ID_TOKEN_of(cpp_island_id)); + + return 0; +} -- cgit v1.2.3 From f01a2161577d31b14581e0db3bbbdfa963f145b6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:30 -0800 Subject: nfp: add support for resources Resource table is an array placed in a well defined location in device's memory which describes device resources and contains locks which have to be acquired to use them. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h | 65 +++++ drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 88 +++++++ .../ethernet/netronome/nfp/nfpcore/nfp_resource.c | 279 +++++++++++++++++++++ 4 files changed, 433 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index fb9dadf9236d..47010b7d3efa 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -4,6 +4,7 @@ nfp-objs := \ nfpcore/nfp6000_pcie.o \ nfpcore/nfp_cppcore.o \ nfpcore/nfp_cpplib.o \ + nfpcore/nfp_resource.o \ nfpcore/nfp_target.o \ nfp_main.o \ nfp_net_common.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h new file mode 100644 index 000000000000..6cee6382deb4 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NFP_CRC32_H +#define NFP_CRC32_H + +#include +#include + +/** + * crc32_posix_end() - Finalize POSIX CRC32 working state + * @crc: Current CRC32 working state + * @total_len: Total length of data that was CRC32'd + * + * Return: Final POSIX CRC32 value + */ +static inline u32 crc32_posix_end(u32 crc, size_t total_len) +{ + /* Extend with the length of the string. 
*/ + while (total_len != 0) { + u8 c = total_len & 0xff; + + crc = crc32_be(crc, &c, 1); + total_len >>= 8; + } + + return ~crc; +} + +static inline u32 crc32_posix(const void *buff, size_t len) +{ + return crc32_posix_end(crc32_be(0, buff, len), len); +} + +#endif /* NFP_CRC32_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h new file mode 100644 index 000000000000..1aa57161f23e --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp.h + * Interface for NFP device access and query functions. 
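
One practical use of the crc32_posix() helper above: the resource table entries declared below are located by the POSIX CRC32 of their name, NUL-padded to exactly 8 bytes (entry 0, the table itself, uses the special key 0). A minimal sketch of how such a key would be derived -- the resource name is just an example:

	u8 name_pad[8] = {};
	u32 key;

	memcpy(name_pad, "nfp.sp", sizeof("nfp.sp") - 1);
	key = crc32_posix(name_pad, sizeof(name_pad));
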
+ */ + +#ifndef __NFP_H__ +#define __NFP_H__ + +#include + +#include "nfp_cpp.h" + +/* Implemented in nfp_resource.c */ + +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL + +/* NFP Resource Table self-identifier */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + +/* All other keys are CRC32-POSIX of the 8-byte identification string */ + +/* ARM/PCI vNIC Interfaces 0..3 */ +#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0" +#define NFP_RESOURCE_VNIC_PCI_1 "vnic.p1" +#define NFP_RESOURCE_VNIC_PCI_2 "vnic.p2" +#define NFP_RESOURCE_VNIC_PCI_3 "vnic.p3" + +/* NFP Hardware Info Database */ +#define NFP_RESOURCE_NFP_HWINFO "nfp.info" + +/* Service Processor */ +#define NFP_RESOURCE_NSP "nfp.sp" + +/* Netronone Flow Firmware Table */ +#define NFP_RESOURCE_NFP_NFFW "nfp.nffw" + +/* MAC Statistics Accumulator */ +#define NFP_RESOURCE_MAC_STATISTICS "mac.stat" + +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); + +void nfp_resource_release(struct nfp_resource *res); + +u32 nfp_resource_cpp_id(struct nfp_resource *res); + +const char *nfp_resource_name(struct nfp_resource *res); + +u64 nfp_resource_address(struct nfp_resource *res); + +u64 nfp_resource_size(struct nfp_resource *res); + +#endif /* !__NFP_H__ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c new file mode 100644 index 000000000000..a2850344f8b4 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -0,0 +1,279 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_resource.c + * Author: Jakub Kicinski + * Jason McMullan + */ +#include +#include +#include + +#include "crc32.h" +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define NFP_RESOURCE_ENTRY_NAME_SZ 8 + +/** + * struct nfp_resource_entry - Resource table entry + * @owner: NFP CPP Lock, interface owner + * @key: NFP CPP Lock, posix_crc32(name, 8) + * @region: Memory region descriptor + * @name: ASCII, zero padded name + * @reserved + * @cpp_action: CPP Action + * @cpp_token: CPP Token + * @cpp_target: CPP Target ID + * @page_offset: 256-byte page offset into target's CPP address + * @page_size: size, in 256-byte pages + */ +struct nfp_resource_entry { + struct nfp_resource_entry_mutex { + u32 owner; + u32 key; + } mutex; + struct nfp_resource_entry_region { + u8 name[NFP_RESOURCE_ENTRY_NAME_SZ]; + u8 reserved[5]; + u8 cpp_action; + u8 cpp_token; + u8 cpp_target; + u32 page_offset; + u32 page_size; + } region; +}; + +#define NFP_RESOURCE_TBL_SIZE 4096 +#define NFP_RESOURCE_TBL_ENTRIES (NFP_RESOURCE_TBL_SIZE / \ + sizeof(struct nfp_resource_entry)) + +struct nfp_resource { + char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1]; + u32 cpp_id; + u64 addr; + u64 size; + struct nfp_cpp_mutex *mutex; +}; + +static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) +{ + char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {}; + struct nfp_resource_entry entry; + u32 cpp_id, key; + int ret, i; + + cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ + + strncpy(name_pad, res->name, sizeof(name_pad)); + + /* Search for a matching entry */ + key = NFP_RESOURCE_TBL_KEY; + if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) + key = crc32_posix(name_pad, sizeof(name_pad)); + + for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + u64 addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry)); + if (ret != sizeof(entry)) + return -EIO; + + if (entry.mutex.key != key) + continue; + + /* Found key! 
*/ + res->mutex = + nfp_cpp_mutex_alloc(cpp, + NFP_RESOURCE_TBL_TARGET, addr, key); + res->cpp_id = NFP_CPP_ID(entry.region.cpp_target, + entry.region.cpp_action, + entry.region.cpp_token); + res->addr = (u64)entry.region.page_offset << 8; + res->size = (u64)entry.region.page_size << 8; + + return 0; + } + + return -ENOENT; +} + +static int +nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res, + struct nfp_cpp_mutex *dev_mutex) +{ + int err; + + if (nfp_cpp_mutex_lock(dev_mutex)) + return -EINVAL; + + err = nfp_cpp_resource_find(cpp, res); + if (err) + goto err_unlock_dev; + + err = nfp_cpp_mutex_trylock(res->mutex); + if (err) + goto err_res_mutex_free; + + nfp_cpp_mutex_unlock(dev_mutex); + + return 0; + +err_res_mutex_free: + nfp_cpp_mutex_free(res->mutex); +err_unlock_dev: + nfp_cpp_mutex_unlock(dev_mutex); + + return err; +} + +/** + * nfp_resource_acquire() - Acquire a resource handle + * @cpp: NFP CPP handle + * @name: Name of the resource + * + * NOTE: This function locks the acquired resource + * + * Return: NFP Resource handle, or ERR_PTR() + */ +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) +{ + unsigned long warn_at = jiffies + 15 * HZ; + struct nfp_cpp_mutex *dev_mutex; + struct nfp_resource *res; + int err; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return ERR_PTR(-ENOMEM); + + strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); + + dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) { + kfree(res); + return ERR_PTR(-ENOMEM); + } + + for (;;) { + err = nfp_resource_try_acquire(cpp, res, dev_mutex); + if (!err) + break; + if (err != -EBUSY) + goto err_free; + + err = msleep_interruptible(1); + if (err != 0) { + err = -ERESTARTSYS; + goto err_free; + } + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + 60 * HZ; + nfp_warn(cpp, "Warning: waiting for NFP resource %s\n", + name); + } + } + + nfp_cpp_mutex_free(dev_mutex); + + return res; + +err_free: + nfp_cpp_mutex_free(dev_mutex); + kfree(res); + return ERR_PTR(err); +} + +/** + * nfp_resource_release() - Release a NFP Resource handle + * @res: NFP Resource handle + * + * NOTE: This function implictly unlocks the resource handle + */ +void nfp_resource_release(struct nfp_resource *res) +{ + nfp_cpp_mutex_unlock(res->mutex); + nfp_cpp_mutex_free(res->mutex); + kfree(res); +} + +/** + * nfp_resource_cpp_id() - Return the cpp_id of a resource handle + * @res: NFP Resource handle + * + * Return: NFP CPP ID + */ +u32 nfp_resource_cpp_id(struct nfp_resource *res) +{ + return res->cpp_id; +} + +/** + * nfp_resource_name() - Return the name of a resource handle + * @res: NFP Resource handle + * + * Return: const char pointer to the name of the resource + */ +const char *nfp_resource_name(struct nfp_resource *res) +{ + return res->name; +} + +/** + * nfp_resource_address() - Return the address of a resource handle + * @res: NFP Resource handle + * + * Return: Address of the resource + */ +u64 nfp_resource_address(struct nfp_resource *res) +{ + return res->addr; +} + +/** + * nfp_resource_size() - Return the size in bytes of a resource handle + * @res: NFP Resource handle + * + * Return: Size of the resource in bytes + */ +u64 nfp_resource_size(struct nfp_resource *res) +{ + return res->size; +} -- cgit v1.2.3 From 59a8474888e9b06252ebe8979630b371c926a00c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:31 -0800 Subject: nfp: add hwinfo support 
Hwinfo is a simple key=value store of information which is read from the flash and populated during chip power on. Add code to look up information in it. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 4 + .../net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 3 + .../ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 15 + .../ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c | 318 +++++++++++++++++++++ 5 files changed, 341 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 47010b7d3efa..e9177f04f67b 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -4,6 +4,7 @@ nfp-objs := \ nfpcore/nfp6000_pcie.o \ nfpcore/nfp_cppcore.o \ nfpcore/nfp_cpplib.o \ + nfpcore/nfp_hwinfo.o \ nfpcore/nfp_resource.o \ nfpcore/nfp_target.o \ nfp_main.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 1aa57161f23e..1eae3e1e5fbe 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -43,6 +43,10 @@ #include "nfp_cpp.h" +/* Implemented in nfp_hwinfo.c */ + +const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup); + /* Implemented in nfp_resource.c */ #define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index f49f12a15669..1798ece689f7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -219,6 +219,9 @@ u32 nfp_cpp_model(struct nfp_cpp *cpp); u16 nfp_cpp_interface(struct nfp_cpp *cpp); int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); +void *nfp_hwinfo_cache(struct nfp_cpp *cpp); +void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val); + struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 cpp_id, const char *name, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 8e3322022322..aa94780f4dc7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -96,6 +96,9 @@ struct nfp_cpp { /* Cached areas for cpp/xpb readl/writel speedups */ struct mutex area_cache_mutex; /* Lock for the area cache */ struct list_head area_cache_list; + + /* Cached information */ + void *hwinfo; }; /* Element of the area_cache_list */ @@ -237,6 +240,8 @@ void nfp_cpp_free(struct nfp_cpp *cpp) if (cpp->op->free) cpp->op->free(cpp); + kfree(cpp->hwinfo); + device_unregister(&cpp->dev); kfree(cpp); @@ -277,6 +282,16 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial) return sizeof(cpp->serial); } +void *nfp_hwinfo_cache(struct nfp_cpp *cpp) +{ + return cpp->hwinfo; +} + +void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val) +{ + cpp->hwinfo = val; +} + /** * nfp_cpp_area_alloc_with_name() - allocate a new CPP area * @cpp: CPP device handle diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c new file mode 100644 index 000000000000..8d8f311ffa6e --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ 
-0,0 +1,318 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM + * after chip reset. + * + * Examples of the fields: + * me.count = 40 + * me.mask = 0x7f_ffff_ffff + * + * me.count is the total number of MEs on the system. + * me.mask is the bitmask of MEs that are available for application usage. + * + * (ie, in this example, ME 39 has been reserved by boardconfig.) + */ + +#include +#include +#include +#include +#include +#include +#include + +#define NFP_SUBSYS "nfp_hwinfo" + +#include "crc32.h" +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define HWINFO_SIZE_MIN 0x100 +#define HWINFO_WAIT 20 /* seconds */ + +/* The Hardware Info Table defines the properties of the system. + * + * HWInfo v1 Table (fixed size) + * + * 0x0000: u32 version Hardware Info Table version (1.0) + * 0x0004: u32 size Total size of the table, including + * the CRC32 (IEEE 802.3) + * 0x0008: u32 jumptab Offset of key/value table + * 0x000c: u32 keys Total number of keys in the key/value table + * NNNNNN: Key/value jump table and string data + * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * HWInfo v2 Table (variable size) + * + * 0x0000: u32 version Hardware Info Table version (2.0) + * 0x0004: u32 size Current size of the data area, excluding CRC32 + * 0x0008: u32 limit Maximum size of the table + * 0x000c: u32 reserved Unused, set to zero + * NNNNNN: Key/value data + * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * If the HWInfo table is in the process of being updated, the low bit + * of version will be set. + * + * HWInfo v1 Key/Value Table + * ------------------------- + * + * The key/value table is a set of offsets to ASCIIZ strings which have + * been strcmp(3) sorted (yes, please use bsearch(3) on the table). + * + * All keys are guaranteed to be unique. 
+ * + * N+0: u32 key_1 Offset to the first key + * N+4: u32 val_1 Offset to the first value + * N+8: u32 key_2 Offset to the second key + * N+c: u32 val_2 Offset to the second value + * ... + * + * HWInfo v2 Key/Value Table + * ------------------------- + * + * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' + * + * Unsorted. + */ + +#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_UPDATING BIT(0) + +struct nfp_hwinfo { + u8 start[0]; + + __le32 version; + __le32 size; + + /* v2 specific fields */ + __le32 limit; + __le32 resv; + + char data[]; +}; + +static bool nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo) +{ + return le32_to_cpu(hwinfo->version) & NFP_HWINFO_VERSION_UPDATING; +} + +static int +hwinfo_db_walk(struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, u32 size) +{ + const char *key, *val, *end = hwinfo->data + size; + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + + val = key + strlen(key) + 1; + if (val >= end) { + nfp_warn(cpp, "Bad HWINFO - overflowing key\n"); + return -EINVAL; + } + + if (val + strlen(val) + 1 > end) { + nfp_warn(cpp, "Bad HWINFO - overflowing value\n"); + return -EINVAL; + } + } + + return 0; +} + +static int +hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len) +{ + u32 size, crc; + + size = le32_to_cpu(db->size); + if (size > len) { + nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len); + return -EINVAL; + } + + size -= sizeof(u32); + crc = crc32_posix(db, size); + if (crc != get_unaligned_le32(db->start + size)) { + nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n", + crc, get_unaligned_le32(db->start + size)); + + return -EINVAL; + } + + return hwinfo_db_walk(cpp, db, size); +} + +static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) +{ + struct nfp_hwinfo *header; + struct nfp_resource *res; + u64 cpp_addr; + u32 cpp_id; + int err; + u8 *db; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); + if (!IS_ERR(res)) { + cpp_id = nfp_resource_cpp_id(res); + cpp_addr = nfp_resource_address(res); + *cpp_size = nfp_resource_size(res); + + nfp_resource_release(res); + + if (*cpp_size < HWINFO_SIZE_MIN) + return -ENOENT; + } else if (PTR_ERR(res) == -ENOENT) { + /* Try getting the HWInfo table from the 'classic' location */ + cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, + NFP_CPP_ACTION_RW, 0, 1); + cpp_addr = 0x30000; + *cpp_size = 0x0e000; + } else { + return PTR_ERR(res); + } + + db = kmalloc(*cpp_size + 1, GFP_KERNEL); + if (!db) + return -ENOMEM; + + err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); + if (err != *cpp_size) { + kfree(db); + return err < 0 ? 
err : -EIO; + } + + header = (void *)db; + if (nfp_hwinfo_is_updating(header)) { + kfree(db); + return -EBUSY; + } + + if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) { + nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n", + le32_to_cpu(header->version)); + kfree(db); + return -EINVAL; + } + + /* NULL-terminate for safety */ + db[*cpp_size] = '\0'; + + nfp_hwinfo_cache_set(cpp, db); + + return 0; +} + +static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) +{ + const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ; + int err; + + for (;;) { + const unsigned long start_time = jiffies; + + err = hwinfo_try_fetch(cpp, hwdb_size); + if (!err) + return 0; + + err = msleep_interruptible(100); + if (err || time_after(start_time, wait_until)) { + nfp_err(cpp, "NFP access error\n"); + return -EIO; + } + } +} + +static int nfp_hwinfo_load(struct nfp_cpp *cpp) +{ + struct nfp_hwinfo *db; + size_t hwdb_size = 0; + int err; + + err = hwinfo_fetch(cpp, &hwdb_size); + if (err) + return err; + + db = nfp_hwinfo_cache(cpp); + err = hwinfo_db_validate(cpp, db, hwdb_size); + if (err) { + kfree(db); + nfp_hwinfo_cache_set(cpp, NULL); + return err; + } + + return 0; +} + +/** + * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name + * @cpp: NFP CPP handle + * @lookup: HWInfo name to search for + * + * Return: Value of the HWInfo name, or NULL + */ +const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup) +{ + const char *key, *val, *end; + struct nfp_hwinfo *hwinfo; + int err; + + hwinfo = nfp_hwinfo_cache(cpp); + if (!hwinfo) { + err = nfp_hwinfo_load(cpp); + if (err) + return NULL; + hwinfo = nfp_hwinfo_cache(cpp); + } + + if (!hwinfo || !lookup) + return NULL; + + end = hwinfo->data + le32_to_cpu(hwinfo->size) - sizeof(u32); + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + + val = key + strlen(key) + 1; + + if (strcmp(key, lookup) == 0) + return val; + } + + return NULL; +} -- cgit v1.2.3 From a0d8e02c35ff6068925d8c5ff4202b561aa90de6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:32 -0800 Subject: nfp: add support for reading nffw info NFFW info is a resource which contains information about the loaded application firmware. Add code which will allow us to decode it and retrieve MIP location. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + .../net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | 323 +++++++++++++++++++++ .../net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 51 ++++ 3 files changed, 375 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index e9177f04f67b..408cb219a306 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -5,6 +5,7 @@ nfp-objs := \ nfpcore/nfp_cppcore.o \ nfpcore/nfp_cpplib.o \ nfpcore/nfp_hwinfo.o \ + nfpcore/nfp_nffw.o \ nfpcore/nfp_resource.o \ nfpcore/nfp_target.o \ nfp_main.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c new file mode 100644 index 000000000000..cd34097b79f1 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. 
+ * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_nffw.c + * Authors: Jakub Kicinski + * Jason McMullan + * Francois H. Theron + */ + +#include +#include + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp6000/nfp6000.h" + +/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4. + * Lower IDs are reserved for target and loader IDs. + */ +#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */ +#define NFFW_FWID_BASE 4 + +#define NFFW_FWID_ALL 255 + +/** + * NFFW_INFO_VERSION history: + * 0: This was never actually used (before versioning), but it refers to + * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later + * changed to 200. + * 1: First versioned struct, with + * FWINFO_CNT = 120 + * MEINFO_CNT = 120 + * 2: FWINFO_CNT = 200 + * MEINFO_CNT = 200 + */ +#define NFFW_INFO_VERSION_CURRENT 2 + +/* Enough for all current chip families */ +#define NFFW_MEINFO_CNT_V1 120 +#define NFFW_FWINFO_CNT_V1 120 +#define NFFW_MEINFO_CNT_V2 200 +#define NFFW_FWINFO_CNT_V2 200 + +/* Work in 32-bit words to make cross-platform endianness easier to handle */ + +/** nfp.nffw meinfo **/ +struct nffw_meinfo { + __le32 ctxmask__fwid__meid; +}; + +struct nffw_fwinfo { + __le32 loaded__mu_da__mip_off_hi; + __le32 mip_cppid; /* 0 means no MIP */ + __le32 mip_offset_lo; +}; + +struct nfp_nffw_info_v1 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1]; +}; + +struct nfp_nffw_info_v2 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2]; +}; + +/** Resource: nfp.nffw main **/ +struct nfp_nffw_info_data { + __le32 flags[2]; + union { + struct nfp_nffw_info_v1 v1; + struct nfp_nffw_info_v2 v2; + } info; +}; + +struct nfp_nffw_info { + struct nfp_cpp *cpp; + struct nfp_resource *res; + + struct nfp_nffw_info_data fwinf; +}; + +/* flg_info_version = flags[0]<27:16> + * This is a small version counter intended only to detect if the current + * implementation can read the current struct. Struct changes should be very + * rare and as such a 12-bit counter should cover large spans of time. 
By the + * time it wraps around, we don't expect to have 4096 versions of this struct + * to be in use at the same time. + */ +static u32 nffw_res_info_version_get(const struct nfp_nffw_info_data *res) +{ + return (le32_to_cpu(res->flags[0]) >> 16) & 0xfff; +} + +/* flg_init = flags[0]<0> */ +static u32 nffw_res_flg_init_get(const struct nfp_nffw_info_data *res) +{ + return (le32_to_cpu(res->flags[0]) >> 0) & 1; +} + +/* loaded = loaded__mu_da__mip_off_hi<31:31> */ +static u32 nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi) +{ + return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 31) & 1; +} + +/* mip_cppid = mip_cppid */ +static u32 nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi) +{ + return le32_to_cpu(fi->mip_cppid); +} + +/* loaded = loaded__mu_da__mip_off_hi<8:8> */ +static u32 nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi) +{ + return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 8) & 1; +} + +/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 8) | mip_offset_lo */ +static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) +{ + u64 mip_off_hi = le32_to_cpu(fi->loaded__mu_da__mip_off_hi); + + return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo); +} + +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + u32 xpbaddr, imbcppat; + int err; + + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; + err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); + if (err < 0) + return err; + + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + return nfp_cppat_mu_locality_lsb(mode, addr40); +} + +static unsigned int +nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) +{ + /* For the this code, version 0 is most likely to be + * version 1 in this case. Since the kernel driver + * does not take responsibility for initialising the + * nfp.nffw resource, any previous code (CA firmware or + * userspace) that left the version 0 and did set + * the init flag is going to be version 1. 
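+ *
+ * A purely illustrative decode (hypothetical value): flags[0] of
+ * 0x00010001 gives flg_init = 1 and flg_info_version = 1, so the
+ * switch below selects the v1 layout with NFFW_FWINFO_CNT_V1
+ * fwinfo entries.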
+ */ + switch (nffw_res_info_version_get(fwinf)) { + case 0: + case 1: + *arr = &fwinf->info.v1.fwinfo[0]; + return NFFW_FWINFO_CNT_V1; + case 2: + *arr = &fwinf->info.v2.fwinfo[0]; + return NFFW_FWINFO_CNT_V2; + default: + *arr = NULL; + return 0; + } +} + +/** + * nfp_nffw_info_open() - Acquire the lock on the NFFW table + * @cpp: NFP CPP handle + * + * Return: 0, or -ERRNO + */ +struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) +{ + struct nfp_nffw_info_data *fwinf; + struct nfp_nffw_info *state; + u32 info_ver; + int err; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW); + if (IS_ERR(state->res)) + goto err_free; + + fwinf = &state->fwinf; + + if (sizeof(*fwinf) > nfp_resource_size(state->res)) + goto err_release; + + err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), + nfp_resource_address(state->res), + fwinf, sizeof(*fwinf)); + if (err < sizeof(*fwinf)) + goto err_release; + + if (!nffw_res_flg_init_get(fwinf)) + goto err_release; + + info_ver = nffw_res_info_version_get(fwinf); + if (info_ver > NFFW_INFO_VERSION_CURRENT) + goto err_release; + + state->cpp = cpp; + return state; + +err_release: + nfp_resource_release(state->res); +err_free: + kfree(state); + return ERR_PTR(-EIO); +} + +/** + * nfp_nffw_info_release() - Release the lock on the NFFW table + * @state: NFP FW info state + * + * Return: 0, or -ERRNO + */ +void nfp_nffw_info_close(struct nfp_nffw_info *state) +{ + nfp_resource_release(state->res); + kfree(state); +} + +/** + * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW + * @state: NFP FW info state + * + * Return: First NFFW firmware info, NULL on failure + */ +static struct nffw_fwinfo *nfp_nffw_info_fwid_first(struct nfp_nffw_info *state) +{ + struct nffw_fwinfo *fwinfo; + unsigned int cnt, i; + + cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo); + if (!cnt) + return NULL; + + for (i = 0; i < cnt; i++) + if (nffw_fwinfo_loaded_get(&fwinfo[i])) + return &fwinfo[i]; + + return NULL; +} + +/** + * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP + * @state: NFP FW info state + * @cpp_id: Pointer to the CPP ID of the MIP + * @off: Pointer to the CPP Address of the MIP + * + * Return: 0, or -ERRNO + */ +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off) +{ + struct nffw_fwinfo *fwinfo; + + fwinfo = nfp_nffw_info_fwid_first(state); + if (!fwinfo) + return -EINVAL; + + *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo); + *off = nffw_fwinfo_mip_offset_get(fwinfo); + + if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { + int locality_off; + + if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) + return 0; + + locality_off = nfp_mip_mu_locality_lsb(state->cpp); + if (locality_off < 0) + return locality_off; + + *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + } + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h new file mode 100644 index 000000000000..47a6571e236d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. 
You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_nffw.h + * Authors: Jason McMullan + * Francois H. Theron + */ + +#ifndef NFP_NFFW_H +#define NFP_NFFW_H + +/* Implemented in nfp_nffw.c */ + +struct nfp_nffw_info; + +struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp); +void nfp_nffw_info_close(struct nfp_nffw_info *state); +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off); + +#endif /* NFP_NFFW_H */ -- cgit v1.2.3 From ab78c1d286df46ddc7aeaef2c22e27b7ec061ecc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:33 -0800 Subject: nfp: add MIP reading support MIP is a vector of information which linker can optionally include in application firmware. It will be used to retrieve the location of symbol tables. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + .../net/ethernet/netronome/nfp/nfpcore/nfp_mip.c | 174 +++++++++++++++++++++ .../net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 10 ++ 3 files changed, 185 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 408cb219a306..9c0662170de8 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -5,6 +5,7 @@ nfp-objs := \ nfpcore/nfp_cppcore.o \ nfpcore/nfp_cpplib.o \ nfpcore/nfp_hwinfo.o \ + nfpcore/nfp_mip.o \ nfpcore/nfp_nffw.o \ nfpcore/nfp_resource.o \ nfpcore/nfp_target.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c new file mode 100644 index 000000000000..3d15dd03647e --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. 
Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_mip.c + * Authors: Jakub Kicinski + * Jason McMullan + * Espen Skoglund + */ +#include +#include + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" + +#define NFP_MIP_SIGNATURE cpu_to_le32(0x0050494d) /* "MIP\0" */ +#define NFP_MIP_VERSION cpu_to_le32(1) +#define NFP_MIP_MAX_OFFSET (256 * 1024) + +struct nfp_mip { + __le32 signature; + __le32 mip_version; + __le32 mip_size; + __le32 first_entry; + + __le32 version; + __le32 buildnum; + __le32 buildtime; + __le32 loadtime; + + __le32 symtab_addr; + __le32 symtab_size; + __le32 strtab_addr; + __le32 strtab_size; + + char name[16]; + char toolchain[32]; +}; + +/* Read memory and check if it could be a valid MIP */ +static int +nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip) +{ + int ret; + + ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); + if (ret != sizeof(*mip)) { + nfp_err(cpp, "Failed to read MIP data (%d, %zu)\n", + ret, sizeof(*mip)); + return -EIO; + } + if (mip->signature != NFP_MIP_SIGNATURE) { + nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n", + le32_to_cpu(mip->signature)); + return -EINVAL; + } + if (mip->mip_version != NFP_MIP_VERSION) { + nfp_warn(cpp, "Unsupported MIP version (%d)\n", + le32_to_cpu(mip->mip_version)); + return -EINVAL; + } + + return 0; +} + +/* Try to locate MIP using the resource table */ +static int nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip) +{ + struct nfp_nffw_info *nffw_info; + u32 cpp_id; + u64 addr; + int err; + + nffw_info = nfp_nffw_info_open(cpp); + if (IS_ERR(nffw_info)) + return PTR_ERR(nffw_info); + + err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr); + if (err) + goto exit_close_nffw; + + err = nfp_mip_try_read(cpp, cpp_id, addr, mip); +exit_close_nffw: + nfp_nffw_info_close(nffw_info); + return err; +} + +/** + * nfp_mip_open() - Get device MIP structure + * @cpp: NFP CPP Handle + * + * Copy MIP structure from NFP device and return it. The returned + * structure is handled internally by the library and should be + * freed by calling nfp_mip_close(). + * + * Return: pointer to mip, NULL on failure. 
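+ *
+ * A minimal usage sketch (hypothetical caller, error handling elided):
+ *
+ *	const struct nfp_mip *mip = nfp_mip_open(cpp);
+ *	if (mip) {
+ *		u32 addr, size;
+ *
+ *		nfp_mip_symtab(mip, &addr, &size);
+ *		nfp_mip_close(mip);
+ *	}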
+ */ +const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp) +{ + struct nfp_mip *mip; + int err; + + mip = kmalloc(sizeof(*mip), GFP_KERNEL); + if (!mip) + return NULL; + + err = nfp_mip_read_resource(cpp, mip); + if (err) { + kfree(mip); + return NULL; + } + + return mip; +} + +void nfp_mip_close(const struct nfp_mip *mip) +{ + kfree(mip); +} + +/** + * nfp_mip_symtab() - Get the address and size of the MIP symbol table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol table + * @size: Location for size of MIP symbol table + */ +void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size) +{ + *addr = le32_to_cpu(mip->symtab_addr); + *size = le32_to_cpu(mip->symtab_size); +} + +/** + * nfp_mip_strtab() - Get the address and size of the MIP symbol name table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol name table + * @size: Location for size of MIP symbol name table + */ +void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size) +{ + *addr = le32_to_cpu(mip->strtab_addr); + *size = le32_to_cpu(mip->strtab_size); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index 47a6571e236d..c91138c0daab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -48,4 +48,14 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp); void nfp_nffw_info_close(struct nfp_nffw_info *state); int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off); +/* Implemented in nfp_mip.c */ + +struct nfp_mip; + +const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp); +void nfp_mip_close(const struct nfp_mip *mip); + +void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size); +void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size); + #endif /* NFP_NFFW_H */ -- cgit v1.2.3 From 5f30fe4d460c975ca3debe90ec71cbd0a46e35e2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:34 -0800 Subject: nfp: add rtsym support Add support for using application FW symbol table to look up location of information in device memory. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + .../net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 2 + .../ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 12 + .../net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 34 +++ .../net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 304 +++++++++++++++++++++ 5 files changed, 353 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 9c0662170de8..7765088f7e01 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -8,6 +8,7 @@ nfp-objs := \ nfpcore/nfp_mip.o \ nfpcore/nfp_nffw.o \ nfpcore/nfp_resource.o \ + nfpcore/nfp_rtsym.o \ nfpcore/nfp_target.o \ nfp_main.o \ nfp_net_common.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 1798ece689f7..58a15185572e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -221,6 +221,8 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); void *nfp_hwinfo_cache(struct nfp_cpp *cpp); void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val); +void *nfp_rtsym_cache(struct nfp_cpp *cpp); +void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val); struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 cpp_id, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index aa94780f4dc7..ab870294c604 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -99,6 +99,7 @@ struct nfp_cpp { /* Cached information */ void *hwinfo; + void *rtsym; }; /* Element of the area_cache_list */ @@ -241,6 +242,7 @@ void nfp_cpp_free(struct nfp_cpp *cpp) cpp->op->free(cpp); kfree(cpp->hwinfo); + kfree(cpp->rtsym); device_unregister(&cpp->dev); @@ -292,6 +294,16 @@ void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val) cpp->hwinfo = val; } +void *nfp_rtsym_cache(struct nfp_cpp *cpp) +{ + return cpp->rtsym; +} + +void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val) +{ + cpp->rtsym = val; +} + /** * nfp_cpp_area_alloc_with_name() - allocate a new CPP area * @cpp: CPP device handle diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index c91138c0daab..988badd230d1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -58,4 +58,38 @@ void nfp_mip_close(const struct nfp_mip *mip); void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size); void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size); +/* Implemented in nfp_rtsym.c */ + +#define NFP_RTSYM_TYPE_NONE 0 +#define NFP_RTSYM_TYPE_OBJECT 1 +#define NFP_RTSYM_TYPE_FUNCTION 2 +#define NFP_RTSYM_TYPE_ABS 3 + +#define NFP_RTSYM_TARGET_NONE 0 +#define NFP_RTSYM_TARGET_LMEM -1 +#define NFP_RTSYM_TARGET_EMU_CACHE -7 + +/** + * struct nfp_rtsym - RTSYM descriptor + * @name: Symbol name + * @addr: Address in the domain/target's address space + * @size: Size (in bytes) of the symbol + * @type: NFP_RTSYM_TYPE_* of the symbol + * @target: CPP Target identifier, or NFP_RTSYM_TARGET_* + * @domain: CPP Target Domain (island) + */ +struct nfp_rtsym { + const char *name; + u64 addr; + u64 size; + int 
type; + int target; + int domain; +}; + +int nfp_rtsym_count(struct nfp_cpp *cpp); +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx); +const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name); +u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error); + #endif /* NFP_NFFW_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c new file mode 100644 index 000000000000..c659b1d999be --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -0,0 +1,304 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_rtsym.c + * Interface for accessing run-time symbol table + * Authors: Jakub Kicinski + * Jason McMullan + * Espen Skoglund + * Francois H. Theron + */ +#include +#include +#include +#include + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp6000/nfp6000.h" + +/* These need to match the linker */ +#define SYM_TGT_LMEM 0 +#define SYM_TGT_EMU_CACHE 0x17 + +struct nfp_rtsym_entry { + u8 type; + u8 target; + u8 island; + u8 addr_hi; + __le32 addr_lo; + __le16 name; + u8 menum; + u8 size_hi; + __le32 size_lo; +}; + +struct nfp_rtsym_cache { + int num; + char *strtab; + struct nfp_rtsym symtab[]; +}; + +static int nfp_meid(u8 island_id, u8 menum) +{ + return (island_id & 0x3F) == island_id && menum < 12 ? 
+ (island_id << 4) | (menum + 4) : -1; +} + +static void +nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size, + struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) +{ + sw->type = fw->type; + sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size; + sw->addr = ((u64)fw->addr_hi << 32) | le32_to_cpu(fw->addr_lo); + sw->size = ((u64)fw->size_hi << 32) | le32_to_cpu(fw->size_lo); + + switch (fw->target) { + case SYM_TGT_LMEM: + sw->target = NFP_RTSYM_TARGET_LMEM; + break; + case SYM_TGT_EMU_CACHE: + sw->target = NFP_RTSYM_TARGET_EMU_CACHE; + break; + default: + sw->target = fw->target; + break; + } + + if (fw->menum != 0xff) + sw->domain = nfp_meid(fw->island, fw->menum); + else if (fw->island != 0xff) + sw->domain = fw->island; + else + sw->domain = -1; +} + +static int nfp_rtsymtab_probe(struct nfp_cpp *cpp) +{ + const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) | + NFP_ISL_EMEM0; + u32 strtab_addr, symtab_addr, strtab_size, symtab_size; + struct nfp_rtsym_entry *rtsymtab; + struct nfp_rtsym_cache *cache; + const struct nfp_mip *mip; + int err, n, size; + + mip = nfp_mip_open(cpp); + if (!mip) + return -EIO; + + nfp_mip_strtab(mip, &strtab_addr, &strtab_size); + nfp_mip_symtab(mip, &symtab_addr, &symtab_size); + nfp_mip_close(mip); + + if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab)) + return -ENXIO; + + /* Align to 64 bits */ + symtab_size = round_up(symtab_size, 8); + strtab_size = round_up(strtab_size, 8); + + rtsymtab = kmalloc(symtab_size, GFP_KERNEL); + if (!rtsymtab) + return -ENOMEM; + + size = sizeof(*cache); + size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym); + size += strtab_size + 1; + cache = kmalloc(size, GFP_KERNEL); + if (!cache) { + err = -ENOMEM; + goto err_free_rtsym_raw; + } + + cache->num = symtab_size / sizeof(*rtsymtab); + cache->strtab = (void *)&cache->symtab[cache->num]; + + err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size); + if (err != symtab_size) + goto err_free_cache; + + err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size); + if (err != strtab_size) + goto err_free_cache; + cache->strtab[strtab_size] = '\0'; + + for (n = 0; n < cache->num; n++) + nfp_rtsym_sw_entry_init(cache, strtab_size, + &cache->symtab[n], &rtsymtab[n]); + + kfree(rtsymtab); + nfp_rtsym_cache_set(cpp, cache); + return 0; + +err_free_cache: + kfree(cache); +err_free_rtsym_raw: + kfree(rtsymtab); + return err; +} + +static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_cache *cache; + int err; + + cache = nfp_rtsym_cache(cpp); + if (cache) + return cache; + + err = nfp_rtsymtab_probe(cpp); + if (err < 0) + return ERR_PTR(err); + + return nfp_rtsym_cache(cpp); +} + +/** + * nfp_rtsym_count() - Get the number of RTSYM descriptors + * @cpp: NFP CPP handle + * + * Return: Number of RTSYM descriptors, or -ERRNO + */ +int nfp_rtsym_count(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_cache *cache; + + cache = nfp_rtsym(cpp); + if (IS_ERR(cache)) + return PTR_ERR(cache); + + return cache->num; +} + +/** + * nfp_rtsym_get() - Get the Nth RTSYM descriptor + * @cpp: NFP CPP handle + * @idx: Index (0-based) of the RTSYM descriptor + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx) +{ + struct nfp_rtsym_cache *cache; + + cache = nfp_rtsym(cpp); + if (IS_ERR(cache)) + return NULL; + + if (idx >= cache->num) + return NULL; + + return &cache->symtab[idx]; +} + +/** + * 
nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name + * @cpp: NFP CPP handle + * @name: Symbol name + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name) +{ + struct nfp_rtsym_cache *cache; + int n; + + cache = nfp_rtsym(cpp); + if (IS_ERR(cache)) + return NULL; + + for (n = 0; n < cache->num; n++) { + if (strcmp(name, cache->symtab[n].name) == 0) + return &cache->symtab[n]; + } + + return NULL; +} + +/** + * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol + * @cpp: NFP CPP handle + * @name: Symbol name + * @error: Poniter to error code (optional) + * + * Lookup a symbol, map, read it and return it's value. Value of the symbol + * will be interpreted as a simple little-endian unsigned value. Symbol can + * be 4 or 8 bytes in size. + * + * Return: value read, on error sets the error and returns ~0ULL. + */ +u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error) +{ + const struct nfp_rtsym *sym; + u32 val32, id; + u64 val; + int err; + + sym = nfp_rtsym_lookup(cpp, name); + if (!sym) + return -ENOENT; + + id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); + + switch (sym->size) { + case 4: + err = nfp_cpp_readl(cpp, id, sym->addr, &val32); + val = val32; + break; + case 8: + err = nfp_cpp_readq(cpp, id, sym->addr, &val); + break; + default: + nfp_err(cpp, + "rtsym '%s' unsupported or non-scalar size: %lld\n", + name, sym->size); + err = -EINVAL; + break; + } + + if (err == sym->size) + err = 0; + else if (err >= 0) + err = -EIO; + + if (error) + *error = err; + + if (err) + return ~0ULL; + return val; +} -- cgit v1.2.3 From 1a64821c6af7a85aa69835236f49aa3aa4c22ed1 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:35 -0800 Subject: nfp: add support for service processor access NFP Service Processor (NSP) is an ARM core inside the chip which is responsible for management and control functions. Add support for chip reset, FW load and external module access using the NSP. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 2 + drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 14 + .../net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 2 + .../ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 13 + .../net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 417 +++++++++++++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 270 +++++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h | 81 ++++ 7 files changed, 799 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c create mode 100644 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 7765088f7e01..eab39aec5b24 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -7,6 +7,8 @@ nfp-objs := \ nfpcore/nfp_hwinfo.o \ nfpcore/nfp_mip.o \ nfpcore/nfp_nffw.o \ + nfpcore/nfp_nsp.o \ + nfpcore/nfp_nsp_eth.o \ nfpcore/nfp_resource.o \ nfpcore/nfp_rtsym.o \ nfpcore/nfp_target.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 1eae3e1e5fbe..3d70a8578a98 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -47,6 +47,20 @@ const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup); +/* Implemented in nfp_nsp.c */ + +struct nfp_nsp; +struct firmware; + +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); +void nfp_nsp_close(struct nfp_nsp *state); +int nfp_nsp_wait(struct nfp_nsp *state); +int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_write_eth_table(struct nfp_nsp *state, + const void *buf, unsigned int size); + /* Implemented in nfp_resource.c */ #define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 58a15185572e..74e6f9f59bdc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -224,6 +224,8 @@ void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val); void *nfp_rtsym_cache(struct nfp_cpp *cpp); void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val); +void nfp_nffw_cache_flush(struct nfp_cpp *cpp); + struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 cpp_id, const char *name, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index ab870294c604..40108e66c654 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -304,6 +304,19 @@ void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val) cpp->rtsym = val; } +/** + * nfp_nffw_cache_flush() - Flush cached firmware information + * @cpp: NFP CPP handle + * + * Flush cached firmware information. This function should be called + * every time firmware is loaded on unloaded. 
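+ *
+ * For example, nfp_nsp_device_soft_reset() calls this right after
+ * issuing the soft reset command, since a cached rtsym table would
+ * otherwise describe firmware state that no longer exists.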
+ */ +void nfp_nffw_cache_flush(struct nfp_cpp *cpp) +{ + kfree(nfp_rtsym_cache(cpp)); + nfp_rtsym_cache_set(cpp, NULL); +} + /** * nfp_cpp_area_alloc_with_name() - allocate a new CPP area * @cpp: CPP device handle diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c new file mode 100644 index 000000000000..f07f2fc3fba0 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -0,0 +1,417 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_nsp.c + * Author: Jakub Kicinski + * Jason McMullan + */ + +#include +#include +#include +#include +#include +#include +#include + +#define NFP_SUBSYS "nfp_nsp" + +#include "nfp.h" +#include "nfp_cpp.h" + +/* Offsets relative to the CSR base */ +#define NSP_STATUS 0x00 +#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) +#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44) +#define NSP_STATUS_MINOR GENMASK_ULL(43, 32) +#define NSP_STATUS_CODE GENMASK_ULL(31, 16) +#define NSP_STATUS_RESULT GENMASK_ULL(15, 8) +#define NSP_STATUS_BUSY BIT_ULL(0) + +#define NSP_COMMAND 0x08 +#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) +#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_START BIT_ULL(0) + +/* CPP address to retrieve the data from */ +#define NSP_BUFFER 0x10 +#define NSP_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) + +#define NSP_DFLT_BUFFER 0x18 + +#define NSP_DFLT_BUFFER_CONFIG 0x20 +#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) + +#define NSP_MAGIC 0xab10 +#define NSP_MAJOR 0 +#define NSP_MINOR (__MAX_SPCODE - 1) + +#define NSP_CODE_MAJOR GENMASK(15, 12) +#define NSP_CODE_MINOR GENMASK(11, 0) + +enum nfp_nsp_cmd { + SPCODE_NOOP = 0, /* No operation */ + SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ + SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */ + SPCODE_PHY_INIT = 3, /* Initialize the PHY */ + SPCODE_MAC_INIT = 4, /* Initialize the MAC */ + SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */ + SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ + SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ + SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + + __MAX_SPCODE, +}; + +struct nfp_nsp { + struct nfp_cpp *cpp; + struct nfp_resource *res; +}; + +static int nfp_nsp_check(struct nfp_nsp *state) +{ + struct nfp_cpp *cpp = state->cpp; + u64 nsp_status, reg; + u32 nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_status = nfp_resource_address(state->res) + NSP_STATUS; + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, ®); + if (err < 0) + return err; + + if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) { + nfp_err(cpp, "Cannot detect NFP Service Processor\n"); + return -ENODEV; + } + + if (FIELD_GET(NSP_STATUS_MAJOR, reg) != NSP_MAJOR || + FIELD_GET(NSP_STATUS_MINOR, reg) < NSP_MINOR) { + nfp_err(cpp, "Unsupported ABI %lld.%lld\n", + FIELD_GET(NSP_STATUS_MAJOR, reg), + FIELD_GET(NSP_STATUS_MINOR, reg)); + return -EINVAL; + } + + if (reg & NSP_STATUS_BUSY) { + nfp_err(cpp, "Service processor busy!\n"); + return -EBUSY; + } + + return 0; +} + +/** + * nfp_nsp_open() - Prepare for communication and lock the NSP resource. + * @cpp: NFP CPP Handle + */ +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp) +{ + struct nfp_resource *res; + struct nfp_nsp *state; + int err; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); + if (IS_ERR(res)) + return (void *)res; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) { + nfp_resource_release(res); + return ERR_PTR(-ENOMEM); + } + state->cpp = cpp; + state->res = res; + + err = nfp_nsp_check(state); + if (err) { + nfp_nsp_close(state); + return ERR_PTR(err); + } + + return state; +} + +/** + * nfp_nsp_close() - Clean up and unlock the NSP resource. 
+ * @state: NFP SP state + */ +void nfp_nsp_close(struct nfp_nsp *state) +{ + nfp_resource_release(state->res); + kfree(state); +} + +static int +nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, + u32 nsp_cpp, u64 addr, u64 mask, u64 val) +{ + const unsigned long wait_until = jiffies + 30 * HZ; + int err; + + for (;;) { + const unsigned long start_time = jiffies; + + err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg); + if (err < 0) + return err; + + if ((*reg & mask) == val) + return 0; + + err = msleep_interruptible(100); + if (err) + return err; + + if (time_after(start_time, wait_until)) + return -ETIMEDOUT; + } +} + +/** + * nfp_nsp_command() - Execute a command on the NFP Service Processor + * @state: NFP SP state + * @code: NFP SP Command Code + * @option: NFP SP Command Argument + * @buff_cpp: NFP SP Buffer CPP Address info + * @buff_addr: NFP SP Buffer Host address + * + * Return: 0 for success with no result + * + * 1..255 for NSP completion with a result code + * + * -EAGAIN if the NSP is not yet present + * -ENODEV if the NSP is not a supported model + * -EBUSY if the NSP is stuck + * -EINTR if interrupted while waiting for completion + * -ETIMEDOUT if the NSP took longer than 30 seconds to complete + */ +static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, + u32 buff_cpp, u64 buff_addr) +{ + u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command; + struct nfp_cpp *cpp = state->cpp; + u32 nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_base = nfp_resource_address(state->res); + nsp_status = nsp_base + NSP_STATUS; + nsp_command = nsp_base + NSP_COMMAND; + nsp_buffer = nsp_base + NSP_BUFFER; + + err = nfp_nsp_check(state); + if (err) + return err; + + if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { + nfp_err(cpp, "Host buffer out of reach %08x %016llx\n", + buff_cpp, buff_addr); + return -EINVAL; + } + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, + FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + if (err < 0) + return err; + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, + FIELD_PREP(NSP_COMMAND_OPTION, option) | + FIELD_PREP(NSP_COMMAND_CODE, code) | + FIELD_PREP(NSP_COMMAND_START, 1)); + if (err < 0) + return err; + + /* Wait for NSP_COMMAND_START to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, + nsp_cpp, nsp_command, NSP_COMMAND_START, 0); + if (err) { + nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n", + err, code); + return err; + } + + /* Wait for NSP_STATUS_BUSY to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, + nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0); + if (err) { + nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n", + err, code); + return err; + } + + err = FIELD_GET(NSP_STATUS_RESULT, reg); + if (err) { + nfp_warn(cpp, "Result (error) code set: %d command: %d\n", + -err, code); + return -err; + } + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, ®); + if (err < 0) + return err; + + return FIELD_GET(NSP_COMMAND_OPTION, reg); +} + +static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, + const void *in_buf, unsigned int in_size, + void *out_buf, unsigned int out_size) +{ + struct nfp_cpp *cpp = nsp->cpp; + unsigned int max_size; + u64 reg, cpp_buf; + int ret, err; + u32 cpp_id; + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + NSP_STATUS, ®); + if (err < 0) + return err; + + if (FIELD_GET(NSP_STATUS_MINOR, reg) < 13) { + nfp_err(cpp, "NSP: Code 0x%04x with 
buffer not supported (ABI %lld.%lld)\n", + code, FIELD_GET(NSP_STATUS_MAJOR, reg), + FIELD_GET(NSP_STATUS_MINOR, reg)); + return -EOPNOTSUPP; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER_CONFIG, + ®); + if (err < 0) + return err; + + max_size = max(in_size, out_size); + if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { + nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n", + code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, + max_size); + return -EINVAL; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER, + ®); + if (err < 0) + return err; + + cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; + cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); + + if (in_buf && in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (err < 0) + return err; + } + + ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf); + if (ret < 0) + return ret; + + if (out_buf && out_size) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (err < 0) + return err; + } + + return ret; +} + +int nfp_nsp_wait(struct nfp_nsp *state) +{ + const unsigned long wait_until = jiffies + 30 * HZ; + int err; + + nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n"); + + for (;;) { + const unsigned long start_time = jiffies; + + err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + if (err != -EAGAIN) + break; + + err = msleep_interruptible(100); + if (err) + break; + + if (time_after(start_time, wait_until)) { + err = -ETIMEDOUT; + break; + } + } + if (err) + nfp_err(state->cpp, "NSP failed to respond %d\n", err); + + return err; +} + +int nfp_nsp_device_soft_reset(struct nfp_nsp *state) +{ + int err; + + err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); + + nfp_nffw_cache_flush(state->cpp); + + return err; +} + +int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw) +{ + return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data, + fw->size, NULL, 0); +} + +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, + buf, size); +} + +int nfp_nsp_write_eth_table(struct nfp_nsp *state, + const void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, + NULL, 0); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c new file mode 100644 index 000000000000..1ece1f8ae4b3 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* Authors: David Brunecz + * Jakub Kicinski + * Jason Mcmullan + */ + +#include +#include +#include +#include +#include + +#include "nfp.h" +#include "nfp_nsp_eth.h" +#include "nfp6000/nfp6000.h" + +#define NSP_ETH_NBI_PORT_COUNT 24 +#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) +#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \ + sizeof(struct eth_table_entry)) + +#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) +#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) +#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48) +#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) + +#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES) + +#define NSP_ETH_STATE_ENABLED BIT_ULL(1) +#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) + +#define NSP_ETH_CTRL_ENABLED BIT_ULL(1) +#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) + +enum nfp_eth_rate { + RATE_INVALID = 0, + RATE_10M, + RATE_100M, + RATE_1G, + RATE_10G, + RATE_25G, +}; + +struct eth_table_entry { + __le64 port; + __le64 state; + u8 mac_addr[6]; + u8 resv[2]; + __le64 control; +}; + +static unsigned int nfp_eth_rate(enum nfp_eth_rate rate) +{ + unsigned int rate_xlate[] = { + [RATE_INVALID] = 0, + [RATE_10M] = SPEED_10, + [RATE_100M] = SPEED_100, + [RATE_1G] = SPEED_1000, + [RATE_10G] = SPEED_10000, + [RATE_25G] = SPEED_25000, + }; + + if (rate >= ARRAY_SIZE(rate_xlate)) + return 0; + + return rate_xlate[rate]; +} + +static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[ETH_ALEN - i - 1] = src[i]; +} + +static void +nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index, + struct nfp_eth_table_port *dst) +{ + unsigned int rate; + u64 port, state; + + port = le64_to_cpu(src->port); + state = le64_to_cpu(src->state); + + dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port); + dst->index = index; + dst->nbi = index / NSP_ETH_NBI_PORT_COUNT; + dst->base = index % NSP_ETH_NBI_PORT_COUNT; + dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port); + + dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state); + dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); + dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); + + rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state)); + dst->speed = dst->lanes * rate; + + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); + + snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu", + FIELD_GET(NSP_ETH_PORT_PHYLABEL, port), + FIELD_GET(NSP_ETH_PORT_LABEL, port)); +} + +/** + * nfp_eth_read_ports() - retrieve port information + * @cpp: NFP CPP handle + * + * Read the port information from the device. Returned structure should + * be freed with kfree() once no longer needed. 
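+ *
+ * A minimal usage sketch (hypothetical caller):
+ *
+ *	struct nfp_eth_table *eth_tbl = nfp_eth_read_ports(cpp);
+ *	unsigned int i;
+ *
+ *	if (eth_tbl) {
+ *		for (i = 0; i < eth_tbl->count; i++)
+ *			pr_info("port %s: %u Mbps\n",
+ *				eth_tbl->ports[i].label,
+ *				eth_tbl->ports[i].speed);
+ *		kfree(eth_tbl);
+ *	}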
+ * + * Return: populated ETH table or NULL on error. + */ +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp) +{ + struct nfp_eth_table *ret; + struct nfp_nsp *nsp; + + nsp = nfp_nsp_open(cpp); + if (IS_ERR(nsp)) + return NULL; + + ret = __nfp_eth_read_ports(cpp, nsp); + nfp_nsp_close(nsp); + + return ret; +} + +struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) +{ + struct eth_table_entry *entries; + struct nfp_eth_table *table; + unsigned int cnt; + int i, j, ret; + + entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); + if (!entries) + return NULL; + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + nfp_err(cpp, "reading port table failed %d\n", ret); + kfree(entries); + return NULL; + } + + /* Some versions of flash will give us 0 instead of port count */ + cnt = ret; + if (!cnt) { + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + cnt++; + } + + table = kzalloc(sizeof(*table) + + sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL); + if (!table) { + kfree(entries); + return NULL; + } + + table->count = cnt; + for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + nfp_eth_port_translate(&entries[i], i, + &table->ports[j++]); + + kfree(entries); + + return table; +} + +/** + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: 0 or -ERRNO. + */ +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) +{ + struct eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + int ret; + + entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); + if (!entries) + return -ENOMEM; + + nsp = nfp_nsp_open(cpp); + if (IS_ERR(nsp)) { + kfree(entries); + return PTR_ERR(nsp); + } + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + nfp_err(cpp, "reading port table failed %d\n", ret); + goto exit_close_nsp; + } + + if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { + nfp_warn(cpp, "trying to set port state on disabled port %d\n", + idx); + ret = -EINVAL; + goto exit_close_nsp; + } + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].state); + if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + ret = 0; + goto exit_close_nsp; + } + + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = cpu_to_le64(reg); + + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); +exit_close_nsp: + nfp_nsp_close(nsp); + kfree(entries); + + return ret < 0 ? ret : 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h new file mode 100644 index 000000000000..edf703d319c8 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. 
+ * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NSP_NSP_ETH_H +#define NSP_NSP_ETH_H 1 + +#include +#include + +/** + * struct nfp_eth_table - ETH table information + * @count: number of table entries + * @ports: table of ports + * + * @eth_index: port index according to legacy ethX numbering + * @index: chip-wide first channel index + * @nbi: NBI index + * @base: first channel index (within NBI) + * @lanes: number of channels + * @speed: interface speed (in Mbps) + * @mac_addr: interface MAC address + * @label: interface id string + * @enabled: is enabled? + * @tx_enabled: is TX enabled? + * @rx_enabled: is RX enabled? + */ +struct nfp_eth_table { + unsigned int count; + struct nfp_eth_table_port { + unsigned int eth_index; + unsigned int index; + unsigned int nbi; + unsigned int base; + unsigned int lanes; + unsigned int speed; + + u8 mac_addr[ETH_ALEN]; + char label[8]; + + bool enabled; + bool tx_enabled; + bool rx_enabled; + } ports[0]; +}; + +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); +struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable); + +#endif -- cgit v1.2.3 From 6f1cd5ca359276508a2c55e638a4a2cc28f8e4ae Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:36 -0800 Subject: nfp: add port layer to debugfs directories PF driver will support multiple ports per PCI device, add port number to DebugFS paths. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 15 ++++++--- .../net/ethernet/netronome/nfp/nfp_net_debugfs.c | 39 +++++++++++++++------- .../net/ethernet/netronome/nfp/nfp_netvf_main.c | 7 ++-- 3 files changed, 43 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index f05f750c2ea0..60c9aa80bbd1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -797,8 +797,9 @@ nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, #ifdef CONFIG_NFP_DEBUG void nfp_net_debugfs_create(void); void nfp_net_debugfs_destroy(void); -void nfp_net_debugfs_adapter_add(struct nfp_net *nn); -void nfp_net_debugfs_adapter_del(struct nfp_net *nn); +struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev); +void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id); +void nfp_net_debugfs_dir_clean(struct dentry **dir); #else static inline void nfp_net_debugfs_create(void) { @@ -808,11 +809,17 @@ static inline void nfp_net_debugfs_destroy(void) { } -static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) { + return NULL; } -static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +static inline void +nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id) +{ +} + +static inline void nfp_net_debugfs_dir_clean(struct dentry **dir) { } #endif /* CONFIG_NFP_DEBUG */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index c66f3f954aa8..6e9372a18375 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -202,16 +202,17 @@ static const struct file_operations nfp_xdp_q_fops = { .llseek = seq_lseek }; -void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id) { struct dentry *queues, *tx, *rx, *xdp; - char int_name[16]; + char name[20]; int i; if (IS_ERR_OR_NULL(nfp_dir)) return; - nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir); + sprintf(name, "port%d", id); + nn->debugfs_dir = debugfs_create_dir(name, ddir); if (IS_ERR_OR_NULL(nn->debugfs_dir)) return; @@ -227,24 +228,38 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn) return; for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) { - sprintf(int_name, "%d", i); - debugfs_create_file(int_name, S_IRUSR, rx, + sprintf(name, "%d", i); + debugfs_create_file(name, S_IRUSR, rx, &nn->r_vecs[i], &nfp_rx_q_fops); - debugfs_create_file(int_name, S_IRUSR, xdp, + debugfs_create_file(name, S_IRUSR, xdp, &nn->r_vecs[i], &nfp_xdp_q_fops); } for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) { - sprintf(int_name, "%d", i); - debugfs_create_file(int_name, S_IRUSR, tx, + sprintf(name, "%d", i); + debugfs_create_file(name, S_IRUSR, tx, &nn->r_vecs[i], &nfp_tx_q_fops); } } -void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) { - debugfs_remove_recursive(nn->debugfs_dir); - nn->debugfs_dir = NULL; + struct dentry *dev_dir; + + if (IS_ERR_OR_NULL(nfp_dir)) + return NULL; + + dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir); + if (IS_ERR_OR_NULL(dev_dir)) + return NULL; + + return dev_dir; +} + +void nfp_net_debugfs_dir_clean(struct dentry **dir) +{ + debugfs_remove_recursive(*dir); + *dir = NULL; } void nfp_net_debugfs_create(void) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 5bacc48e6627..ad0cc629cc32 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -83,6 +83,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, u32 tx_bar_sz, rx_bar_sz; int tx_bar_no, rx_bar_no; u8 __iomem *ctrl_bar; + struct dentry *ddir; struct nfp_net *nn; u32 startq; int stride; @@ -260,7 +261,9 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, nn); nfp_net_info(nn); - nfp_net_debugfs_adapter_add(nn); + ddir = nfp_net_debugfs_device_add(pdev); + nfp_net_debugfs_port_add(nn, ddir, 0); + nn->debugfs_dir = ddir; return 0; @@ -293,7 +296,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev) /* Note, the order is slightly different from above as we need * to keep the nn pointer around till we have freed everything. */ - nfp_net_debugfs_adapter_del(nn); + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); nfp_net_netdev_clean(nn->netdev); -- cgit v1.2.3 From fdace6c224e9fc0e81337b6fee3cfd136824c983 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:37 -0800 Subject: nfp: allocate irqs in lower driver PF services multiple ports using single PCI device therefore IRQs can no longer be allocated in the netdev code. Lower portion of the driver has to allocate the IRQs and hand them out to ports. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 17 ++- .../net/ethernet/netronome/nfp/nfp_net_common.c | 144 +++++++++++---------- .../net/ethernet/netronome/nfp/nfp_netvf_main.c | 78 +++++++---- 3 files changed, 140 insertions(+), 99 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 60c9aa80bbd1..ef031010ae09 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -83,6 +83,7 @@ #define NFP_NET_NON_Q_VECTORS 2 #define NFP_NET_IRQ_LSC_IDX 0 #define NFP_NET_IRQ_EXN_IDX 1 +#define NFP_NET_MIN_PORT_IRQS (NFP_NET_NON_Q_VECTORS + 1) /* Queue/Ring definitions */ #define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */ @@ -345,7 +346,7 @@ struct nfp_net_rx_ring { * @tx_ring: Pointer to TX ring * @rx_ring: Pointer to RX ring * @xdp_ring: Pointer to an extra TX ring for XDP - * @irq_idx: Index into MSI-X table + * @irq_entry: MSI-X table entry (use for talking to the device) * @rx_sync: Seqlock for atomic updates of RX stats * @rx_pkts: Number of received packets * @rx_bytes: Number of received bytes @@ -362,6 +363,7 @@ struct nfp_net_rx_ring { * @tx_lso: Counter of LSO packets sent * @tx_errors: How many TX errors were encountered * @tx_busy: How often was TX busy (no space)? + * @irq_vector: Interrupt vector number (use for talking to the OS) * @handler: Interrupt handler for this ring vector * @name: Name of the interrupt vector * @affinity_mask: SMP affinity mask for this vector @@ -378,7 +380,7 @@ struct nfp_net_r_vector { struct nfp_net_tx_ring *tx_ring; struct nfp_net_rx_ring *rx_ring; - int irq_idx; + u16 irq_entry; struct u64_stats_sync rx_sync; u64 rx_pkts; @@ -400,6 +402,7 @@ struct nfp_net_r_vector { u64 tx_errors; u64 tx_busy; + u32 irq_vector; irq_handler_t handler; char name[IFNAMSIZ + 8]; cpumask_t affinity_mask; @@ -788,8 +791,14 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update); void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); -int nfp_net_irqs_alloc(struct nfp_net *nn); -void nfp_net_irqs_disable(struct nfp_net *nn); + +unsigned int +nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, + unsigned int min_irqs, unsigned int want_irqs); +void nfp_net_irqs_disable(struct pci_dev *pdev); +void +nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, + unsigned int n); int nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6ac43abf561b..074259cc8e06 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -281,72 +281,76 @@ static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) } /** - * nfp_net_msix_alloc() - Try to allocate MSI-X irqs - * @nn: NFP Network structure - * @nr_vecs: Number of MSI-X vectors to allocate - * - * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors. 
+ * nfp_net_irqs_alloc() - allocates MSI-X irqs + * @pdev: PCI device structure + * @irq_entries: Array to be initialized and used to hold the irq entries + * @min_irqs: Minimal acceptable number of interrupts + * @wanted_irqs: Target number of interrupts to allocate * - * Return: Number of MSI-X vectors obtained or 0 on error. + * Return: Number of irqs obtained or 0 on error. */ -static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs) +unsigned int +nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, + unsigned int min_irqs, unsigned int wanted_irqs) { - struct pci_dev *pdev = nn->pdev; - int nvecs; - int i; + unsigned int i; + int got_irqs; - for (i = 0; i < nr_vecs; i++) - nn->irq_entries[i].entry = i; + for (i = 0; i < wanted_irqs; i++) + irq_entries[i].entry = i; - nvecs = pci_enable_msix_range(pdev, nn->irq_entries, - NFP_NET_NON_Q_VECTORS + 1, nr_vecs); - if (nvecs < 0) { - nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n", - NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs); + got_irqs = pci_enable_msix_range(pdev, irq_entries, + min_irqs, wanted_irqs); + if (got_irqs < 0) { + dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n", + min_irqs, wanted_irqs, got_irqs); return 0; } - return nvecs; + if (got_irqs < wanted_irqs) + dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n", + wanted_irqs, got_irqs); + + return got_irqs; } /** - * nfp_net_irqs_alloc() - allocates MSI-X irqs - * @nn: NFP Network structure + * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev + * @nn: NFP Network structure + * @irq_entries: Table of allocated interrupts + * @n: Size of @irq_entries (number of entries to grab) * - * Return: Number of irqs obtained or 0 on error. + * After interrupts are allocated with nfp_net_irqs_alloc() this function + * should be called to assign them to a specific netdev (port). */ -int nfp_net_irqs_alloc(struct nfp_net *nn) +void +nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, + unsigned int n) { - int wanted_irqs; - unsigned int n; - - wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS; - - n = nfp_net_msix_alloc(nn, wanted_irqs); - if (n == 0) { - nn_err(nn, "Failed to allocate MSI-X IRQs\n"); - return 0; - } - nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; nn->num_r_vecs = nn->max_r_vecs; - if (n < wanted_irqs) - nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n", - wanted_irqs, n); + memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); - return n; + if (nn->num_rx_rings > nn->num_r_vecs || + nn->num_tx_rings > nn->num_r_vecs) + nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n", + nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); + + nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); + nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); + nn->num_stack_tx_rings = nn->num_tx_rings; } /** * nfp_net_irqs_disable() - Disable interrupts - * @nn: NFP Network structure + * @pdev: PCI device structure * * Undoes what @nfp_net_irqs_alloc() does. 
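Aside (illustration only): nfp_net_irqs_alloc() asks the PCI core for anything between @min_irqs and @wanted_irqs MSI-X vectors, and nfp_net_irqs_assign() then clamps a port's ring counts to whatever it was handed. The PF code added later in this series spreads the granted vectors across ports with a DIV_ROUND_UP() loop; a standalone sketch of that split, with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int got_irqs = 10;	/* pretend pci_enable_msix_range() granted 10 */
	unsigned int num_ports = 4;
	unsigned int irqs_left = got_irqs, ports_left = num_ports;
	unsigned int port, n;

	for (port = 0; port < num_ports; port++) {
		n = DIV_ROUND_UP(irqs_left, ports_left);
		printf("port%u gets %u vector(s)\n", port, n);
		irqs_left -= n;
		ports_left--;
	}
	return 0;	/* prints 3, 3, 2, 2 -- all 10 vectors handed out */
}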
*/ -void nfp_net_irqs_disable(struct nfp_net *nn) +void nfp_net_irqs_disable(struct pci_dev *pdev) { - pci_disable_msix(nn->pdev); + pci_disable_msix(pdev); } /** @@ -410,10 +414,13 @@ out: static irqreturn_t nfp_net_irq_lsc(int irq, void *data) { struct nfp_net *nn = data; + struct msix_entry *entry; + + entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX]; nfp_net_read_link_status(nn); - nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX); + nfp_net_irq_unmask(nn, entry->entry); return IRQ_HANDLED; } @@ -476,32 +483,28 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, } /** - * nfp_net_irqs_assign() - Assign IRQs and setup rvecs. + * nfp_net_vecs_init() - Assign IRQs and setup rvecs. * @netdev: netdev structure */ -static void nfp_net_irqs_assign(struct net_device *netdev) +static void nfp_net_vecs_init(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); struct nfp_net_r_vector *r_vec; int r; - if (nn->num_rx_rings > nn->num_r_vecs || - nn->num_tx_rings > nn->num_r_vecs) - nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n", - nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); - - nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); - nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); - nn->num_stack_tx_rings = nn->num_tx_rings; - nn->lsc_handler = nfp_net_irq_lsc; nn->exn_handler = nfp_net_irq_exn; for (r = 0; r < nn->max_r_vecs; r++) { + struct msix_entry *entry; + + entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r]; + r_vec = &nn->r_vecs[r]; r_vec->nfp_net = nn; r_vec->handler = nfp_net_irq_rxtx; - r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; + r_vec->irq_entry = entry->entry; + r_vec->irq_vector = entry->vector; cpumask_set_cpu(r, &r_vec->affinity_mask); } @@ -534,7 +537,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, entry->vector, err); return err; } - nn_writeb(nn, ctrl_offset, vector_idx); + nn_writeb(nn, ctrl_offset, entry->entry); return 0; } @@ -1706,7 +1709,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) if (pkts_polled < budget) { napi_complete_done(napi, pkts_polled); - nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx); + nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); } return pkts_polled; @@ -1988,7 +1991,6 @@ static int nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, int idx) { - struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; int err; /* Setup NAPI */ @@ -1997,17 +1999,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, snprintf(r_vec->name, sizeof(r_vec->name), "%s-rxtx-%d", nn->netdev->name, idx); - err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec); + err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, + r_vec); if (err) { netif_napi_del(&r_vec->napi); - nn_err(nn, "Error requesting IRQ %d\n", entry->vector); + nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector); return err; } - disable_irq(entry->vector); + disable_irq(r_vec->irq_vector); - irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); + irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask); - nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry); + nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector, + r_vec->irq_entry); return 0; } @@ -2015,11 +2019,9 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, static void nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) { - struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; - - 
irq_set_affinity_hint(entry->vector, NULL); + irq_set_affinity_hint(r_vec->irq_vector, NULL); netif_napi_del(&r_vec->napi); - free_irq(entry->vector, r_vec); + free_irq(r_vec->irq_vector, r_vec); } /** @@ -2148,7 +2150,7 @@ nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn, /* Write the DMA address, size and MSI-X info to the device */ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma); nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry); } static void @@ -2157,7 +2159,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn, { nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma); nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry); } static int __nfp_net_set_config_and_enable(struct nfp_net *nn) @@ -2251,7 +2253,7 @@ static void nfp_net_open_stack(struct nfp_net *nn) for (r = 0; r < nn->num_r_vecs; r++) { napi_enable(&nn->r_vecs[r].napi); - enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); + enable_irq(nn->r_vecs[r].irq_vector); } netif_tx_wake_all_queues(nn->netdev); @@ -2375,7 +2377,7 @@ static void nfp_net_close_stack(struct nfp_net *nn) nn->link_up = false; for (r = 0; r < nn->num_r_vecs; r++) { - disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); + disable_irq(nn->r_vecs[r].irq_vector); napi_disable(&nn->r_vecs[r].napi); } @@ -3259,7 +3261,7 @@ int nfp_net_netdev_init(struct net_device *netdev) netif_carrier_off(netdev); nfp_net_set_ethtool_ops(netdev); - nfp_net_irqs_assign(netdev); + nfp_net_vecs_init(netdev); return register_netdev(netdev); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index ad0cc629cc32..39407f7cc586 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -45,6 +45,24 @@ #include "nfp_net_ctrl.h" #include "nfp_net.h" +#include "nfp_main.h" + +/** + * struct nfp_net_vf - NFP VF-specific device structure + * @nn: NFP Net structure for this device + * @irq_entries: Pre-allocated array of MSI-X entries + * @q_bar: Pointer to mapped QC memory (NULL if TX/RX mapped directly) + * @ddir: Per-device debugfs directory + */ +struct nfp_net_vf { + struct nfp_net *nn; + + struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS + + NFP_NET_MAX_TX_RINGS]; + u8 __iomem *q_bar; + + struct dentry *ddir; +}; static const char nfp_net_driver_name[] = "nfp_netvf"; @@ -82,16 +100,22 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, u32 tx_bar_off, rx_bar_off; u32 tx_bar_sz, rx_bar_sz; int tx_bar_no, rx_bar_no; + struct nfp_net_vf *vf; + unsigned int num_irqs; u8 __iomem *ctrl_bar; - struct dentry *ddir; struct nfp_net *nn; u32 startq; int stride; int err; + vf = kzalloc(sizeof(*vf), GFP_KERNEL); + if (!vf) + return -ENOMEM; + pci_set_drvdata(pdev, vf); + err = pci_enable_device_mem(pdev); if (err) - return err; + goto err_free_vf; err = pci_request_regions(pdev, nfp_net_driver_name); if (err) { @@ -183,6 +207,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, err = PTR_ERR(nn); goto err_ctrl_unmap; } + vf->nn = nn; nn->fw_ver = fw_ver; nn->ctrl_bar = ctrl_bar; @@ -206,17 +231,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; map_addr = pci_resource_start(pdev, tx_bar_no) + 
bar_off; - nn->q_bar = ioremap_nocache(map_addr, bar_sz); - if (!nn->q_bar) { + vf->q_bar = ioremap_nocache(map_addr, bar_sz); + if (!vf->q_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; goto err_netdev_free; } /* TX queues */ - nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off); + nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off); /* RX queues */ - nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off); + nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off); } else { resource_size_t map_addr; @@ -241,12 +266,15 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, nfp_netvf_get_mac_addr(nn); - err = nfp_net_irqs_alloc(nn); - if (!err) { + num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries, + NFP_NET_MIN_PORT_IRQS, + NFP_NET_NON_Q_VECTORS + nn->num_r_vecs); + if (!num_irqs) { nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); err = -EIO; goto err_unmap_rx; } + nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs); /* Get ME clock frequency from ctrl BAR * XXX for now frequency is hardcoded until we figure out how @@ -258,27 +286,23 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, if (err) goto err_irqs_disable; - pci_set_drvdata(pdev, nn); - nfp_net_info(nn); - ddir = nfp_net_debugfs_device_add(pdev); - nfp_net_debugfs_port_add(nn, ddir, 0); - nn->debugfs_dir = ddir; + vf->ddir = nfp_net_debugfs_device_add(pdev); + nfp_net_debugfs_port_add(nn, vf->ddir, 0); return 0; err_irqs_disable: - nfp_net_irqs_disable(nn); + nfp_net_irqs_disable(pdev); err_unmap_rx: - if (!nn->q_bar) + if (!vf->q_bar) iounmap(nn->rx_bar); err_unmap_tx: - if (!nn->q_bar) + if (!vf->q_bar) iounmap(nn->tx_bar); else - iounmap(nn->q_bar); + iounmap(vf->q_bar); err_netdev_free: - pci_set_drvdata(pdev, NULL); nfp_net_netdev_free(nn); err_ctrl_unmap: iounmap(ctrl_bar); @@ -286,36 +310,42 @@ err_pci_regions: pci_release_regions(pdev); err_pci_disable: pci_disable_device(pdev); +err_free_vf: + pci_set_drvdata(pdev, NULL); + kfree(vf); return err; } static void nfp_netvf_pci_remove(struct pci_dev *pdev) { - struct nfp_net *nn = pci_get_drvdata(pdev); + struct nfp_net_vf *vf = pci_get_drvdata(pdev); + struct nfp_net *nn = vf->nn; /* Note, the order is slightly different from above as we need * to keep the nn pointer around till we have freed everything. */ nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + nfp_net_debugfs_dir_clean(&vf->ddir); nfp_net_netdev_clean(nn->netdev); - nfp_net_irqs_disable(nn); + nfp_net_irqs_disable(pdev); - if (!nn->q_bar) { + if (!vf->q_bar) { iounmap(nn->rx_bar); iounmap(nn->tx_bar); } else { - iounmap(nn->q_bar); + iounmap(vf->q_bar); } iounmap(nn->ctrl_bar); - pci_set_drvdata(pdev, NULL); - nfp_net_netdev_free(nn); pci_release_regions(pdev); pci_disable_device(pdev); + + pci_set_drvdata(pdev, NULL); + kfree(vf); } struct pci_driver nfp_netvf_pci_driver = { -- cgit v1.2.3 From 63461a028f761f8e45d22d06fb7e7468def024b7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Feb 2017 09:17:38 -0800 Subject: nfp: add the PF driver Add PF driver for NFP4000 and NFP6000. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + drivers/net/ethernet/netronome/nfp/nfp_main.c | 332 +++++++++++- drivers/net/ethernet/netronome/nfp/nfp_main.h | 49 ++ drivers/net/ethernet/netronome/nfp/nfp_net.h | 26 +- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 585 ++++++++++++++++++++++ 5 files changed, 970 insertions(+), 23 deletions(-) create mode 100644 drivers/net/ethernet/netronome/nfp/nfp_net_main.c diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index eab39aec5b24..6933afa69df2 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -16,6 +16,7 @@ nfp-objs := \ nfp_net_common.o \ nfp_net_ethtool.o \ nfp_net_offload.o \ + nfp_net_main.o \ nfp_netvf_main.o ifeq ($(CONFIG_BPF_SYSCALL),y) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index cc6d75fff005..6e1bb3a4a611 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -45,12 +45,326 @@ #include #include +#include "nfpcore/nfp.h" +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nsp_eth.h" + +#include "nfpcore/nfp6000_pcie.h" + #include "nfp_main.h" #include "nfp_net.h" static const char nfp_driver_name[] = "nfp"; const char nfp_driver_version[] = VERMAGIC_STRING; +static const struct pci_device_id nfp_pci_device_ids[] = { + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP4000, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, + { 0, } /* Required last entry. */ +}; +MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); + +static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct nfp_pf *pf = pci_get_drvdata(pdev); + int err; + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err); + return err; + } + + pf->num_vfs = num_vfs; + + dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs); + + return num_vfs; +#endif + return 0; +} + +static int nfp_pcie_sriov_disable(struct pci_dev *pdev) +{ +#ifdef CONFIG_PCI_IOV + struct nfp_pf *pf = pci_get_drvdata(pdev); + + /* If the VFs are assigned we cannot shut down SR-IOV without + * causing issues, so just leave the hardware available but + * disabled + */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + pf->num_vfs = 0; + + pci_disable_sriov(pdev); + dev_dbg(&pdev->dev, "Removed VFs.\n"); +#endif + return 0; +} + +static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs == 0) + return nfp_pcie_sriov_disable(pdev); + else + return nfp_pcie_sriov_enable(pdev, num_vfs); +} + +/** + * nfp_net_fw_find() - Find the correct firmware image for netdev mode + * @pdev: PCI Device structure + * @pf: NFP PF Device structure + * + * Return: firmware if found and requested successfully. 
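Aside (standalone sketch, not driver code): nfp_net_fw_find() below builds the firmware file name from the "assembly.partno" hwinfo value plus one "_<count>x<gigabits>" group for every run of consecutive ports sharing a speed, then appends ".nffw". A user-space approximation with a hypothetical two-port 25G card; the resulting name matches one of the MODULE_FIRMWARE() entries listed later in this patch.

#include <stdio.h>

int main(void)
{
	const char *partno = "AMDA0099-0001";	/* stand-in for assembly.partno */
	int speeds[] = { 25000, 25000 };	/* two 25G ports -> "_2x25" */
	int count = 2, len, i, j;
	char name[256];

	len = snprintf(name, sizeof(name), "netronome/nic_%s", partno);
	for (i = 0; i < count; i += j) {
		/* group consecutive ports with the same speed */
		for (j = 1; i + j < count && speeds[i + j] == speeds[i]; j++)
			;
		len += snprintf(name + len, sizeof(name) - len,
				"_%dx%d", j, speeds[i] / 1000);
	}
	snprintf(name + len, sizeof(name) - len, ".nffw");

	printf("%s\n", name);	/* netronome/nic_AMDA0099-0001_2x25.nffw */
	return 0;
}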
+ */ +static const struct firmware * +nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) +{ + const struct firmware *fw = NULL; + struct nfp_eth_table_port *port; + const char *fw_model; + char fw_name[256]; + int spc, err = 0; + int i, j; + + if (!pf->eth_tbl) { + dev_err(&pdev->dev, "Error: can't identify media config\n"); + return NULL; + } + + fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno"); + if (!fw_model) { + dev_err(&pdev->dev, "Error: can't read part number\n"); + return NULL; + } + + spc = ARRAY_SIZE(fw_name); + spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model); + + for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) { + port = &pf->eth_tbl->ports[i]; + j = 1; + while (i + j < pf->eth_tbl->count && + port->speed == port[j].speed) + j++; + + spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, + "_%dx%d", j, port->speed / 1000); + } + + if (spc <= 0) + return NULL; + + spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw"); + if (spc <= 0) + return NULL; + + err = request_firmware(&fw, fw_name, &pdev->dev); + if (err) + return NULL; + + dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name); + + return fw; +} + +/** + * nfp_net_fw_load() - Load the firmware image + * @pdev: PCI Device structure + * @pf: NFP PF Device structure + * @nsp: NFP SP handle + * + * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded + */ +static int +nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) +{ + const struct firmware *fw; + u16 interface; + int err; + + interface = nfp_cpp_interface(pf->cpp); + if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) { + /* Only Unit 0 should reset or load firmware */ + dev_info(&pdev->dev, "Firmware will be loaded by partner\n"); + return 0; + } + + fw = nfp_net_fw_find(pdev, pf); + if (!fw) + return 0; + + dev_info(&pdev->dev, "Soft-reset, loading FW image\n"); + err = nfp_nsp_device_soft_reset(nsp); + if (err < 0) { + dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n", + err); + goto exit_release_fw; + } + + err = nfp_nsp_load_fw(nsp, fw); + + if (err < 0) { + dev_err(&pdev->dev, "FW loading failed: %d\n", err); + goto exit_release_fw; + } + + dev_info(&pdev->dev, "Finished loading FW image\n"); + +exit_release_fw: + release_firmware(fw); + + return err < 0 ? 
err : 1; +} + +static void nfp_fw_unload(struct nfp_pf *pf) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + nfp_err(pf->cpp, "Reset failed, can't open NSP\n"); + return; + } + + err = nfp_nsp_device_soft_reset(nsp); + if (err < 0) + dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err); + else + dev_info(&pf->pdev->dev, "Firmware safely unloaded\n"); + + nfp_nsp_close(nsp); +} + +static int nfp_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct nfp_nsp *nsp; + struct nfp_pf *pf; + int err; + + err = pci_enable_device(pdev); + if (err < 0) + return err; + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS)); + if (err) + goto err_pci_disable; + + err = pci_request_regions(pdev, nfp_driver_name); + if (err < 0) { + dev_err(&pdev->dev, "Unable to reserve pci resources.\n"); + goto err_pci_disable; + } + + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_rel_regions; + } + INIT_LIST_HEAD(&pf->ports); + pci_set_drvdata(pdev, pf); + pf->pdev = pdev; + + pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev); + if (IS_ERR_OR_NULL(pf->cpp)) { + err = PTR_ERR(pf->cpp); + if (err >= 0) + err = -ENOMEM; + goto err_disable_msix; + } + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + goto err_cpp_free; + } + + err = nfp_nsp_wait(nsp); + if (err < 0) { + nfp_nsp_close(nsp); + goto err_cpp_free; + } + + pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + + err = nfp_fw_load(pdev, pf, nsp); + nfp_nsp_close(nsp); + if (err < 0) { + dev_err(&pdev->dev, "Failed to load FW\n"); + goto err_eth_tbl_free; + } + + pf->fw_loaded = !!err; + + err = nfp_net_pci_probe(pf); + if (err) + goto err_fw_unload; + + return 0; + +err_fw_unload: + if (pf->fw_loaded) + nfp_fw_unload(pf); +err_eth_tbl_free: + kfree(pf->eth_tbl); +err_cpp_free: + nfp_cpp_free(pf->cpp); +err_disable_msix: + pci_set_drvdata(pdev, NULL); + kfree(pf); +err_rel_regions: + pci_release_regions(pdev); +err_pci_disable: + pci_disable_device(pdev); + + return err; +} + +static void nfp_pci_remove(struct pci_dev *pdev) +{ + struct nfp_pf *pf = pci_get_drvdata(pdev); + + if (!list_empty(&pf->ports)) + nfp_net_pci_remove(pf); + + nfp_pcie_sriov_disable(pdev); + + if (pf->fw_loaded) + nfp_fw_unload(pf); + + pci_set_drvdata(pdev, NULL); + nfp_cpp_free(pf->cpp); + + kfree(pf->eth_tbl); + kfree(pf); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver nfp_pci_driver = { + .name = nfp_driver_name, + .id_table = nfp_pci_device_ids, + .probe = nfp_pci_probe, + .remove = nfp_pci_remove, + .sriov_configure = nfp_pcie_sriov_configure, +}; + static int __init nfp_main_init(void) { int err; @@ -60,12 +374,18 @@ static int __init nfp_main_init(void) nfp_net_debugfs_create(); + err = pci_register_driver(&nfp_pci_driver); + if (err < 0) + goto err_destroy_debugfs; + err = pci_register_driver(&nfp_netvf_pci_driver); if (err) - goto err_destroy_debugfs; + goto err_unreg_pf; return err; +err_unreg_pf: + pci_unregister_driver(&nfp_pci_driver); err_destroy_debugfs: nfp_net_debugfs_destroy(); return err; @@ -74,12 +394,22 @@ err_destroy_debugfs: static void __exit nfp_main_exit(void) { pci_unregister_driver(&nfp_netvf_pci_driver); + pci_unregister_driver(&nfp_pci_driver); nfp_net_debugfs_destroy(); } module_init(nfp_main_init); module_exit(nfp_main_exit); +MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw"); 
+MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); + MODULE_AUTHOR("Netronome Systems "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver."); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 371ae2731909..6c40fa322da3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -39,10 +39,59 @@ #ifndef NFP_MAIN_H #define NFP_MAIN_H +#include #include #include #include +struct dentry; +struct pci_dev; + +struct nfp_cpp; +struct nfp_cpp_area; +struct nfp_eth_table; + +/** + * struct nfp_pf - NFP PF-specific device structure + * @pdev: Backpointer to PCI device + * @cpp: Pointer to the CPP handle + * @ctrl_area: Pointer to the CPP area for the control BAR + * @tx_area: Pointer to the CPP area for the TX queues + * @rx_area: Pointer to the CPP area for the FL/RX queues + * @irq_entries: Array of MSI-X entries for all ports + * @num_vfs: Number of SR-IOV VFs enabled + * @fw_loaded: Is the firmware loaded? + * @eth_tbl: NSP ETH table + * @ddir: Per-device debugfs directory + * @num_ports: Number of adapter ports + * @ports: Linked list of port structures (struct nfp_net) + */ +struct nfp_pf { + struct pci_dev *pdev; + + struct nfp_cpp *cpp; + + struct nfp_cpp_area *ctrl_area; + struct nfp_cpp_area *tx_area; + struct nfp_cpp_area *rx_area; + + struct msix_entry *irq_entries; + + unsigned int num_vfs; + + bool fw_loaded; + + struct nfp_eth_table *eth_tbl; + + struct dentry *ddir; + + unsigned int num_ports; + struct list_head ports; +}; + extern struct pci_driver nfp_netvf_pci_driver; +int nfp_net_pci_probe(struct nfp_pf *pf); +void nfp_net_pci_remove(struct nfp_pf *pf); + #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index ef031010ae09..d37d2391b4fe 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -43,6 +43,7 @@ #define _NFP_NET_H_ #include +#include #include #include #include @@ -434,20 +435,13 @@ struct nfp_stat_pair { * struct nfp_net - NFP network device structure * @pdev: Backpointer to PCI device * @netdev: Backpointer to net_device structure - * @nfp_fallback: Is the driver used in fallback mode? * @is_vf: Is the driver attached to a VF? - * @fw_loaded: Is the firmware loaded? * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_xdp: Offloaded BPF program is XDP * @ctrl: Local copy of the control register/word. 
* @fl_bufsz: Currently configured size of the freelist buffers * @rx_offset: Offset in the RX buffers where packet data starts * @xdp_prog: Installed XDP program - * @cpp: Pointer to the CPP handle - * @nfp_dev_cpp: Pointer to the NFP Device handle - * @ctrl_area: Pointer to the CPP area for the control BAR - * @tx_area: Pointer to the CPP area for the TX queues - * @rx_area: Pointer to the CPP area for the FL/RX queues * @fw_ver: Firmware version * @cap: Capabilities advertised by the Firmware * @max_mtu: Maximum support MTU advertised by the Firmware @@ -497,14 +491,13 @@ struct nfp_stat_pair { * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs + * @port_list: Entry on device port list */ struct nfp_net { struct pci_dev *pdev; struct net_device *netdev; - unsigned nfp_fallback:1; unsigned is_vf:1; - unsigned fw_loaded:1; unsigned bpf_offload_skip_sw:1; unsigned bpf_offload_xdp:1; @@ -518,18 +511,6 @@ struct nfp_net { struct nfp_net_tx_ring *tx_rings; struct nfp_net_rx_ring *rx_rings; -#ifdef CONFIG_PCI_IOV - unsigned int num_vfs; - struct vf_data_storage *vfinfo; - int vf_rate_link_speed; -#endif - - struct nfp_cpp *cpp; - struct platform_device *nfp_dev_cpp; - struct nfp_cpp_area *ctrl_area; - struct nfp_cpp_area *tx_area; - struct nfp_cpp_area *rx_area; - struct nfp_net_fw_version fw_ver; u32 cap; u32 max_mtu; @@ -592,11 +573,12 @@ struct nfp_net { u8 __iomem *qcp_cfg; u8 __iomem *ctrl_bar; - u8 __iomem *q_bar; u8 __iomem *tx_bar; u8 __iomem *rx_bar; struct dentry *debugfs_dir; + + struct list_head port_list; }; struct nfp_net_ring_set { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c new file mode 100644 index 000000000000..eccd31003b0d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -0,0 +1,585 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_net_main.c + * Netronome network device driver: Main entry point + * Authors: Jakub Kicinski + * Alejandro Lucero + * Jason McMullan + * Rolf Neugebauer + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "nfpcore/nfp.h" +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp_nsp_eth.h" +#include "nfpcore/nfp6000_pcie.h" + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" +#include "nfp_main.h" + +#define NFP_PF_CSR_SLICE_SIZE (32 * 1024) + +static int nfp_is_ready(struct nfp_cpp *cpp) +{ + const char *cp; + long state; + int err; + + cp = nfp_hwinfo_lookup(cpp, "board.state"); + if (!cp) + return 0; + + err = kstrtol(cp, 0, &state); + if (err < 0) + return 0; + + return state == 15; +} + +/** + * nfp_net_map_area() - Help function to map an area + * @cpp: NFP CPP handler + * @name: Name for the area + * @target: CPP target + * @addr: CPP address + * @size: Size of the area + * @area: Area handle (returned). + * + * This function is primarily to simplify the code in the main probe + * function. To undo the effect of this functions call + * @nfp_cpp_area_release_free(*area); + * + * Return: Pointer to memory mapped area or ERR_PTR + */ +static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp, + const char *name, int isl, int target, + unsigned long long addr, unsigned long size, + struct nfp_cpp_area **area) +{ + u8 __iomem *res; + u32 dest; + int err; + + dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl); + + *area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size); + if (!*area) { + err = -EIO; + goto err_area; + } + + err = nfp_cpp_area_acquire(*area); + if (err < 0) + goto err_acquire; + + res = nfp_cpp_area_iomem(*area); + if (!res) { + err = -EIO; + goto err_map; + } + + return res; + +err_map: + nfp_cpp_area_release(*area); +err_acquire: + nfp_cpp_area_free(*area); +err_area: + return (u8 __iomem *)ERR_PTR(err); +} + +static void +nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp, + unsigned int id) +{ + u8 mac_addr[ETH_ALEN]; + const char *mac_str; + char name[32]; + + snprintf(name, sizeof(name), "eth%d.mac", id); + + mac_str = nfp_hwinfo_lookup(cpp, name); + if (!mac_str) { + dev_warn(&nn->pdev->dev, + "Can't lookup MAC address. Generate\n"); + eth_hw_addr_random(nn->netdev); + return; + } + + if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + &mac_addr[0], &mac_addr[1], &mac_addr[2], + &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { + dev_warn(&nn->pdev->dev, + "Can't parse MAC address (%s). Generate.\n", mac_str); + eth_hw_addr_random(nn->netdev); + return; + } + + ether_addr_copy(nn->netdev->dev_addr, mac_addr); + ether_addr_copy(nn->netdev->perm_addr, mac_addr); +} + +/** + * nfp_net_get_mac_addr() - Get the MAC address. + * @nn: NFP Network structure + * @pf: NFP PF device structure + * @id: NFP port id + * + * First try to get the MAC address from NSP ETH table. If that + * fails try HWInfo. As a last resort generate a random address. 
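Aside (illustration only): nfp_net_get_mac_addr_hwinfo() above expects the "ethN.mac" hwinfo value to be six colon-separated hex bytes and falls back to a random address when parsing fails. The same sscanf() pattern in a standalone program, with an example address:

#include <stdio.h>

int main(void)
{
	const char *mac_str = "00:15:4d:12:34:56";	/* example value only */
	unsigned char mac[6];

	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) != 6) {
		fprintf(stderr, "can't parse '%s', would fall back to random\n",
			mac_str);
		return 1;
	}

	printf("parsed %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}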
+ */ +static void +nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id) +{ + int i; + + for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++) + if (pf->eth_tbl->ports[i].eth_index == id) { + const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr; + + ether_addr_copy(nn->netdev->dev_addr, mac_addr); + ether_addr_copy(nn->netdev->perm_addr, mac_addr); + return; + } + + nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id); +} + +static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf) +{ + char name[256]; + u16 interface; + int pcie_pf; + int err = 0; + u64 val; + + interface = nfp_cpp_interface(pf->cpp); + pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface); + + snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf); + + val = nfp_rtsym_read_le(pf->cpp, name, &err); + /* Default to one port */ + if (err) { + if (err != -ENOENT) + nfp_err(pf->cpp, "Unable to read adapter port count\n"); + val = 1; + } + + return val; +} + +static unsigned int +nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar, + unsigned int stride, u32 start_off, u32 num_off) +{ + unsigned int i, min_qc, max_qc; + + min_qc = readl(ctrl_bar + start_off); + max_qc = min_qc; + + for (i = 0; i < pf->num_ports; i++) { + /* To make our lives simpler only accept configuration where + * queues are allocated to PFs in order (queues of PFn all have + * indexes lower than PFn+1). + */ + if (max_qc > readl(ctrl_bar + start_off)) + return 0; + + max_qc = readl(ctrl_bar + start_off); + max_qc += readl(ctrl_bar + num_off) * stride; + ctrl_bar += NFP_PF_CSR_SLICE_SIZE; + } + + return max_qc - min_qc; +} + +static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf) +{ + const struct nfp_rtsym *ctrl_sym; + u8 __iomem *ctrl_bar; + char pf_symbol[256]; + u16 interface; + int pcie_pf; + + interface = nfp_cpp_interface(pf->cpp); + pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface); + + snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf); + + ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol); + if (!ctrl_sym) { + dev_err(&pf->pdev->dev, + "Failed to find PF BAR0 symbol %s\n", pf_symbol); + return NULL; + } + + if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) { + dev_err(&pf->pdev->dev, + "PF BAR0 too small to contain %d ports\n", + pf->num_ports); + return NULL; + } + + ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl", + ctrl_sym->domain, ctrl_sym->target, + ctrl_sym->addr, ctrl_sym->size, + &pf->ctrl_area); + if (IS_ERR(ctrl_bar)) { + dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n", + PTR_ERR(ctrl_bar)); + return NULL; + } + + return ctrl_bar; +} + +static void nfp_net_pf_free_netdevs(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + while (!list_empty(&pf->ports)) { + nn = list_first_entry(&pf->ports, struct nfp_net, port_list); + list_del(&nn->port_list); + + nfp_net_netdev_free(nn); + } +} + +static struct nfp_net * +nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar, + void __iomem *tx_bar, void __iomem *rx_bar, + int stride, struct nfp_net_fw_version *fw_ver) +{ + u32 n_tx_rings, n_rx_rings; + struct nfp_net *nn; + + n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS); + n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); + + /* Allocate and initialise the netdev */ + nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings); + if (IS_ERR(nn)) + return nn; + + nn->fw_ver = *fw_ver; + nn->ctrl_bar = ctrl_bar; + nn->tx_bar = tx_bar; + nn->rx_bar = rx_bar; + nn->is_vf = 0; + nn->stride_rx = stride; + nn->stride_tx = stride; + + return nn; +} + +static 
int +nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn, + unsigned int id) +{ + int err; + + /* Get MAC address */ + nfp_net_get_mac_addr(nn, pf, id); + + /* Get ME clock frequency from ctrl BAR + * XXX for now frequency is hardcoded until we figure out how + * to get the value from nfp-hwinfo into ctrl bar + */ + nn->me_freq_mhz = 1200; + + err = nfp_net_netdev_init(nn->netdev); + if (err) + return err; + + nfp_net_debugfs_port_add(nn, pf->ddir, id); + + nfp_net_info(nn); + + return 0; +} + +static int +nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar, + void __iomem *tx_bar, void __iomem *rx_bar, + int stride, struct nfp_net_fw_version *fw_ver) +{ + u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base; + struct nfp_net *nn; + unsigned int i; + int err; + + prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + + for (i = 0; i < pf->num_ports; i++) { + tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ; + rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ; + prev_tx_base = tgt_tx_base; + prev_rx_base = tgt_rx_base; + + nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar, + stride, fw_ver); + if (IS_ERR(nn)) { + err = PTR_ERR(nn); + goto err_free_prev; + } + list_add_tail(&nn->port_list, &pf->ports); + + ctrl_bar += NFP_PF_CSR_SLICE_SIZE; + } + + return 0; + +err_free_prev: + nfp_net_pf_free_netdevs(pf); + return err; +} + +static int +nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, + void __iomem *ctrl_bar, void __iomem *tx_bar, + void __iomem *rx_bar, int stride, + struct nfp_net_fw_version *fw_ver) +{ + unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left; + struct nfp_net *nn; + int err; + + /* Allocate the netdevs and do basic init */ + err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar, + stride, fw_ver); + if (err) + return err; + + /* Get MSI-X vectors */ + wanted_irqs = 0; + list_for_each_entry(nn, &pf->ports, port_list) + wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs; + pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries), + GFP_KERNEL); + if (!pf->irq_entries) { + err = -ENOMEM; + goto err_nn_free; + } + + num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries, + NFP_NET_MIN_PORT_IRQS * pf->num_ports, + wanted_irqs); + if (!num_irqs) { + nn_warn(nn, "Unable to allocate MSI-X Vectors. 
Exiting\n"); + err = -ENOMEM; + goto err_vec_free; + } + + /* Distribute IRQs to ports */ + irqs_left = num_irqs; + ports_left = pf->num_ports; + list_for_each_entry(nn, &pf->ports, port_list) { + unsigned int n; + + n = DIV_ROUND_UP(irqs_left, ports_left); + nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left], + n); + irqs_left -= n; + ports_left--; + } + + /* Finish netdev init and register */ + id = 0; + list_for_each_entry(nn, &pf->ports, port_list) { + err = nfp_net_pf_init_port_netdev(pf, nn, id); + if (err) + goto err_prev_deinit; + + id++; + } + + return 0; + +err_prev_deinit: + list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) { + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + nfp_net_netdev_clean(nn->netdev); + } + nfp_net_irqs_disable(pf->pdev); +err_vec_free: + kfree(pf->irq_entries); +err_nn_free: + nfp_net_pf_free_netdevs(pf); + return err; +} + +/* + * PCI device functions + */ +int nfp_net_pci_probe(struct nfp_pf *pf) +{ + u8 __iomem *ctrl_bar, *tx_bar, *rx_bar; + u32 total_tx_qcs, total_rx_qcs; + struct nfp_net_fw_version fw_ver; + u32 tx_area_sz, rx_area_sz; + u32 start_q; + int stride; + int err; + + /* Verify that the board has completed initialization */ + if (!nfp_is_ready(pf->cpp)) { + nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n"); + return -EINVAL; + } + + pf->num_ports = nfp_net_pf_get_num_ports(pf); + + ctrl_bar = nfp_net_pf_map_ctrl_bar(pf); + if (!ctrl_bar) + return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER; + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { + stride = 2; + nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 
4: + stride = 4; + break; + default: + nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + } + + /* Find how many QC structs need to be mapped */ + total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride, + NFP_NET_CFG_START_TXQ, + NFP_NET_CFG_MAX_TXRINGS); + total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride, + NFP_NET_CFG_START_RXQ, + NFP_NET_CFG_MAX_RXRINGS); + if (!total_tx_qcs || !total_rx_qcs) { + nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n", + total_tx_qcs, total_rx_qcs); + err = -EINVAL; + goto err_ctrl_unmap; + } + + tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs; + rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs; + + /* Map TX queues */ + start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0, + NFP_PCIE_QUEUE(start_q), + tx_area_sz, &pf->tx_area); + if (IS_ERR(tx_bar)) { + nfp_err(pf->cpp, "Failed to map TX area.\n"); + err = PTR_ERR(tx_bar); + goto err_ctrl_unmap; + } + + /* Map RX queues */ + start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0, + NFP_PCIE_QUEUE(start_q), + rx_area_sz, &pf->rx_area); + if (IS_ERR(rx_bar)) { + nfp_err(pf->cpp, "Failed to map RX area.\n"); + err = PTR_ERR(rx_bar); + goto err_unmap_tx; + } + + pf->ddir = nfp_net_debugfs_device_add(pf->pdev); + + err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar, + stride, &fw_ver); + if (err) + goto err_clean_ddir; + + return 0; + +err_clean_ddir: + nfp_net_debugfs_dir_clean(&pf->ddir); + nfp_cpp_area_release_free(pf->rx_area); +err_unmap_tx: + nfp_cpp_area_release_free(pf->tx_area); +err_ctrl_unmap: + nfp_cpp_area_release_free(pf->ctrl_area); + return err; +} + +void nfp_net_pci_remove(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->ports, port_list) { + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + + nfp_net_netdev_clean(nn->netdev); + } + + nfp_net_pf_free_netdevs(pf); + + nfp_net_debugfs_dir_clean(&pf->ddir); + + nfp_net_irqs_disable(pf->pdev); + kfree(pf->irq_entries); + + nfp_cpp_area_release_free(pf->rx_area); + nfp_cpp_area_release_free(pf->tx_area); + nfp_cpp_area_release_free(pf->ctrl_area); +} -- cgit v1.2.3 From 9a7382662fc85b29f2b0dc4e7c61896cbd06593d Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:35 +0100 Subject: tools: Sync {,tools/}include/uapi/linux/bpf.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The tools version of this header is out of date; update it to the latest version from kernel header. Synchronize with the following commits: * b95a5c4db09b ("bpf: add a longest prefix match trie map implementation") * a5e8c07059d0 ("bpf: add bpf_probe_read_str helper") * d1b662adcdb8 ("bpf: allow option for setting bpf_l4_csum_replace from scratch") Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Arnaldo Carvalho de Melo Cc: Daniel Borkmann Cc: Daniel Mack Cc: David S. Miller Cc: Gianluca Borello Signed-off-by: David S. 
Miller --- tools/include/uapi/linux/bpf.h | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0eb0e87dbe9f..e07fd5a324e6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -63,6 +63,12 @@ struct bpf_insn { __s32 imm; /* signed immediate constant */ }; +/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ +struct bpf_lpm_trie_key { + __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ + __u8 data[0]; /* Arbitrary size */ +}; + /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, @@ -89,6 +95,7 @@ enum bpf_map_type { BPF_MAP_TYPE_CGROUP_ARRAY, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, + BPF_MAP_TYPE_LPM_TRIE, }; enum bpf_prog_type { @@ -430,6 +437,18 @@ union bpf_attr { * @xdp_md: pointer to xdp_md * @delta: An positive/negative integer to be added to xdp_md.data * Return: 0 on success or negative on error + * + * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * Copy a NUL terminated string from unsafe address. In case the string + * length is smaller than size, the target is not padded with further NUL + * bytes. In case the string length is larger than size, just count-1 + * bytes are copied and the last byte is set to NUL. + * @dst: destination address + * @size: maximum number of bytes to copy, including the trailing NUL + * @unsafe_ptr: unsafe address + * Return: + * > 0 length of the string including the trailing NUL on success + * < 0 error */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -476,7 +495,8 @@ union bpf_attr { FN(set_hash_invalid), \ FN(get_numa_node_id), \ FN(skb_change_head), \ - FN(xdp_adjust_head), + FN(xdp_adjust_head), \ + FN(probe_read_str), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -502,6 +522,7 @@ enum bpf_func_id { /* BPF_FUNC_l4_csum_replace flags. */ #define BPF_F_PSEUDO_HDR (1ULL << 4) #define BPF_F_MARK_MANGLED_0 (1ULL << 5) +#define BPF_F_MARK_ENFORCE (1ULL << 6) /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ #define BPF_F_INGRESS (1ULL << 0) -- cgit v1.2.3 From 7f73f39a89c25c04ac684661ee61edcae476eb15 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:36 +0100 Subject: bpf: Change the include directory for selftest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the tools include directory instead of the installed one to allow builds from other kernels. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Arnaldo Carvalho de Melo Cc: Daniel Borkmann Cc: David S. Miller Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 769a6cb42b4b..c470c7301636 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,4 +1,4 @@ -CFLAGS += -Wall -O2 -I../../../../usr/include +CFLAGS += -Wall -O2 -I../../../include/uapi test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map -- cgit v1.2.3 From d02d8986a7688d3f0ff6ef61aa6beb41427692eb Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:37 +0100 Subject: bpf: Always test unprivileged programs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If selftests are run as root, then execute the unprivileged checks as well. This increases the number of tests from 243 to 368. The test numbers are suffixed with "/u" when executed as unprivileged or with "/p" when executed as privileged. The geteuid() check is replaced with a capability check. Handling capabilities requires the libcap dependency. Signed-off-by: Mickaël Salaün Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/Makefile | 2 +- tools/testing/selftests/bpf/test_verifier.c | 68 ++++++++++++++++++++++++++--- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index c470c7301636..f3d65ad53494 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,4 +1,4 @@ -CFLAGS += -Wall -O2 -I../../../include/uapi +CFLAGS += -Wall -O2 -lcap -I../../../include/uapi test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 71f6407cde60..878bd60da376 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -4574,6 +4575,55 @@ fail_log: goto close_fds; } +static bool is_admin(void) +{ + cap_t caps; + cap_flag_value_t sysadmin = CAP_CLEAR; + const cap_value_t cap_val = CAP_SYS_ADMIN; + + if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) { + perror("cap_get_flag"); + return false; + } + caps = cap_get_proc(); + if (!caps) { + perror("cap_get_proc"); + return false; + } + if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin)) + perror("cap_get_flag"); + if (cap_free(caps)) + perror("cap_free"); + return (sysadmin == CAP_SET); +} + +static int set_admin(bool admin) +{ + cap_t caps; + const cap_value_t cap_val = CAP_SYS_ADMIN; + int ret = -1; + + caps = cap_get_proc(); + if (!caps) { + perror("cap_get_proc"); + return -1; + } + if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val, + admin ? CAP_SET : CAP_CLEAR)) { + perror("cap_set_flag"); + goto out; + } + if (cap_set_proc(caps)) { + perror("cap_set_proc"); + goto out; + } + ret = 0; +out: + if (cap_free(caps)) + perror("cap_free"); + return ret; +} + static int do_test(bool unpriv, unsigned int from, unsigned int to) { int i, passes = 0, errors = 0; @@ -4584,11 +4634,19 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to) /* Program types that are not supported by non-root we * skip right away.
*/ - if (unpriv && test->prog_type) - continue; + if (!test->prog_type) { + if (!unpriv) + set_admin(false); + printf("#%d/u %s ", i, test->descr); + do_test_single(test, true, &passes, &errors); + if (!unpriv) + set_admin(true); + } - printf("#%d %s ", i, test->descr); - do_test_single(test, unpriv, &passes, &errors); + if (!unpriv) { + printf("#%d/p %s ", i, test->descr); + do_test_single(test, false, &passes, &errors); + } } printf("Summary: %d PASSED, %d FAILED\n", passes, errors); @@ -4600,7 +4658,7 @@ int main(int argc, char **argv) struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; struct rlimit rlim = { 1 << 20, 1 << 20 }; unsigned int from = 0, to = ARRAY_SIZE(tests); - bool unpriv = geteuid() != 0; + bool unpriv = !is_admin(); if (argc == 3) { unsigned int l = atoi(argv[argc - 2]); -- cgit v1.2.3 From 2ee89fb9a942e250b5adb5535de4acca14bb7fa2 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:38 +0100 Subject: bpf: Use bpf_load_program() from the library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace bpf_prog_load() with bpf_load_program() calls. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/lib/bpf/bpf.c | 6 +++--- tools/lib/bpf/bpf.h | 4 ++-- tools/testing/selftests/bpf/Makefile | 4 +++- tools/testing/selftests/bpf/bpf_sys.h | 21 --------------------- tools/testing/selftests/bpf/test_tag.c | 6 ++++-- tools/testing/selftests/bpf/test_verifier.c | 8 +++++--- 6 files changed, 17 insertions(+), 32 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 3ddb58a36d3c..58ce252073fa 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -42,7 +42,7 @@ # endif #endif -static __u64 ptr_to_u64(void *ptr) +static __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } @@ -69,8 +69,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } -int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns, - size_t insns_cnt, char *license, +int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz) { int fd; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index a2f9853dd882..bc959a2de023 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -28,8 +28,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, /* Recommend log buffer size */ #define BPF_LOG_BUF_SIZE 65536 -int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns, - size_t insns_cnt, char *license, +int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz); diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index f3d65ad53494..a35f564f66a1 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,4 +1,4 @@ -CFLAGS += -Wall -O2 -lcap -I../../../include/uapi +CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I../../../lib test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map @@ -7,6 +7,8 @@ TEST_FILES := $(test_objs) all: $(test_objs) +$(test_objs): ../../../lib/bpf/bpf.o + include ../lib.mk clean: diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h index 
6b4565f2a3f2..e7bbe3e5402e 100644 --- a/tools/testing/selftests/bpf/bpf_sys.h +++ b/tools/testing/selftests/bpf/bpf_sys.h @@ -84,25 +84,4 @@ static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key, return bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } -static inline int bpf_prog_load(enum bpf_prog_type type, - const struct bpf_insn *insns, size_t size_insns, - const char *license, char *log, size_t size_log) -{ - union bpf_attr attr = {}; - - attr.prog_type = type; - attr.insns = bpf_ptr_to_u64(insns); - attr.insn_cnt = size_insns / sizeof(struct bpf_insn); - attr.license = bpf_ptr_to_u64(license); - - if (size_log > 0) { - attr.log_buf = bpf_ptr_to_u64(log); - attr.log_size = size_log; - attr.log_level = 1; - log[0] = 0; - } - - return bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); -} - #endif /* __BPF_SYS__ */ diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index 5f7c602f47d1..dc209721ffd5 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -16,6 +16,8 @@ #include #include +#include + #include "../../../include/linux/filter.h" #include "bpf_sys.h" @@ -55,8 +57,8 @@ static int bpf_try_load_prog(int insns, int fd_map, int fd_prog; bpf_filler(insns, fd_map); - fd_prog = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, prog, insns * - sizeof(struct bpf_insn), "", NULL, 0); + fd_prog = bpf_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0, + NULL, 0); assert(fd_prog > 0); if (fd_map > 0) bpf_filler(insns, 0); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 878bd60da376..247830ecf68e 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -24,6 +24,8 @@ #include #include +#include + #include "../../../include/linux/filter.h" #include "bpf_sys.h" @@ -4535,9 +4537,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv, do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); - fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, - prog, prog_len * sizeof(struct bpf_insn), - "GPL", bpf_vlog, sizeof(bpf_vlog)); + fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, + prog, prog_len, "GPL", 0, bpf_vlog, + sizeof(bpf_vlog)); expected_ret = unpriv && test->result_unpriv != UNDEF ? test->result_unpriv : test->result; -- cgit v1.2.3 From 10ecc728fe12dbd206e2d4d8b6e96082632b969c Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:39 +0100 Subject: bpf: Use bpf_map_update_elem() from the library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace bpf_map_update() with bpf_map_update_elem() calls. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. 
Miller --- tools/lib/bpf/bpf.c | 2 +- tools/lib/bpf/bpf.h | 2 +- tools/testing/selftests/bpf/bpf_sys.h | 13 ---- tools/testing/selftests/bpf/test_lpm_map.c | 15 ++--- tools/testing/selftests/bpf/test_lru_map.c | 97 +++++++++++++++++------------- tools/testing/selftests/bpf/test_maps.c | 61 ++++++++++--------- 6 files changed, 99 insertions(+), 91 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 58ce252073fa..1de762677a2f 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -98,7 +98,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); } -int bpf_map_update_elem(int fd, void *key, void *value, +int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags) { union bpf_attr attr; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index bc959a2de023..2458534c8b33 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -33,7 +33,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, __u32 kern_version, char *log_buf, size_t log_buf_sz); -int bpf_map_update_elem(int fd, void *key, void *value, +int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); int bpf_map_lookup_elem(int fd, void *key, void *value); diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h index e7bbe3e5402e..e08dec0db9e0 100644 --- a/tools/testing/selftests/bpf/bpf_sys.h +++ b/tools/testing/selftests/bpf/bpf_sys.h @@ -35,19 +35,6 @@ static inline int bpf_map_lookup(int fd, const void *key, void *value) return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); } -static inline int bpf_map_update(int fd, const void *key, const void *value, - uint64_t flags) -{ - union bpf_attr attr = {}; - - attr.map_fd = fd; - attr.key = bpf_ptr_to_u64(key); - attr.value = bpf_ptr_to_u64(value); - attr.flags = flags; - - return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); -} - static inline int bpf_map_delete(int fd, const void *key) { union bpf_attr attr = {}; diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 26775c00273f..e29ffbcd2932 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -22,6 +22,7 @@ #include #include +#include #include "bpf_sys.h" #include "bpf_util.h" @@ -198,7 +199,7 @@ static void test_lpm_map(int keysize) key->prefixlen = value[keysize]; memcpy(key->data, value, keysize); - r = bpf_map_update(map, key, value, 0); + r = bpf_map_update_elem(map, key, value, 0); assert(!r); } @@ -266,32 +267,32 @@ static void test_lpm_ipaddr(void) value = 1; key_ipv4->prefixlen = 16; inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); value = 2; key_ipv4->prefixlen = 24; inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); value = 3; key_ipv4->prefixlen = 24; inet_pton(AF_INET, "192.168.128.0", key_ipv4->data); - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); value = 5; key_ipv4->prefixlen = 24; inet_pton(AF_INET, "192.168.1.0", key_ipv4->data); - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + assert(bpf_map_update_elem(map_fd_ipv4, 
key_ipv4, &value, 0) == 0); value = 4; key_ipv4->prefixlen = 23; inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); value = 0xdeadbeef; key_ipv6->prefixlen = 64; inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data); - assert(bpf_map_update(map_fd_ipv6, key_ipv6, &value, 0) == 0); + assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0); /* Set tprefixlen to maximum for lookups */ key_ipv4->prefixlen = 32; diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 9f7bd1915c21..2f61b5817af4 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -18,6 +18,7 @@ #include #include +#include #include "bpf_sys.h" #include "bpf_util.h" @@ -119,15 +120,16 @@ static void test_lru_sanity0(int map_type, int map_flags) /* insert key=1 element */ key = 1; - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); /* BPF_NOEXIST means: add new element if it doesn't exist */ - assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1 /* key=1 already exists */ - errno == EEXIST); + && errno == EEXIST); - assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 && + assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 && errno == EINVAL); /* insert key=2 element */ @@ -138,11 +140,11 @@ static void test_lru_sanity0(int map_type, int map_flags) errno == ENOENT); /* BPF_EXIST means: update existing element */ - assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 && + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 && /* key=2 is not there */ errno == ENOENT); - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); /* insert key=3 element */ @@ -159,8 +161,9 @@ static void test_lru_sanity0(int map_type, int map_flags) assert(value[0] == 1234); key = 3; - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); /* key=2 has been removed from the LRU */ key = 2; @@ -217,14 +220,15 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) /* Insert 1 to tgt_free (+tgt_free keys) */ end_key = 1 + tgt_free; for (key = 1; key < end_key; key++) - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); /* Lookup 1 to tgt_free/2 */ end_key = 1 + batch_size; for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup(lru_map_fd, &key, value)); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } /* Insert 1+tgt_free to 2*tgt_free @@ -234,9 +238,10 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) key = 1 + tgt_free; end_key = key + tgt_free; for (; key < end_key; key++) { - 
assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -301,9 +306,10 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) /* Insert 1 to tgt_free (+tgt_free keys) */ end_key = 1 + tgt_free; for (key = 1; key < end_key; key++) - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); - /* Any bpf_map_update will require to acquire a new node + /* Any bpf_map_update_elem will require to acquire a new node * from LRU first. * * The local list is running out of free nodes. @@ -316,10 +322,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) */ key = 1; if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); assert(!bpf_map_delete(lru_map_fd, &key)); } else { - assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST)); + assert(bpf_map_update_elem(lru_map_fd, &key, value, + BPF_EXIST)); } /* Re-insert 1 to tgt_free/2 again and do a lookup @@ -329,11 +337,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) value[0] = 4321; for (key = 1; key < end_key; key++) { assert(bpf_map_lookup(lru_map_fd, &key, value)); - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); assert(!bpf_map_lookup(lru_map_fd, &key, value)); assert(value[0] == 4321); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } value[0] = 1234; @@ -344,14 +353,16 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) /* These newly added but not referenced keys will be * gone during the next LRU shrink. 
*/ - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */ end_key = key + tgt_free; for (; key < end_key; key++) { - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -401,14 +412,15 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */ end_key = 1 + (2 * tgt_free); for (key = 1; key < end_key; key++) - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); /* Lookup key 1 to tgt_free*3/2 */ end_key = tgt_free + batch_size; for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup(lru_map_fd, &key, value)); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } /* Add 1+2*tgt_free to tgt_free*5/2 @@ -417,9 +429,10 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) key = 2 * tgt_free + 1; end_key = key + batch_size; for (; key < end_key; key++) { - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -457,15 +470,16 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; for (key = 1; key <= 2 * tgt_free; key++) - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); key = 1; - assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); for (key = 1; key <= tgt_free; key++) { assert(!bpf_map_lookup(lru_map_fd, &key, value)); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } for (; key <= 2 * tgt_free; key++) { @@ -475,9 +489,10 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) end_key = key + 2 * tgt_free; for (; key < end_key; key++) { - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_update(expected_map_fd, &key, value, - BPF_NOEXIST)); + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -498,7 +513,7 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd) value[0] = 1234; key = last_key + 1; - assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); + assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST)); assert(!bpf_map_lookup(map_fd, &key, value)); /* Cannot find the last key because it was removed by LRU */ @@ -523,7 +538,7 @@ static void test_lru_sanity5(int map_type, int map_flags) value[0] = 1234; key = 0; - assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); + 
assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST)); while (sched_next_online(0, &next_cpu) != -1) { pid_t pid; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index eedfef8d2946..d4b9ba6d6a0e 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -21,6 +21,7 @@ #include +#include #include "bpf_sys.h" #include "bpf_util.h" @@ -41,16 +42,17 @@ static void test_hashmap(int task, void *data) key = 1; value = 1234; /* Insert key=1 element. */ - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); value = 0; /* BPF_NOEXIST means add new element if it doesn't exist. */ - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && /* key=1 already exists. */ errno == EEXIST); /* -1 is an invalid flag. */ - assert(bpf_map_update(fd, &key, &value, -1) == -1 && errno == EINVAL); + assert(bpf_map_update_elem(fd, &key, &value, -1) == -1 && + errno == EINVAL); /* Check that key=1 can be found. */ assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); @@ -60,27 +62,27 @@ static void test_hashmap(int task, void *data) assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); /* BPF_EXIST means update existing element. */ - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 && /* key=2 is not there. */ errno == ENOENT); /* Insert key=2 element. */ - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0); /* key=1 and key=2 were inserted, check that key=0 cannot be * inserted due to max_entries limit. */ key = 0; - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && errno == E2BIG); /* Update existing element, though the map is full. */ key = 1; - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); + assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); key = 2; - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); key = 1; - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); /* Check that key = 0 doesn't exist. */ key = 0; @@ -130,16 +132,17 @@ static void test_hashmap_percpu(int task, void *data) key = 1; /* Insert key=1 element. */ assert(!(expected_key_mask & key)); - assert(bpf_map_update(fd, &key, value, BPF_ANY) == 0); + assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0); expected_key_mask |= key; /* BPF_NOEXIST means add new element if it doesn't exist. */ - assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 && /* key=1 already exists. */ errno == EEXIST); /* -1 is an invalid flag. */ - assert(bpf_map_update(fd, &key, value, -1) == -1 && errno == EINVAL); + assert(bpf_map_update_elem(fd, &key, value, -1) == -1 && + errno == EINVAL); /* Check that key=1 can be found. Value could be 0 if the lookup * was run from a different CPU. @@ -152,20 +155,20 @@ static void test_hashmap_percpu(int task, void *data) assert(bpf_map_lookup(fd, &key, value) == -1 && errno == ENOENT); /* BPF_EXIST means update existing element. 
*/ - assert(bpf_map_update(fd, &key, value, BPF_EXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == -1 && /* key=2 is not there. */ errno == ENOENT); /* Insert key=2 element. */ assert(!(expected_key_mask & key)); - assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == 0); + assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0); expected_key_mask |= key; /* key=1 and key=2 were inserted, check that key=0 cannot be * inserted due to max_entries limit. */ key = 0; - assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 && errno == E2BIG); /* Check that key = 0 doesn't exist. */ @@ -187,7 +190,7 @@ static void test_hashmap_percpu(int task, void *data) /* Update with BPF_EXIST. */ key = 1; - assert(bpf_map_update(fd, &key, value, BPF_EXIST) == 0); + assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0); /* Delete both elements. */ key = 1; @@ -219,10 +222,10 @@ static void test_arraymap(int task, void *data) key = 1; value = 1234; /* Insert key=1 element. */ - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); value = 0; - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && errno == EEXIST); /* Check that key=1 can be found. */ @@ -236,7 +239,7 @@ static void test_arraymap(int task, void *data) * due to max_entries limit. */ key = 2; - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 && errno == E2BIG); /* Check that key = 2 doesn't exist. */ @@ -275,10 +278,10 @@ static void test_arraymap_percpu(int task, void *data) key = 1; /* Insert key=1 element. */ - assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); + assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); values[0] = 0; - assert(bpf_map_update(fd, &key, values, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 && errno == EEXIST); /* Check that key=1 can be found. */ @@ -291,7 +294,7 @@ static void test_arraymap_percpu(int task, void *data) /* Check that key=2 cannot be inserted due to max_entries limit. */ key = 2; - assert(bpf_map_update(fd, &key, values, BPF_EXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) == -1 && errno == E2BIG); /* Check that key = 2 doesn't exist. */ @@ -331,7 +334,7 @@ static void test_arraymap_percpu_many_keys(void) values[i] = i + 10; for (key = 0; key < nr_keys; key++) - assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); + assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); for (key = 0; key < nr_keys; key++) { for (i = 0; i < nr_cpus; i++) @@ -368,11 +371,11 @@ static void test_map_large(void) key = (struct bigkey) { .c = i }; value = i; - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0); } key.c = -1; - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && errno == E2BIG); /* Iterate through all elements. 
*/ @@ -437,8 +440,10 @@ static void do_work(int fn, void *data) key = value = i; if (do_update) { - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); + assert(bpf_map_update_elem(fd, &key, &value, + BPF_NOEXIST) == 0); + assert(bpf_map_update_elem(fd, &key, &value, + BPF_EXIST) == 0); } else { assert(bpf_map_delete(fd, &key) == 0); } @@ -468,7 +473,7 @@ static void test_map_parallel(void) run_parallel(TASKS, do_work, data); /* Check that key=0 is already there. */ - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && errno == EEXIST); /* Check that all elements were inserted. */ -- cgit v1.2.3 From e5ff7c4019c6cb6e86bc9d6d16e8a8f921133c70 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:40 +0100 Subject: bpf: Use bpf_map_lookup_elem() from the library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace bpf_map_lookup() with bpf_map_lookup_elem() calls. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/lib/bpf/bpf.c | 2 +- tools/lib/bpf/bpf.h | 2 +- tools/testing/selftests/bpf/bpf_sys.h | 11 ----------- tools/testing/selftests/bpf/test_lpm_map.c | 16 ++++++++-------- tools/testing/selftests/bpf/test_lru_map.c | 28 ++++++++++++++-------------- tools/testing/selftests/bpf/test_maps.c | 30 +++++++++++++++--------------- 6 files changed, 39 insertions(+), 50 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 1de762677a2f..b1a1f58b99e0 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -112,7 +112,7 @@ int bpf_map_update_elem(int fd, const void *key, const void *value, return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); } -int bpf_map_lookup_elem(int fd, void *key, void *value) +int bpf_map_lookup_elem(int fd, const void *key, void *value) { union bpf_attr attr; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 2458534c8b33..171cf594f782 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -36,7 +36,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); -int bpf_map_lookup_elem(int fd, void *key, void *value); +int bpf_map_lookup_elem(int fd, const void *key, void *value); int bpf_map_delete_elem(int fd, void *key); int bpf_map_get_next_key(int fd, void *key, void *next_key); int bpf_obj_pin(int fd, const char *pathname); diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h index e08dec0db9e0..0a5a6060db70 100644 --- a/tools/testing/selftests/bpf/bpf_sys.h +++ b/tools/testing/selftests/bpf/bpf_sys.h @@ -24,17 +24,6 @@ static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size) #endif } -static inline int bpf_map_lookup(int fd, const void *key, void *value) -{ - union bpf_attr attr = {}; - - attr.map_fd = fd; - attr.key = bpf_ptr_to_u64(key); - attr.value = bpf_ptr_to_u64(value); - - return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); -} - static inline int bpf_map_delete(int fd, const void *key) { union bpf_attr attr = {}; diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index e29ffbcd2932..bd08394c26cb 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -211,7 +211,7 @@ static 
void test_lpm_map(int keysize) key->prefixlen = 8 * keysize; memcpy(key->data, data, keysize); - r = bpf_map_lookup(map, key, value); + r = bpf_map_lookup_elem(map, key, value); assert(!r || errno == ENOENT); assert(!t == !!r); @@ -300,32 +300,32 @@ static void test_lpm_ipaddr(void) /* Test some lookups that should come back with a value */ inet_pton(AF_INET, "192.168.128.23", key_ipv4->data); - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0); + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); assert(value == 3); inet_pton(AF_INET, "192.168.0.1", key_ipv4->data); - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0); + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); assert(value == 2); inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data); - assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0); + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); assert(value == 0xdeadbeef); inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data); - assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0); + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); assert(value == 0xdeadbeef); /* Test some lookups that should not match any entry */ inet_pton(AF_INET, "10.0.0.1", key_ipv4->data); - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 && + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 && errno == ENOENT); inet_pton(AF_INET, "11.11.11.11", key_ipv4->data); - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 && + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 && errno == ENOENT); inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data); - assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == -1 && + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 && errno == ENOENT); close(map_fd_ipv4); diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 2f61b5817af4..eccf6d96e551 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -47,8 +47,8 @@ static int map_subset(int map0, int map1) int ret; while (!bpf_map_next_key(map1, &next_key, &next_key)) { - assert(!bpf_map_lookup(map1, &next_key, value1)); - ret = bpf_map_lookup(map0, &next_key, value0); + assert(!bpf_map_lookup_elem(map1, &next_key, value1)); + ret = bpf_map_lookup_elem(map0, &next_key, value0); if (ret) { printf("key:%llu not found from map. 
%s(%d)\n", next_key, strerror(errno), errno); @@ -136,7 +136,7 @@ static void test_lru_sanity0(int map_type, int map_flags) /* check that key=2 is not found */ key = 2; - assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 && + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && errno == ENOENT); /* BPF_EXIST means: update existing element */ @@ -150,14 +150,14 @@ static void test_lru_sanity0(int map_type, int map_flags) /* check that key=3 is not found */ key = 3; - assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 && + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && errno == ENOENT); /* check that key=1 can be found and mark the ref bit to * stop LRU from removing key=1 */ key = 1; - assert(!bpf_map_lookup(lru_map_fd, &key, value)); + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(value[0] == 1234); key = 3; @@ -167,7 +167,7 @@ static void test_lru_sanity0(int map_type, int map_flags) /* key=2 has been removed from the LRU */ key = 2; - assert(bpf_map_lookup(lru_map_fd, &key, value) == -1); + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1); assert(map_equal(lru_map_fd, expected_map_fd)); @@ -226,7 +226,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) /* Lookup 1 to tgt_free/2 */ end_key = 1 + batch_size; for (key = 1; key < end_key; key++) { - assert(!bpf_map_lookup(lru_map_fd, &key, value)); + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, BPF_NOEXIST)); } @@ -336,10 +336,10 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) end_key = 1 + batch_size; value[0] = 4321; for (key = 1; key < end_key; key++) { - assert(bpf_map_lookup(lru_map_fd, &key, value)); + assert(bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_lookup(lru_map_fd, &key, value)); + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(value[0] == 4321); assert(!bpf_map_update_elem(expected_map_fd, &key, value, BPF_NOEXIST)); @@ -418,7 +418,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) /* Lookup key 1 to tgt_free*3/2 */ end_key = tgt_free + batch_size; for (key = 1; key < end_key; key++) { - assert(!bpf_map_lookup(lru_map_fd, &key, value)); + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, BPF_NOEXIST)); } @@ -477,7 +477,7 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); for (key = 1; key <= tgt_free; key++) { - assert(!bpf_map_lookup(lru_map_fd, &key, value)); + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, BPF_NOEXIST)); } @@ -508,16 +508,16 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd) unsigned long long key, value[nr_cpus]; /* Ensure the last key inserted by previous CPU can be found */ - assert(!bpf_map_lookup(map_fd, &last_key, value)); + assert(!bpf_map_lookup_elem(map_fd, &last_key, value)); value[0] = 1234; key = last_key + 1; assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_lookup(map_fd, &key, value)); + assert(!bpf_map_lookup_elem(map_fd, &key, value)); /* Cannot find the last key because it was removed by LRU */ - assert(bpf_map_lookup(map_fd, &last_key, value)); + assert(bpf_map_lookup_elem(map_fd, 
&last_key, value)); } /* Test map with only one element */ diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index d4b9ba6d6a0e..5db1a939af69 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -55,11 +55,11 @@ static void test_hashmap(int task, void *data) errno == EINVAL); /* Check that key=1 can be found. */ - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234); key = 2; /* Check that key=2 is not found. */ - assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); /* BPF_EXIST means update existing element. */ assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 && @@ -148,11 +148,11 @@ static void test_hashmap_percpu(int task, void *data) * was run from a different CPU. */ value[0] = 1; - assert(bpf_map_lookup(fd, &key, value) == 0 && value[0] == 100); + assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100); key = 2; /* Check that key=2 is not found. */ - assert(bpf_map_lookup(fd, &key, value) == -1 && errno == ENOENT); + assert(bpf_map_lookup_elem(fd, &key, value) == -1 && errno == ENOENT); /* BPF_EXIST means update existing element. */ assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == -1 && @@ -179,7 +179,7 @@ static void test_hashmap_percpu(int task, void *data) assert((expected_key_mask & next_key) == next_key); expected_key_mask &= ~next_key; - assert(bpf_map_lookup(fd, &next_key, value) == 0); + assert(bpf_map_lookup_elem(fd, &next_key, value) == 0); for (i = 0; i < nr_cpus; i++) assert(value[i] == i + 100); @@ -229,11 +229,11 @@ static void test_arraymap(int task, void *data) errno == EEXIST); /* Check that key=1 can be found. */ - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234); key = 0; /* Check that key=0 is also found and zero initialized. */ - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0); /* key=0 and key=1 were inserted, check that key=2 cannot be inserted * due to max_entries limit. @@ -243,7 +243,7 @@ static void test_arraymap(int task, void *data) errno == E2BIG); /* Check that key = 2 doesn't exist. */ - assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); /* Iterate over two elements. */ assert(bpf_map_next_key(fd, &key, &next_key) == 0 && @@ -285,11 +285,11 @@ static void test_arraymap_percpu(int task, void *data) errno == EEXIST); /* Check that key=1 can be found. */ - assert(bpf_map_lookup(fd, &key, values) == 0 && values[0] == 100); + assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100); key = 0; /* Check that key=0 is also found and zero initialized. */ - assert(bpf_map_lookup(fd, &key, values) == 0 && + assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 0 && values[nr_cpus - 1] == 0); /* Check that key=2 cannot be inserted due to max_entries limit. */ @@ -298,7 +298,7 @@ static void test_arraymap_percpu(int task, void *data) errno == E2BIG); /* Check that key = 2 doesn't exist. */ - assert(bpf_map_lookup(fd, &key, values) == -1 && errno == ENOENT); + assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT); /* Iterate over two elements. 
*/ assert(bpf_map_next_key(fd, &key, &next_key) == 0 && @@ -340,7 +340,7 @@ static void test_arraymap_percpu_many_keys(void) for (i = 0; i < nr_cpus; i++) values[i] = 0; - assert(bpf_map_lookup(fd, &key, values) == 0); + assert(bpf_map_lookup_elem(fd, &key, values) == 0); for (i = 0; i < nr_cpus; i++) assert(values[i] == i + 10); @@ -384,9 +384,9 @@ static void test_map_large(void) assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); key.c = 0; - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0); key.a = 1; - assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); close(fd); } @@ -486,7 +486,7 @@ static void test_map_parallel(void) for (i = 0; i < MAP_SIZE; i++) { key = MAP_SIZE - i - 1; - assert(bpf_map_lookup(fd, &key, &value) == 0 && + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == key); } -- cgit v1.2.3 From e58383b803499bd623b737070038af94d0b8a3c7 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:41 +0100 Subject: bpf: Use bpf_map_delete_elem() from the library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace bpf_map_delete() with bpf_map_delete_elem() calls. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/lib/bpf/bpf.c | 2 +- tools/lib/bpf/bpf.h | 2 +- tools/testing/selftests/bpf/bpf_sys.h | 10 ---------- tools/testing/selftests/bpf/test_lru_map.c | 6 +++--- tools/testing/selftests/bpf/test_maps.c | 22 +++++++++++----------- 5 files changed, 16 insertions(+), 26 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index b1a1f58b99e0..eab8c6bfbf8f 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -124,7 +124,7 @@ int bpf_map_lookup_elem(int fd, const void *key, void *value) return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); } -int bpf_map_delete_elem(int fd, void *key) +int bpf_map_delete_elem(int fd, const void *key) { union bpf_attr attr; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 171cf594f782..f559f648db45 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -37,7 +37,7 @@ int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); int bpf_map_lookup_elem(int fd, const void *key, void *value); -int bpf_map_delete_elem(int fd, void *key); +int bpf_map_delete_elem(int fd, const void *key); int bpf_map_get_next_key(int fd, void *key, void *next_key); int bpf_obj_pin(int fd, const char *pathname); int bpf_obj_get(const char *pathname); diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h index 0a5a6060db70..17581a42e1d9 100644 --- a/tools/testing/selftests/bpf/bpf_sys.h +++ b/tools/testing/selftests/bpf/bpf_sys.h @@ -24,16 +24,6 @@ static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size) #endif } -static inline int bpf_map_delete(int fd, const void *key) -{ - union bpf_attr attr = {}; - - attr.map_fd = fd; - attr.key = bpf_ptr_to_u64(key); - - return bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); -} - static inline int bpf_map_next_key(int fd, const void *key, void *next_key) { union bpf_attr attr = {}; diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index eccf6d96e551..859c940a6e41 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ 
b/tools/testing/selftests/bpf/test_lru_map.c @@ -324,7 +324,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - assert(!bpf_map_delete(lru_map_fd, &key)); + assert(!bpf_map_delete_elem(lru_map_fd, &key)); } else { assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST)); @@ -483,8 +483,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) } for (; key <= 2 * tgt_free; key++) { - assert(!bpf_map_delete(lru_map_fd, &key)); - assert(bpf_map_delete(lru_map_fd, &key)); + assert(!bpf_map_delete_elem(lru_map_fd, &key)); + assert(bpf_map_delete_elem(lru_map_fd, &key)); } end_key = key + 2 * tgt_free; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 5db1a939af69..0f9f90455375 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -86,7 +86,7 @@ static void test_hashmap(int task, void *data) /* Check that key = 0 doesn't exist. */ key = 0; - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ assert(bpf_map_next_key(fd, &key, &next_key) == 0 && @@ -98,10 +98,10 @@ static void test_hashmap(int task, void *data) /* Delete both elements. */ key = 1; - assert(bpf_map_delete(fd, &key) == 0); + assert(bpf_map_delete_elem(fd, &key) == 0); key = 2; - assert(bpf_map_delete(fd, &key) == 0); - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); + assert(bpf_map_delete_elem(fd, &key) == 0); + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); key = 0; /* Check that map is empty. */ @@ -172,7 +172,7 @@ static void test_hashmap_percpu(int task, void *data) errno == E2BIG); /* Check that key = 0 doesn't exist. */ - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ while (!bpf_map_next_key(fd, &key, &next_key)) { @@ -194,10 +194,10 @@ static void test_hashmap_percpu(int task, void *data) /* Delete both elements. */ key = 1; - assert(bpf_map_delete(fd, &key) == 0); + assert(bpf_map_delete_elem(fd, &key) == 0); key = 2; - assert(bpf_map_delete(fd, &key) == 0); - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); + assert(bpf_map_delete_elem(fd, &key) == 0); + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); key = 0; /* Check that map is empty. */ @@ -255,7 +255,7 @@ static void test_arraymap(int task, void *data) /* Delete shouldn't succeed. */ key = 1; - assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL); close(fd); } @@ -310,7 +310,7 @@ static void test_arraymap_percpu(int task, void *data) /* Delete shouldn't succeed. 
*/ key = 1; - assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL); close(fd); } @@ -445,7 +445,7 @@ static void do_work(int fn, void *data) assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); } else { - assert(bpf_map_delete(fd, &key) == 0); + assert(bpf_map_delete_elem(fd, &key) == 0); } } } -- cgit v1.2.3 From 5f155c2563592b1908a7df2dcbd44893fde3e419 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:42 +0100 Subject: bpf: Use bpf_map_get_next_key() from the library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace bpf_map_next_key() with bpf_map_get_next_key() calls. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/lib/bpf/bpf.c | 2 +- tools/lib/bpf/bpf.h | 2 +- tools/testing/selftests/bpf/bpf_sys.h | 11 ---------- tools/testing/selftests/bpf/test_lru_map.c | 2 +- tools/testing/selftests/bpf/test_maps.c | 34 +++++++++++++++--------------- 5 files changed, 20 insertions(+), 31 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index eab8c6bfbf8f..f8a2b7fa7741 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -135,7 +135,7 @@ int bpf_map_delete_elem(int fd, const void *key) return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); } -int bpf_map_get_next_key(int fd, void *key, void *next_key) +int bpf_map_get_next_key(int fd, const void *key, void *next_key) { union bpf_attr attr; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index f559f648db45..88f07c15423a 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -38,7 +38,7 @@ int bpf_map_update_elem(int fd, const void *key, const void *value, int bpf_map_lookup_elem(int fd, const void *key, void *value); int bpf_map_delete_elem(int fd, const void *key); -int bpf_map_get_next_key(int fd, void *key, void *next_key); +int bpf_map_get_next_key(int fd, const void *key, void *next_key); int bpf_obj_pin(int fd, const char *pathname); int bpf_obj_get(const char *pathname); int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type); diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h index 17581a42e1d9..aeff99f0a411 100644 --- a/tools/testing/selftests/bpf/bpf_sys.h +++ b/tools/testing/selftests/bpf/bpf_sys.h @@ -24,17 +24,6 @@ static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size) #endif } -static inline int bpf_map_next_key(int fd, const void *key, void *next_key) -{ - union bpf_attr attr = {}; - - attr.map_fd = fd; - attr.key = bpf_ptr_to_u64(key); - attr.next_key = bpf_ptr_to_u64(next_key); - - return bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); -} - static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key, uint32_t size_value, uint32_t max_elem, uint32_t flags) diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 859c940a6e41..360f7e006eb6 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -46,7 +46,7 @@ static int map_subset(int map0, int map1) unsigned long long value0[nr_cpus], value1[nr_cpus]; int ret; - while (!bpf_map_next_key(map1, &next_key, &next_key)) { + while (!bpf_map_get_next_key(map1, &next_key, &next_key)) { assert(!bpf_map_lookup_elem(map1, &next_key, value1)); ret = bpf_map_lookup_elem(map0, &next_key, value0); if (ret) { diff --git 
a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 0f9f90455375..be52c808d6cf 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -89,11 +89,11 @@ static void test_hashmap(int task, void *data) assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ - assert(bpf_map_next_key(fd, &key, &next_key) == 0 && + assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && (next_key == 1 || next_key == 2)); - assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && (next_key == 1 || next_key == 2)); - assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && errno == ENOENT); /* Delete both elements. */ @@ -105,7 +105,7 @@ static void test_hashmap(int task, void *data) key = 0; /* Check that map is empty. */ - assert(bpf_map_next_key(fd, &key, &next_key) == -1 && + assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && errno == ENOENT); close(fd); @@ -175,7 +175,7 @@ static void test_hashmap_percpu(int task, void *data) assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ - while (!bpf_map_next_key(fd, &key, &next_key)) { + while (!bpf_map_get_next_key(fd, &key, &next_key)) { assert((expected_key_mask & next_key) == next_key); expected_key_mask &= ~next_key; @@ -201,7 +201,7 @@ static void test_hashmap_percpu(int task, void *data) key = 0; /* Check that map is empty. */ - assert(bpf_map_next_key(fd, &key, &next_key) == -1 && + assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && errno == ENOENT); close(fd); @@ -246,11 +246,11 @@ static void test_arraymap(int task, void *data) assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); /* Iterate over two elements. */ - assert(bpf_map_next_key(fd, &key, &next_key) == 0 && + assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && next_key == 0); - assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && next_key == 1); - assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && errno == ENOENT); /* Delete shouldn't succeed. */ @@ -301,11 +301,11 @@ static void test_arraymap_percpu(int task, void *data) assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT); /* Iterate over two elements. */ - assert(bpf_map_next_key(fd, &key, &next_key) == 0 && + assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && next_key == 0); - assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && next_key == 1); - assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && errno == ENOENT); /* Delete shouldn't succeed. */ @@ -380,8 +380,8 @@ static void test_map_large(void) /* Iterate through all elements. */ for (i = 0; i < MAP_SIZE; i++) - assert(bpf_map_next_key(fd, &key, &key) == 0); - assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); + assert(bpf_map_get_next_key(fd, &key, &key) == 0); + assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); key.c = 0; assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0); @@ -479,8 +479,8 @@ static void test_map_parallel(void) /* Check that all elements were inserted. 
*/ key = -1; for (i = 0; i < MAP_SIZE; i++) - assert(bpf_map_next_key(fd, &key, &key) == 0); - assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); + assert(bpf_map_get_next_key(fd, &key, &key) == 0); + assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); /* Another check for all elements */ for (i = 0; i < MAP_SIZE; i++) { @@ -496,7 +496,7 @@ static void test_map_parallel(void) /* Nothing should be left. */ key = -1; - assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); + assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); } static void run_all_tests(void) -- cgit v1.2.3 From f4874d01beba16a1bf2512929b9d460e003d7f3d Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:43 +0100 Subject: bpf: Use bpf_create_map() from the library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace bpf_map_create() with bpf_create_map() calls. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/bpf_sys.h | 15 --------------- tools/testing/selftests/bpf/test_lpm_map.c | 6 +++--- tools/testing/selftests/bpf/test_lru_map.c | 4 ++-- tools/testing/selftests/bpf/test_maps.c | 14 +++++++------- tools/testing/selftests/bpf/test_tag.c | 2 +- tools/testing/selftests/bpf/test_verifier.c | 4 ++-- 6 files changed, 15 insertions(+), 30 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h index aeff99f0a411..aa076a8a07f7 100644 --- a/tools/testing/selftests/bpf/bpf_sys.h +++ b/tools/testing/selftests/bpf/bpf_sys.h @@ -24,19 +24,4 @@ static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size) #endif } -static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key, - uint32_t size_value, uint32_t max_elem, - uint32_t flags) -{ - union bpf_attr attr = {}; - - attr.map_type = type; - attr.key_size = size_key; - attr.value_size = size_value; - attr.max_entries = max_elem; - attr.map_flags = flags; - - return bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); -} - #endif /* __BPF_SYS__ */ diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index bd08394c26cb..3cc812cac2d7 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -183,7 +183,7 @@ static void test_lpm_map(int keysize) key = alloca(sizeof(*key) + keysize); memset(key, 0, sizeof(*key) + keysize); - map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, + map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, sizeof(*key) + keysize, keysize + 1, 4096, @@ -253,12 +253,12 @@ static void test_lpm_ipaddr(void) key_ipv4 = alloca(key_size_ipv4); key_ipv6 = alloca(key_size_ipv6); - map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, + map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size_ipv4, sizeof(value), 100, BPF_F_NO_PREALLOC); assert(map_fd_ipv4 >= 0); - map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, + map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size_ipv6, sizeof(value), 100, BPF_F_NO_PREALLOC); assert(map_fd_ipv6 >= 0); diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 360f7e006eb6..48973ded1c96 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -31,11 +31,11 @@ static int create_map(int map_type, int map_flags, unsigned int size) { int map_fd; - map_fd = 
bpf_map_create(map_type, sizeof(unsigned long long), + map_fd = bpf_create_map(map_type, sizeof(unsigned long long), sizeof(unsigned long long), size, map_flags); if (map_fd == -1) - perror("bpf_map_create"); + perror("bpf_create_map"); return map_fd; } diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index be52c808d6cf..39168499f43f 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -32,7 +32,7 @@ static void test_hashmap(int task, void *data) long long key, next_key, value; int fd; - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 2, map_flags); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); @@ -119,7 +119,7 @@ static void test_hashmap_percpu(int task, void *data) int expected_key_mask = 0; int fd, i; - fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), + fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), sizeof(value[0]), 2, map_flags); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); @@ -212,7 +212,7 @@ static void test_arraymap(int task, void *data) int key, next_key, fd; long long value; - fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), + fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 2, 0); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); @@ -266,7 +266,7 @@ static void test_arraymap_percpu(int task, void *data) int key, next_key, fd, i; long values[nr_cpus]; - fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), + fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), sizeof(values[0]), 2, 0); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); @@ -322,7 +322,7 @@ static void test_arraymap_percpu_many_keys(void) long values[nr_cpus]; int key, fd, i; - fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), + fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), sizeof(values[0]), nr_keys, 0); if (fd < 0) { printf("Failed to create per-cpu arraymap '%s'!\n", @@ -360,7 +360,7 @@ static void test_map_large(void) } key; int fd, i, value; - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), MAP_SIZE, map_flags); if (fd < 0) { printf("Failed to create large map '%s'!\n", strerror(errno)); @@ -455,7 +455,7 @@ static void test_map_parallel(void) int i, fd, key = 0, value = 0; int data[2]; - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), MAP_SIZE, map_flags); if (fd < 0) { printf("Failed to create map for parallel test '%s'!\n", diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index dc209721ffd5..ae4263638cd5 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -189,7 +189,7 @@ int main(void) int i, fd_map; setrlimit(RLIMIT_MEMLOCK, &rinf); - fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(int), + fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), sizeof(int), 1, BPF_F_NO_PREALLOC); assert(fd_map > 0); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 247830ecf68e..63818cbb9fb1 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -4467,7 +4467,7 @@ 
static int create_map(uint32_t size_value, uint32_t max_elem) { int fd; - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long), + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long), size_value, max_elem, BPF_F_NO_PREALLOC); if (fd < 0) printf("Failed to create hash map '%s'!\n", strerror(errno)); @@ -4479,7 +4479,7 @@ static int create_prog_array(void) { int fd; - fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), + fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), sizeof(int), 4, 0); if (fd < 0) printf("Failed to create prog array '%s'!\n", strerror(errno)); -- cgit v1.2.3 From 702498a1426bc95b6f49f9c5fba616110cbd3947 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:44 +0100 Subject: bpf: Remove bpf_sys.h from selftests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add require dependency headers. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/lib/bpf/bpf.c | 6 ++++++ tools/testing/selftests/bpf/bpf_sys.h | 27 --------------------------- tools/testing/selftests/bpf/test_lpm_map.c | 1 - tools/testing/selftests/bpf/test_lru_map.c | 1 - tools/testing/selftests/bpf/test_maps.c | 1 - tools/testing/selftests/bpf/test_tag.c | 3 +-- tools/testing/selftests/bpf/test_verifier.c | 4 ++-- 7 files changed, 9 insertions(+), 34 deletions(-) delete mode 100644 tools/testing/selftests/bpf/bpf_sys.h diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index f8a2b7fa7741..50e04cc5dddd 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -50,7 +50,13 @@ static __u64 ptr_to_u64(const void *ptr) static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { +#ifdef __NR_bpf return syscall(__NR_bpf, cmd, attr, size); +#else + fprintf(stderr, "No bpf syscall, kernel headers too old?\n"); + errno = ENOSYS; + return -1; +#endif } int bpf_create_map(enum bpf_map_type map_type, int key_size, diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h deleted file mode 100644 index aa076a8a07f7..000000000000 --- a/tools/testing/selftests/bpf/bpf_sys.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef __BPF_SYS__ -#define __BPF_SYS__ - -#include -#include - -#include - -#include - -static inline __u64 bpf_ptr_to_u64(const void *ptr) -{ - return (__u64)(unsigned long) ptr; -} - -static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size) -{ -#ifdef __NR_bpf - return syscall(__NR_bpf, cmd, attr, size); -#else - fprintf(stderr, "No bpf syscall, kernel headers too old?\n"); - errno = ENOSYS; - return -1; -#endif -} - -#endif /* __BPF_SYS__ */ diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 3cc812cac2d7..e97565243d59 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -23,7 +23,6 @@ #include #include -#include "bpf_sys.h" #include "bpf_util.h" struct tlpm_node { diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 48973ded1c96..00b0aff56e2e 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -19,7 +19,6 @@ #include #include -#include "bpf_sys.h" #include "bpf_util.h" #define LOCAL_FREE_TARGET (128) diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 39168499f43f..cada17ac00b8 100644 --- 
a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -22,7 +22,6 @@ #include #include -#include "bpf_sys.h" #include "bpf_util.h" static int map_flags; diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index ae4263638cd5..de409fc50c35 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -1,3 +1,4 @@ +#include #include #include #include @@ -20,8 +21,6 @@ #include "../../../include/linux/filter.h" -#include "bpf_sys.h" - static struct bpf_insn prog[BPF_MAXINSNS]; static void bpf_gen_imm_prog(unsigned int insns, int fd_map) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 63818cbb9fb1..e1f5b9eea1e8 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -8,7 +8,9 @@ * License as published by the Free Software Foundation. */ +#include #include +#include #include #include #include @@ -28,8 +30,6 @@ #include "../../../include/linux/filter.h" -#include "bpf_sys.h" - #ifndef ARRAY_SIZE # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif -- cgit v1.2.3 From bc6a3d9977b6ea093cd7c567ef83657023e77f26 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 10 Feb 2017 00:21:45 +0100 Subject: bpf: Add test_tag to .gitignore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Shuah Khan Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index d3b1c9bca407..541d9d7fad5a 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -2,3 +2,4 @@ test_verifier test_maps test_lru_map test_lpm_map +test_tag -- cgit v1.2.3 From bed45f79a2afc9d7c279b880dfcac8f27d513b50 Mon Sep 17 00:00:00 2001 From: Pablo Neira Date: Fri, 10 Feb 2017 13:26:27 +0100 Subject: gtp: add MAINTAINERS Add maintainers for this tunnel driver. Include main osmocom.org mailist list too. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 93a41ef97f18..49acc4cb34b1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5638,6 +5638,14 @@ T: git git://linuxtv.org/media_tree.git S: Odd Fixes F: drivers/media/usb/gspca/ +GTP (GPRS Tunneling Protocol) +M: Pablo Neira Ayuso +M: Harald Welte +L: osmocom-net-gprs@lists.osmocom.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/gtp.git +S: Maintained +F: drivers/net/gtp.c + GUID PARTITION TABLE (GPT) M: Davidlohr Bueso L: linux-efi@vger.kernel.org -- cgit v1.2.3 From a8e04698732736f59fefe72c675791a006b76e1d Mon Sep 17 00:00:00 2001 From: Sainath Grandhi Date: Fri, 10 Feb 2017 16:03:46 -0800 Subject: tap: Refactoring macvtap.c macvtap module has code for tap/queue management and link management. This patch splits the code into macvtap_main.c for link management and tap.c for tap/queue management. Functionality in tap.c can be re-used for implementing tap on other virtual interfaces. Signed-off-by: Sainath Grandhi Signed-off-by: David S. 
Miller --- drivers/net/Makefile | 2 + drivers/net/macvtap.c | 1374 -------------------------------------------- drivers/net/macvtap_main.c | 218 +++++++ drivers/net/tap.c | 1186 ++++++++++++++++++++++++++++++++++++++ include/linux/if_macvtap.h | 10 + 5 files changed, 1416 insertions(+), 1374 deletions(-) delete mode 100644 drivers/net/macvtap.c create mode 100644 drivers/net/macvtap_main.c create mode 100644 drivers/net/tap.c create mode 100644 include/linux/if_macvtap.h diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 7336cbd3ef5d..19b03a9fe0f6 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_GTP) += gtp.o obj-$(CONFIG_NLMON) += nlmon.o obj-$(CONFIG_NET_VRF) += vrf.o +macvtap-objs := macvtap_main.o tap.o + # # Networking Drivers # diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c deleted file mode 100644 index c27011bbe30c..000000000000 --- a/drivers/net/macvtap.c +++ /dev/null @@ -1,1374 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* - * A macvtap queue is the central object of this driver, it connects - * an open character device to a macvlan interface. There can be - * multiple queues on one interface, which map back to queues - * implemented in hardware on the underlying device. - * - * macvtap_proto is used to allocate queues through the sock allocation - * mechanism. - * - */ -struct macvtap_queue { - struct sock sk; - struct socket sock; - struct socket_wq wq; - int vnet_hdr_sz; - struct macvlan_dev __rcu *vlan; - struct file *file; - unsigned int flags; - u16 queue_index; - bool enabled; - struct list_head next; - struct skb_array skb_array; -}; - -#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) - -#define MACVTAP_VNET_LE 0x80000000 -#define MACVTAP_VNET_BE 0x40000000 - -#ifdef CONFIG_TUN_VNET_CROSS_LE -static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) -{ - return q->flags & MACVTAP_VNET_BE ? 
false : - virtio_legacy_is_little_endian(); -} - -static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp) -{ - int s = !!(q->flags & MACVTAP_VNET_BE); - - if (put_user(s, sp)) - return -EFAULT; - - return 0; -} - -static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp) -{ - int s; - - if (get_user(s, sp)) - return -EFAULT; - - if (s) - q->flags |= MACVTAP_VNET_BE; - else - q->flags &= ~MACVTAP_VNET_BE; - - return 0; -} -#else -static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) -{ - return virtio_legacy_is_little_endian(); -} - -static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp) -{ - return -EINVAL; -} - -static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp) -{ - return -EINVAL; -} -#endif /* CONFIG_TUN_VNET_CROSS_LE */ - -static inline bool macvtap_is_little_endian(struct macvtap_queue *q) -{ - return q->flags & MACVTAP_VNET_LE || - macvtap_legacy_is_little_endian(q); -} - -static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val) -{ - return __virtio16_to_cpu(macvtap_is_little_endian(q), val); -} - -static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val) -{ - return __cpu_to_virtio16(macvtap_is_little_endian(q), val); -} - -static struct proto macvtap_proto = { - .name = "macvtap", - .owner = THIS_MODULE, - .obj_size = sizeof (struct macvtap_queue), -}; - -/* - * Variables for dealing with macvtaps device numbers. - */ -static dev_t macvtap_major; -#define MACVTAP_NUM_DEVS (1U << MINORBITS) -static DEFINE_MUTEX(minor_lock); -static DEFINE_IDR(minor_idr); - -#define GOODCOPY_LEN 128 -static const void *macvtap_net_namespace(struct device *d) -{ - struct net_device *dev = to_net_dev(d->parent); - return dev_net(dev); -} - -static struct class macvtap_class = { - .name = "macvtap", - .owner = THIS_MODULE, - .ns_type = &net_ns_type_operations, - .namespace = macvtap_net_namespace, -}; -static struct cdev macvtap_cdev; - -static const struct proto_ops macvtap_socket_ops; - -#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) -#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) -#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) - -static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev) -{ - return rcu_dereference(dev->rx_handler_data); -} - -/* - * RCU usage: - * The macvtap_queue and the macvlan_dev are loosely coupled, the - * pointers from one to the other can only be read while rcu_read_lock - * or rtnl is held. - * - * Both the file and the macvlan_dev hold a reference on the macvtap_queue - * through sock_hold(&q->sk). When the macvlan_dev goes away first, - * q->vlan becomes inaccessible. When the files gets closed, - * macvtap_get_queue() fails. - * - * There may still be references to the struct sock inside of the - * queue from outbound SKBs, but these never reference back to the - * file or the dev. The data structure is freed through __sk_free - * when both our references and any pending SKBs are gone. 
- */ - -static int macvtap_enable_queue(struct net_device *dev, struct file *file, - struct macvtap_queue *q) -{ - struct macvlan_dev *vlan = netdev_priv(dev); - int err = -EINVAL; - - ASSERT_RTNL(); - - if (q->enabled) - goto out; - - err = 0; - rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); - q->queue_index = vlan->numvtaps; - q->enabled = true; - - vlan->numvtaps++; -out: - return err; -} - -/* Requires RTNL */ -static int macvtap_set_queue(struct net_device *dev, struct file *file, - struct macvtap_queue *q) -{ - struct macvlan_dev *vlan = netdev_priv(dev); - - if (vlan->numqueues == MAX_MACVTAP_QUEUES) - return -EBUSY; - - rcu_assign_pointer(q->vlan, vlan); - rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); - sock_hold(&q->sk); - - q->file = file; - q->queue_index = vlan->numvtaps; - q->enabled = true; - file->private_data = q; - list_add_tail(&q->next, &vlan->queue_list); - - vlan->numvtaps++; - vlan->numqueues++; - - return 0; -} - -static int macvtap_disable_queue(struct macvtap_queue *q) -{ - struct macvlan_dev *vlan; - struct macvtap_queue *nq; - - ASSERT_RTNL(); - if (!q->enabled) - return -EINVAL; - - vlan = rtnl_dereference(q->vlan); - - if (vlan) { - int index = q->queue_index; - BUG_ON(index >= vlan->numvtaps); - nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]); - nq->queue_index = index; - - rcu_assign_pointer(vlan->taps[index], nq); - RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL); - q->enabled = false; - - vlan->numvtaps--; - } - - return 0; -} - -/* - * The file owning the queue got closed, give up both - * the reference that the files holds as well as the - * one from the macvlan_dev if that still exists. - * - * Using the spinlock makes sure that we don't get - * to the queue again after destroying it. - */ -static void macvtap_put_queue(struct macvtap_queue *q) -{ - struct macvlan_dev *vlan; - - rtnl_lock(); - vlan = rtnl_dereference(q->vlan); - - if (vlan) { - if (q->enabled) - BUG_ON(macvtap_disable_queue(q)); - - vlan->numqueues--; - RCU_INIT_POINTER(q->vlan, NULL); - sock_put(&q->sk); - list_del_init(&q->next); - } - - rtnl_unlock(); - - synchronize_rcu(); - sock_put(&q->sk); -} - -/* - * Select a queue based on the rxq of the device on which this packet - * arrived. If the incoming device is not mq, calculate a flow hash - * to select a queue. If all fails, find the first available queue. - * Cache vlan->numvtaps since it can become zero during the execution - * of this function. - */ -static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, - struct sk_buff *skb) -{ - struct macvlan_dev *vlan = netdev_priv(dev); - struct macvtap_queue *tap = NULL; - /* Access to taps array is protected by rcu, but access to numvtaps - * isn't. Below we use it to lookup a queue, but treat it as a hint - * and validate that the result isn't NULL - in case we are - * racing against queue removal. 
- */ - int numvtaps = ACCESS_ONCE(vlan->numvtaps); - __u32 rxq; - - if (!numvtaps) - goto out; - - if (numvtaps == 1) - goto single; - - /* Check if we can use flow to select a queue */ - rxq = skb_get_hash(skb); - if (rxq) { - tap = rcu_dereference(vlan->taps[rxq % numvtaps]); - goto out; - } - - if (likely(skb_rx_queue_recorded(skb))) { - rxq = skb_get_rx_queue(skb); - - while (unlikely(rxq >= numvtaps)) - rxq -= numvtaps; - - tap = rcu_dereference(vlan->taps[rxq]); - goto out; - } - -single: - tap = rcu_dereference(vlan->taps[0]); -out: - return tap; -} - -/* - * The net_device is going away, give up the reference - * that it holds on all queues and safely set the pointer - * from the queues to NULL. - */ -static void macvtap_del_queues(struct net_device *dev) -{ - struct macvlan_dev *vlan = netdev_priv(dev); - struct macvtap_queue *q, *tmp; - - ASSERT_RTNL(); - list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) { - list_del_init(&q->next); - RCU_INIT_POINTER(q->vlan, NULL); - if (q->enabled) - vlan->numvtaps--; - vlan->numqueues--; - sock_put(&q->sk); - } - BUG_ON(vlan->numvtaps); - BUG_ON(vlan->numqueues); - /* guarantee that any future macvtap_set_queue will fail */ - vlan->numvtaps = MAX_MACVTAP_QUEUES; -} - -static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) -{ - struct sk_buff *skb = *pskb; - struct net_device *dev = skb->dev; - struct macvlan_dev *vlan; - struct macvtap_queue *q; - netdev_features_t features = TAP_FEATURES; - - vlan = macvtap_get_vlan_rcu(dev); - if (!vlan) - return RX_HANDLER_PASS; - - q = macvtap_get_queue(dev, skb); - if (!q) - return RX_HANDLER_PASS; - - if (__skb_array_full(&q->skb_array)) - goto drop; - - skb_push(skb, ETH_HLEN); - - /* Apply the forward feature mask so that we perform segmentation - * according to users wishes. This only works if VNET_HDR is - * enabled. - */ - if (q->flags & IFF_VNET_HDR) - features |= vlan->tap_features; - if (netif_needs_gso(skb, features)) { - struct sk_buff *segs = __skb_gso_segment(skb, features, false); - - if (IS_ERR(segs)) - goto drop; - - if (!segs) { - if (skb_array_produce(&q->skb_array, skb)) - goto drop; - goto wake_up; - } - - consume_skb(skb); - while (segs) { - struct sk_buff *nskb = segs->next; - - segs->next = NULL; - if (skb_array_produce(&q->skb_array, segs)) { - kfree_skb(segs); - kfree_skb_list(nskb); - break; - } - segs = nskb; - } - } else { - /* If we receive a partial checksum and the tap side - * doesn't support checksum offload, compute the checksum. - * Note: it doesn't matter which checksum feature to - * check, we either support them all or none. - */ - if (skb->ip_summed == CHECKSUM_PARTIAL && - !(features & NETIF_F_CSUM_MASK) && - skb_checksum_help(skb)) - goto drop; - if (skb_array_produce(&q->skb_array, skb)) - goto drop; - } - -wake_up: - wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); - return RX_HANDLER_CONSUMED; - -drop: - /* Count errors/drops only here, thus don't care about args. */ - macvlan_count_rx(vlan, 0, 0, 0); - kfree_skb(skb); - return RX_HANDLER_CONSUMED; -} - -static int macvtap_get_minor(struct macvlan_dev *vlan) -{ - int retval = -ENOMEM; - - mutex_lock(&minor_lock); - retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL); - if (retval >= 0) { - vlan->minor = retval; - } else if (retval == -ENOSPC) { - netdev_err(vlan->dev, "Too many macvtap devices\n"); - retval = -EINVAL; - } - mutex_unlock(&minor_lock); - return retval < 0 ? 
retval : 0; -} - -static void macvtap_free_minor(struct macvlan_dev *vlan) -{ - mutex_lock(&minor_lock); - if (vlan->minor) { - idr_remove(&minor_idr, vlan->minor); - vlan->minor = 0; - } - mutex_unlock(&minor_lock); -} - -static struct net_device *dev_get_by_macvtap_minor(int minor) -{ - struct net_device *dev = NULL; - struct macvlan_dev *vlan; - - mutex_lock(&minor_lock); - vlan = idr_find(&minor_idr, minor); - if (vlan) { - dev = vlan->dev; - dev_hold(dev); - } - mutex_unlock(&minor_lock); - return dev; -} - -static int macvtap_newlink(struct net *src_net, - struct net_device *dev, - struct nlattr *tb[], - struct nlattr *data[]) -{ - struct macvlan_dev *vlan = netdev_priv(dev); - int err; - - INIT_LIST_HEAD(&vlan->queue_list); - - /* Since macvlan supports all offloads by default, make - * tap support all offloads also. - */ - vlan->tap_features = TUN_OFFLOADS; - - err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan); - if (err) - return err; - - /* Don't put anything that may fail after macvlan_common_newlink - * because we can't undo what it does. - */ - err = macvlan_common_newlink(src_net, dev, tb, data); - if (err) { - netdev_rx_handler_unregister(dev); - return err; - } - - return 0; -} - -static void macvtap_dellink(struct net_device *dev, - struct list_head *head) -{ - netdev_rx_handler_unregister(dev); - macvtap_del_queues(dev); - macvlan_dellink(dev, head); -} - -static void macvtap_setup(struct net_device *dev) -{ - macvlan_common_setup(dev); - dev->tx_queue_len = TUN_READQ_SIZE; -} - -static struct rtnl_link_ops macvtap_link_ops __read_mostly = { - .kind = "macvtap", - .setup = macvtap_setup, - .newlink = macvtap_newlink, - .dellink = macvtap_dellink, -}; - - -static void macvtap_sock_write_space(struct sock *sk) -{ - wait_queue_head_t *wqueue; - - if (!sock_writeable(sk) || - !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) - return; - - wqueue = sk_sleep(sk); - if (wqueue && waitqueue_active(wqueue)) - wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); -} - -static void macvtap_sock_destruct(struct sock *sk) -{ - struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); - - skb_array_cleanup(&q->skb_array); -} - -static int macvtap_open(struct inode *inode, struct file *file) -{ - struct net *net = current->nsproxy->net_ns; - struct net_device *dev; - struct macvtap_queue *q; - int err = -ENODEV; - - rtnl_lock(); - dev = dev_get_by_macvtap_minor(iminor(inode)); - if (!dev) - goto err; - - err = -ENOMEM; - q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, - &macvtap_proto, 0); - if (!q) - goto err; - - RCU_INIT_POINTER(q->sock.wq, &q->wq); - init_waitqueue_head(&q->wq.wait); - q->sock.type = SOCK_RAW; - q->sock.state = SS_CONNECTED; - q->sock.file = file; - q->sock.ops = &macvtap_socket_ops; - sock_init_data(&q->sock, &q->sk); - q->sk.sk_write_space = macvtap_sock_write_space; - q->sk.sk_destruct = macvtap_sock_destruct; - q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; - q->vnet_hdr_sz = sizeof(struct virtio_net_hdr); - - /* - * so far only KVM virtio_net uses macvtap, enable zero copy between - * guest kernel and host kernel when lower device supports zerocopy - * - * The macvlan supports zerocopy iff the lower device supports zero - * copy so we don't have to look at the lower device directly. 
- */ - if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG)) - sock_set_flag(&q->sk, SOCK_ZEROCOPY); - - err = -ENOMEM; - if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL)) - goto err_array; - - err = macvtap_set_queue(dev, file, q); - if (err) - goto err_queue; - - dev_put(dev); - - rtnl_unlock(); - return err; - -err_queue: - skb_array_cleanup(&q->skb_array); -err_array: - sock_put(&q->sk); -err: - if (dev) - dev_put(dev); - - rtnl_unlock(); - return err; -} - -static int macvtap_release(struct inode *inode, struct file *file) -{ - struct macvtap_queue *q = file->private_data; - macvtap_put_queue(q); - return 0; -} - -static unsigned int macvtap_poll(struct file *file, poll_table * wait) -{ - struct macvtap_queue *q = file->private_data; - unsigned int mask = POLLERR; - - if (!q) - goto out; - - mask = 0; - poll_wait(file, &q->wq.wait, wait); - - if (!skb_array_empty(&q->skb_array)) - mask |= POLLIN | POLLRDNORM; - - if (sock_writeable(&q->sk) || - (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && - sock_writeable(&q->sk))) - mask |= POLLOUT | POLLWRNORM; - -out: - return mask; -} - -static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, - size_t len, size_t linear, - int noblock, int *err) -{ - struct sk_buff *skb; - - /* Under a page? Don't bother with paged skb. */ - if (prepad + len < PAGE_SIZE || !linear) - linear = len; - - skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, - err, 0); - if (!skb) - return NULL; - - skb_reserve(skb, prepad); - skb_put(skb, linear); - skb->data_len = len - linear; - skb->len += len - linear; - - return skb; -} - -/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ -#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) - -/* Get packet from user space buffer */ -static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, - struct iov_iter *from, int noblock) -{ - int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); - struct sk_buff *skb; - struct macvlan_dev *vlan; - unsigned long total_len = iov_iter_count(from); - unsigned long len = total_len; - int err; - struct virtio_net_hdr vnet_hdr = { 0 }; - int vnet_hdr_len = 0; - int copylen = 0; - int depth; - bool zerocopy = false; - size_t linear; - - if (q->flags & IFF_VNET_HDR) { - vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); - - err = -EINVAL; - if (len < vnet_hdr_len) - goto err; - len -= vnet_hdr_len; - - err = -EFAULT; - if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from)) - goto err; - iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); - if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && - macvtap16_to_cpu(q, vnet_hdr.csum_start) + - macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > - macvtap16_to_cpu(q, vnet_hdr.hdr_len)) - vnet_hdr.hdr_len = cpu_to_macvtap16(q, - macvtap16_to_cpu(q, vnet_hdr.csum_start) + - macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2); - err = -EINVAL; - if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len) - goto err; - } - - err = -EINVAL; - if (unlikely(len < ETH_HLEN)) - goto err; - - if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { - struct iov_iter i; - - copylen = vnet_hdr.hdr_len ? 
- macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; - if (copylen > good_linear) - copylen = good_linear; - else if (copylen < ETH_HLEN) - copylen = ETH_HLEN; - linear = copylen; - i = *from; - iov_iter_advance(&i, copylen); - if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) - zerocopy = true; - } - - if (!zerocopy) { - copylen = len; - linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); - if (linear > good_linear) - linear = good_linear; - else if (linear < ETH_HLEN) - linear = ETH_HLEN; - } - - skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, - linear, noblock, &err); - if (!skb) - goto err; - - if (zerocopy) - err = zerocopy_sg_from_iter(skb, from); - else - err = skb_copy_datagram_from_iter(skb, 0, from, len); - - if (err) - goto err_kfree; - - skb_set_network_header(skb, ETH_HLEN); - skb_reset_mac_header(skb); - skb->protocol = eth_hdr(skb)->h_proto; - - if (vnet_hdr_len) { - err = virtio_net_hdr_to_skb(skb, &vnet_hdr, - macvtap_is_little_endian(q)); - if (err) - goto err_kfree; - } - - skb_probe_transport_header(skb, ETH_HLEN); - - /* Move network header to the right position for VLAN tagged packets */ - if ((skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) && - __vlan_get_protocol(skb, skb->protocol, &depth) != 0) - skb_set_network_header(skb, depth); - - rcu_read_lock(); - vlan = rcu_dereference(q->vlan); - /* copy skb_ubuf_info for callback when skb has no error */ - if (zerocopy) { - skb_shinfo(skb)->destructor_arg = m->msg_control; - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; - skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; - } else if (m && m->msg_control) { - struct ubuf_info *uarg = m->msg_control; - uarg->callback(uarg, false); - } - - if (vlan) { - skb->dev = vlan->dev; - dev_queue_xmit(skb); - } else { - kfree_skb(skb); - } - rcu_read_unlock(); - - return total_len; - -err_kfree: - kfree_skb(skb); - -err: - rcu_read_lock(); - vlan = rcu_dereference(q->vlan); - if (vlan) - this_cpu_inc(vlan->pcpu_stats->tx_dropped); - rcu_read_unlock(); - - return err; -} - -static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from) -{ - struct file *file = iocb->ki_filp; - struct macvtap_queue *q = file->private_data; - - return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); -} - -/* Put packet to the user space buffer */ -static ssize_t macvtap_put_user(struct macvtap_queue *q, - const struct sk_buff *skb, - struct iov_iter *iter) -{ - int ret; - int vnet_hdr_len = 0; - int vlan_offset = 0; - int total; - - if (q->flags & IFF_VNET_HDR) { - struct virtio_net_hdr vnet_hdr; - vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); - if (iov_iter_count(iter) < vnet_hdr_len) - return -EINVAL; - - if (virtio_net_hdr_from_skb(skb, &vnet_hdr, - macvtap_is_little_endian(q), true)) - BUG(); - - if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != - sizeof(vnet_hdr)) - return -EFAULT; - - iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr)); - } - total = vnet_hdr_len; - total += skb->len; - - if (skb_vlan_tag_present(skb)) { - struct { - __be16 h_vlan_proto; - __be16 h_vlan_TCI; - } veth; - veth.h_vlan_proto = skb->vlan_proto; - veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); - - vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); - total += VLAN_HLEN; - - ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); - if (ret || !iov_iter_count(iter)) - goto done; - - ret = copy_to_iter(&veth, sizeof(veth), iter); - if (ret != sizeof(veth) || !iov_iter_count(iter)) - goto done; - } - - ret = skb_copy_datagram_iter(skb, 
vlan_offset, iter, - skb->len - vlan_offset); - -done: - return ret ? ret : total; -} - -static ssize_t macvtap_do_read(struct macvtap_queue *q, - struct iov_iter *to, - int noblock) -{ - DEFINE_WAIT(wait); - struct sk_buff *skb; - ssize_t ret = 0; - - if (!iov_iter_count(to)) - return 0; - - while (1) { - if (!noblock) - prepare_to_wait(sk_sleep(&q->sk), &wait, - TASK_INTERRUPTIBLE); - - /* Read frames from the queue */ - skb = skb_array_consume(&q->skb_array); - if (skb) - break; - if (noblock) { - ret = -EAGAIN; - break; - } - if (signal_pending(current)) { - ret = -ERESTARTSYS; - break; - } - /* Nothing to read, let's sleep */ - schedule(); - } - if (!noblock) - finish_wait(sk_sleep(&q->sk), &wait); - - if (skb) { - ret = macvtap_put_user(q, skb, to); - if (unlikely(ret < 0)) - kfree_skb(skb); - else - consume_skb(skb); - } - return ret; -} - -static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to) -{ - struct file *file = iocb->ki_filp; - struct macvtap_queue *q = file->private_data; - ssize_t len = iov_iter_count(to), ret; - - ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK); - ret = min_t(ssize_t, ret, len); - if (ret > 0) - iocb->ki_pos = ret; - return ret; -} - -static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q) -{ - struct macvlan_dev *vlan; - - ASSERT_RTNL(); - vlan = rtnl_dereference(q->vlan); - if (vlan) - dev_hold(vlan->dev); - - return vlan; -} - -static void macvtap_put_vlan(struct macvlan_dev *vlan) -{ - dev_put(vlan->dev); -} - -static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) -{ - struct macvtap_queue *q = file->private_data; - struct macvlan_dev *vlan; - int ret; - - vlan = macvtap_get_vlan(q); - if (!vlan) - return -EINVAL; - - if (flags & IFF_ATTACH_QUEUE) - ret = macvtap_enable_queue(vlan->dev, file, q); - else if (flags & IFF_DETACH_QUEUE) - ret = macvtap_disable_queue(q); - else - ret = -EINVAL; - - macvtap_put_vlan(vlan); - return ret; -} - -static int set_offload(struct macvtap_queue *q, unsigned long arg) -{ - struct macvlan_dev *vlan; - netdev_features_t features; - netdev_features_t feature_mask = 0; - - vlan = rtnl_dereference(q->vlan); - if (!vlan) - return -ENOLINK; - - features = vlan->dev->features; - - if (arg & TUN_F_CSUM) { - feature_mask = NETIF_F_HW_CSUM; - - if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) { - if (arg & TUN_F_TSO_ECN) - feature_mask |= NETIF_F_TSO_ECN; - if (arg & TUN_F_TSO4) - feature_mask |= NETIF_F_TSO; - if (arg & TUN_F_TSO6) - feature_mask |= NETIF_F_TSO6; - } - - if (arg & TUN_F_UFO) - feature_mask |= NETIF_F_UFO; - } - - /* tun/tap driver inverts the usage for TSO offloads, where - * setting the TSO bit means that the userspace wants to - * accept TSO frames and turning it off means that user space - * does not support TSO. - * For macvtap, we have to invert it to mean the same thing. - * When user space turns off TSO, we turn off GSO/LRO so that - * user-space will not receive TSO frames. - */ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) - features |= RX_OFFLOADS; - else - features &= ~RX_OFFLOADS; - - /* tap_features are the same as features on tun/tap and - * reflect user expectations. 
- */ - vlan->tap_features = feature_mask; - vlan->set_features = features; - netdev_update_features(vlan->dev); - - return 0; -} - -/* - * provide compatibility with generic tun/tap interface - */ -static long macvtap_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct macvtap_queue *q = file->private_data; - struct macvlan_dev *vlan; - void __user *argp = (void __user *)arg; - struct ifreq __user *ifr = argp; - unsigned int __user *up = argp; - unsigned short u; - int __user *sp = argp; - struct sockaddr sa; - int s; - int ret; - - switch (cmd) { - case TUNSETIFF: - /* ignore the name, just look at flags */ - if (get_user(u, &ifr->ifr_flags)) - return -EFAULT; - - ret = 0; - if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP)) - ret = -EINVAL; - else - q->flags = (q->flags & ~MACVTAP_FEATURES) | u; - - return ret; - - case TUNGETIFF: - rtnl_lock(); - vlan = macvtap_get_vlan(q); - if (!vlan) { - rtnl_unlock(); - return -ENOLINK; - } - - ret = 0; - u = q->flags; - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || - put_user(u, &ifr->ifr_flags)) - ret = -EFAULT; - macvtap_put_vlan(vlan); - rtnl_unlock(); - return ret; - - case TUNSETQUEUE: - if (get_user(u, &ifr->ifr_flags)) - return -EFAULT; - rtnl_lock(); - ret = macvtap_ioctl_set_queue(file, u); - rtnl_unlock(); - return ret; - - case TUNGETFEATURES: - if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up)) - return -EFAULT; - return 0; - - case TUNSETSNDBUF: - if (get_user(s, sp)) - return -EFAULT; - - q->sk.sk_sndbuf = s; - return 0; - - case TUNGETVNETHDRSZ: - s = q->vnet_hdr_sz; - if (put_user(s, sp)) - return -EFAULT; - return 0; - - case TUNSETVNETHDRSZ: - if (get_user(s, sp)) - return -EFAULT; - if (s < (int)sizeof(struct virtio_net_hdr)) - return -EINVAL; - - q->vnet_hdr_sz = s; - return 0; - - case TUNGETVNETLE: - s = !!(q->flags & MACVTAP_VNET_LE); - if (put_user(s, sp)) - return -EFAULT; - return 0; - - case TUNSETVNETLE: - if (get_user(s, sp)) - return -EFAULT; - if (s) - q->flags |= MACVTAP_VNET_LE; - else - q->flags &= ~MACVTAP_VNET_LE; - return 0; - - case TUNGETVNETBE: - return macvtap_get_vnet_be(q, sp); - - case TUNSETVNETBE: - return macvtap_set_vnet_be(q, sp); - - case TUNSETOFFLOAD: - /* let the user check for future flags */ - if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN | TUN_F_UFO)) - return -EINVAL; - - rtnl_lock(); - ret = set_offload(q, arg); - rtnl_unlock(); - return ret; - - case SIOCGIFHWADDR: - rtnl_lock(); - vlan = macvtap_get_vlan(q); - if (!vlan) { - rtnl_unlock(); - return -ENOLINK; - } - ret = 0; - u = vlan->dev->type; - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || - copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) || - put_user(u, &ifr->ifr_hwaddr.sa_family)) - ret = -EFAULT; - macvtap_put_vlan(vlan); - rtnl_unlock(); - return ret; - - case SIOCSIFHWADDR: - if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa))) - return -EFAULT; - rtnl_lock(); - vlan = macvtap_get_vlan(q); - if (!vlan) { - rtnl_unlock(); - return -ENOLINK; - } - ret = dev_set_mac_address(vlan->dev, &sa); - macvtap_put_vlan(vlan); - rtnl_unlock(); - return ret; - - default: - return -EINVAL; - } -} - -#ifdef CONFIG_COMPAT -static long macvtap_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); -} -#endif - -static const struct file_operations macvtap_fops = { - .owner = THIS_MODULE, - .open = macvtap_open, - .release = macvtap_release, - .read_iter = 
macvtap_read_iter, - .write_iter = macvtap_write_iter, - .poll = macvtap_poll, - .llseek = no_llseek, - .unlocked_ioctl = macvtap_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = macvtap_compat_ioctl, -#endif -}; - -static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, - size_t total_len) -{ - struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); - return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); -} - -static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, - size_t total_len, int flags) -{ - struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); - int ret; - if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) - return -EINVAL; - ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT); - if (ret > total_len) { - m->msg_flags |= MSG_TRUNC; - ret = flags & MSG_TRUNC ? ret : total_len; - } - return ret; -} - -static int macvtap_peek_len(struct socket *sock) -{ - struct macvtap_queue *q = container_of(sock, struct macvtap_queue, - sock); - return skb_array_peek_len(&q->skb_array); -} - -/* Ops structure to mimic raw sockets with tun */ -static const struct proto_ops macvtap_socket_ops = { - .sendmsg = macvtap_sendmsg, - .recvmsg = macvtap_recvmsg, - .peek_len = macvtap_peek_len, -}; - -/* Get an underlying socket object from tun file. Returns error unless file is - * attached to a device. The returned object works like a packet socket, it - * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for - * holding a reference to the file for as long as the socket is in use. */ -struct socket *macvtap_get_socket(struct file *file) -{ - struct macvtap_queue *q; - if (file->f_op != &macvtap_fops) - return ERR_PTR(-EINVAL); - q = file->private_data; - if (!q) - return ERR_PTR(-EBADFD); - return &q->sock; -} -EXPORT_SYMBOL_GPL(macvtap_get_socket); - -static int macvtap_queue_resize(struct macvlan_dev *vlan) -{ - struct net_device *dev = vlan->dev; - struct macvtap_queue *q; - struct skb_array **arrays; - int n = vlan->numqueues; - int ret, i = 0; - - arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); - if (!arrays) - return -ENOMEM; - - list_for_each_entry(q, &vlan->queue_list, next) - arrays[i++] = &q->skb_array; - - ret = skb_array_resize_multiple(arrays, n, - dev->tx_queue_len, GFP_KERNEL); - - kfree(arrays); - return ret; -} - -static int macvtap_device_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct macvlan_dev *vlan; - struct device *classdev; - dev_t devt; - int err; - char tap_name[IFNAMSIZ]; - - if (dev->rtnl_link_ops != &macvtap_link_ops) - return NOTIFY_DONE; - - snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); - vlan = netdev_priv(dev); - - switch (event) { - case NETDEV_REGISTER: - /* Create the device node here after the network device has - * been registered but before register_netdevice has - * finished running. 
- */ - err = macvtap_get_minor(vlan); - if (err) - return notifier_from_errno(err); - - devt = MKDEV(MAJOR(macvtap_major), vlan->minor); - classdev = device_create(&macvtap_class, &dev->dev, devt, - dev, tap_name); - if (IS_ERR(classdev)) { - macvtap_free_minor(vlan); - return notifier_from_errno(PTR_ERR(classdev)); - } - err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, - tap_name); - if (err) - return notifier_from_errno(err); - break; - case NETDEV_UNREGISTER: - /* vlan->minor == 0 if NETDEV_REGISTER above failed */ - if (vlan->minor == 0) - break; - sysfs_remove_link(&dev->dev.kobj, tap_name); - devt = MKDEV(MAJOR(macvtap_major), vlan->minor); - device_destroy(&macvtap_class, devt); - macvtap_free_minor(vlan); - break; - case NETDEV_CHANGE_TX_QUEUE_LEN: - if (macvtap_queue_resize(vlan)) - return NOTIFY_BAD; - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block macvtap_notifier_block __read_mostly = { - .notifier_call = macvtap_device_event, -}; - -static int macvtap_init(void) -{ - int err; - - err = alloc_chrdev_region(&macvtap_major, 0, - MACVTAP_NUM_DEVS, "macvtap"); - if (err) - goto out1; - - cdev_init(&macvtap_cdev, &macvtap_fops); - err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS); - if (err) - goto out2; - - err = class_register(&macvtap_class); - if (err) - goto out3; - - err = register_netdevice_notifier(&macvtap_notifier_block); - if (err) - goto out4; - - err = macvlan_link_register(&macvtap_link_ops); - if (err) - goto out5; - - return 0; - -out5: - unregister_netdevice_notifier(&macvtap_notifier_block); -out4: - class_unregister(&macvtap_class); -out3: - cdev_del(&macvtap_cdev); -out2: - unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); -out1: - return err; -} -module_init(macvtap_init); - -static void macvtap_exit(void) -{ - rtnl_link_unregister(&macvtap_link_ops); - unregister_netdevice_notifier(&macvtap_notifier_block); - class_unregister(&macvtap_class); - cdev_del(&macvtap_cdev); - unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); - idr_destroy(&minor_idr); -} -module_exit(macvtap_exit); - -MODULE_ALIAS_RTNL_LINK("macvtap"); -MODULE_AUTHOR("Arnd Bergmann "); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/macvtap_main.c b/drivers/net/macvtap_main.c new file mode 100644 index 000000000000..96ffa60c5a36 --- /dev/null +++ b/drivers/net/macvtap_main.c @@ -0,0 +1,218 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * Variables for dealing with macvtaps device numbers. + */ +static dev_t macvtap_major; +#define MACVTAP_NUM_DEVS (1U << MINORBITS) + +static const void *macvtap_net_namespace(struct device *d) +{ + struct net_device *dev = to_net_dev(d->parent); + return dev_net(dev); +} + +static struct class macvtap_class = { + .name = "macvtap", + .owner = THIS_MODULE, + .ns_type = &net_ns_type_operations, + .namespace = macvtap_net_namespace, +}; +static struct cdev macvtap_cdev; + +#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ + NETIF_F_TSO6 | NETIF_F_UFO) + +static int macvtap_newlink(struct net *src_net, + struct net_device *dev, + struct nlattr *tb[], + struct nlattr *data[]) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + int err; + + INIT_LIST_HEAD(&vlan->queue_list); + + /* Since macvlan supports all offloads by default, make + * tap support all offloads also. 
+ */ + vlan->tap_features = TUN_OFFLOADS; + + err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan); + if (err) + return err; + + /* Don't put anything that may fail after macvlan_common_newlink + * because we can't undo what it does. + */ + err = macvlan_common_newlink(src_net, dev, tb, data); + if (err) { + netdev_rx_handler_unregister(dev); + return err; + } + + return 0; +} + +static void macvtap_dellink(struct net_device *dev, + struct list_head *head) +{ + netdev_rx_handler_unregister(dev); + macvtap_del_queues(dev); + macvlan_dellink(dev, head); +} + +static void macvtap_setup(struct net_device *dev) +{ + macvlan_common_setup(dev); + dev->tx_queue_len = TUN_READQ_SIZE; +} + +static struct rtnl_link_ops macvtap_link_ops __read_mostly = { + .kind = "macvtap", + .setup = macvtap_setup, + .newlink = macvtap_newlink, + .dellink = macvtap_dellink, +}; + +static int macvtap_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct macvlan_dev *vlan; + struct device *classdev; + dev_t devt; + int err; + char tap_name[IFNAMSIZ]; + + if (dev->rtnl_link_ops != &macvtap_link_ops) + return NOTIFY_DONE; + + snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); + vlan = netdev_priv(dev); + + switch (event) { + case NETDEV_REGISTER: + /* Create the device node here after the network device has + * been registered but before register_netdevice has + * finished running. + */ + err = macvtap_get_minor(vlan); + if (err) + return notifier_from_errno(err); + + devt = MKDEV(MAJOR(macvtap_major), vlan->minor); + classdev = device_create(&macvtap_class, &dev->dev, devt, + dev, tap_name); + if (IS_ERR(classdev)) { + macvtap_free_minor(vlan); + return notifier_from_errno(PTR_ERR(classdev)); + } + err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, + tap_name); + if (err) + return notifier_from_errno(err); + break; + case NETDEV_UNREGISTER: + /* vlan->minor == 0 if NETDEV_REGISTER above failed */ + if (vlan->minor == 0) + break; + sysfs_remove_link(&dev->dev.kobj, tap_name); + devt = MKDEV(MAJOR(macvtap_major), vlan->minor); + device_destroy(&macvtap_class, devt); + macvtap_free_minor(vlan); + break; + case NETDEV_CHANGE_TX_QUEUE_LEN: + if (macvtap_queue_resize(vlan)) + return NOTIFY_BAD; + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block macvtap_notifier_block __read_mostly = { + .notifier_call = macvtap_device_event, +}; + +extern struct file_operations macvtap_fops; +static int macvtap_init(void) +{ + int err; + + err = alloc_chrdev_region(&macvtap_major, 0, + MACVTAP_NUM_DEVS, "macvtap"); + if (err) + goto out1; + + cdev_init(&macvtap_cdev, &macvtap_fops); + err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS); + if (err) + goto out2; + + err = class_register(&macvtap_class); + if (err) + goto out3; + + err = register_netdevice_notifier(&macvtap_notifier_block); + if (err) + goto out4; + + err = macvlan_link_register(&macvtap_link_ops); + if (err) + goto out5; + + return 0; + +out5: + unregister_netdevice_notifier(&macvtap_notifier_block); +out4: + class_unregister(&macvtap_class); +out3: + cdev_del(&macvtap_cdev); +out2: + unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); +out1: + return err; +} +module_init(macvtap_init); + +extern struct idr minor_idr; +static void macvtap_exit(void) +{ + rtnl_link_unregister(&macvtap_link_ops); + unregister_netdevice_notifier(&macvtap_notifier_block); + class_unregister(&macvtap_class); + cdev_del(&macvtap_cdev); + 
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); + idr_destroy(&minor_idr); +} +module_exit(macvtap_exit); + +MODULE_ALIAS_RTNL_LINK("macvtap"); +MODULE_AUTHOR("Arnd Bergmann "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/tap.c b/drivers/net/tap.c new file mode 100644 index 000000000000..6f6228e4fd3f --- /dev/null +++ b/drivers/net/tap.c @@ -0,0 +1,1186 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * A macvtap queue is the central object of this driver, it connects + * an open character device to a macvlan interface. There can be + * multiple queues on one interface, which map back to queues + * implemented in hardware on the underlying device. + * + * macvtap_proto is used to allocate queues through the sock allocation + * mechanism. + * + */ +struct macvtap_queue { + struct sock sk; + struct socket sock; + struct socket_wq wq; + int vnet_hdr_sz; + struct macvlan_dev __rcu *vlan; + struct file *file; + unsigned int flags; + u16 queue_index; + bool enabled; + struct list_head next; + struct skb_array skb_array; +}; + +#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) + +#define MACVTAP_VNET_LE 0x80000000 +#define MACVTAP_VNET_BE 0x40000000 + +#ifdef CONFIG_TUN_VNET_CROSS_LE +static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) +{ + return q->flags & MACVTAP_VNET_BE ? false : + virtio_legacy_is_little_endian(); +} + +static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp) +{ + int s = !!(q->flags & MACVTAP_VNET_BE); + + if (put_user(s, sp)) + return -EFAULT; + + return 0; +} + +static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp) +{ + int s; + + if (get_user(s, sp)) + return -EFAULT; + + if (s) + q->flags |= MACVTAP_VNET_BE; + else + q->flags &= ~MACVTAP_VNET_BE; + + return 0; +} +#else +static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) +{ + return virtio_legacy_is_little_endian(); +} + +static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp) +{ + return -EINVAL; +} + +static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp) +{ + return -EINVAL; +} +#endif /* CONFIG_TUN_VNET_CROSS_LE */ + +static inline bool macvtap_is_little_endian(struct macvtap_queue *q) +{ + return q->flags & MACVTAP_VNET_LE || + macvtap_legacy_is_little_endian(q); +} + +static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val) +{ + return __virtio16_to_cpu(macvtap_is_little_endian(q), val); +} + +static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val) +{ + return __cpu_to_virtio16(macvtap_is_little_endian(q), val); +} + +static struct proto macvtap_proto = { + .name = "macvtap", + .owner = THIS_MODULE, + .obj_size = sizeof (struct macvtap_queue), +}; + +#define MACVTAP_NUM_DEVS (1U << MINORBITS) +static DEFINE_MUTEX(minor_lock); +DEFINE_IDR(minor_idr); + +#define GOODCOPY_LEN 128 + +static const struct proto_ops macvtap_socket_ops; + +#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) +#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) + +static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->rx_handler_data); +} + +/* + * RCU usage: + * The macvtap_queue and the macvlan_dev are loosely coupled, the + * pointers from one to the other can only be read while rcu_read_lock + * or 
rtnl is held. + * + * Both the file and the macvlan_dev hold a reference on the macvtap_queue + * through sock_hold(&q->sk). When the macvlan_dev goes away first, + * q->vlan becomes inaccessible. When the files gets closed, + * macvtap_get_queue() fails. + * + * There may still be references to the struct sock inside of the + * queue from outbound SKBs, but these never reference back to the + * file or the dev. The data structure is freed through __sk_free + * when both our references and any pending SKBs are gone. + */ + +static int macvtap_enable_queue(struct net_device *dev, struct file *file, + struct macvtap_queue *q) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + int err = -EINVAL; + + ASSERT_RTNL(); + + if (q->enabled) + goto out; + + err = 0; + rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); + q->queue_index = vlan->numvtaps; + q->enabled = true; + + vlan->numvtaps++; +out: + return err; +} + +/* Requires RTNL */ +static int macvtap_set_queue(struct net_device *dev, struct file *file, + struct macvtap_queue *q) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + + if (vlan->numqueues == MAX_MACVTAP_QUEUES) + return -EBUSY; + + rcu_assign_pointer(q->vlan, vlan); + rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); + sock_hold(&q->sk); + + q->file = file; + q->queue_index = vlan->numvtaps; + q->enabled = true; + file->private_data = q; + list_add_tail(&q->next, &vlan->queue_list); + + vlan->numvtaps++; + vlan->numqueues++; + + return 0; +} + +static int macvtap_disable_queue(struct macvtap_queue *q) +{ + struct macvlan_dev *vlan; + struct macvtap_queue *nq; + + ASSERT_RTNL(); + if (!q->enabled) + return -EINVAL; + + vlan = rtnl_dereference(q->vlan); + + if (vlan) { + int index = q->queue_index; + BUG_ON(index >= vlan->numvtaps); + nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]); + nq->queue_index = index; + + rcu_assign_pointer(vlan->taps[index], nq); + RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL); + q->enabled = false; + + vlan->numvtaps--; + } + + return 0; +} + +/* + * The file owning the queue got closed, give up both + * the reference that the files holds as well as the + * one from the macvlan_dev if that still exists. + * + * Using the spinlock makes sure that we don't get + * to the queue again after destroying it. + */ +static void macvtap_put_queue(struct macvtap_queue *q) +{ + struct macvlan_dev *vlan; + + rtnl_lock(); + vlan = rtnl_dereference(q->vlan); + + if (vlan) { + if (q->enabled) + BUG_ON(macvtap_disable_queue(q)); + + vlan->numqueues--; + RCU_INIT_POINTER(q->vlan, NULL); + sock_put(&q->sk); + list_del_init(&q->next); + } + + rtnl_unlock(); + + synchronize_rcu(); + sock_put(&q->sk); +} + +/* + * Select a queue based on the rxq of the device on which this packet + * arrived. If the incoming device is not mq, calculate a flow hash + * to select a queue. If all fails, find the first available queue. + * Cache vlan->numvtaps since it can become zero during the execution + * of this function. + */ +static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, + struct sk_buff *skb) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + struct macvtap_queue *tap = NULL; + /* Access to taps array is protected by rcu, but access to numvtaps + * isn't. Below we use it to lookup a queue, but treat it as a hint + * and validate that the result isn't NULL - in case we are + * racing against queue removal. 
+ */ + int numvtaps = ACCESS_ONCE(vlan->numvtaps); + __u32 rxq; + + if (!numvtaps) + goto out; + + if (numvtaps == 1) + goto single; + + /* Check if we can use flow to select a queue */ + rxq = skb_get_hash(skb); + if (rxq) { + tap = rcu_dereference(vlan->taps[rxq % numvtaps]); + goto out; + } + + if (likely(skb_rx_queue_recorded(skb))) { + rxq = skb_get_rx_queue(skb); + + while (unlikely(rxq >= numvtaps)) + rxq -= numvtaps; + + tap = rcu_dereference(vlan->taps[rxq]); + goto out; + } + +single: + tap = rcu_dereference(vlan->taps[0]); +out: + return tap; +} + +/* + * The net_device is going away, give up the reference + * that it holds on all queues and safely set the pointer + * from the queues to NULL. + */ +void macvtap_del_queues(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + struct macvtap_queue *q, *tmp; + + ASSERT_RTNL(); + list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) { + list_del_init(&q->next); + RCU_INIT_POINTER(q->vlan, NULL); + if (q->enabled) + vlan->numvtaps--; + vlan->numqueues--; + sock_put(&q->sk); + } + BUG_ON(vlan->numvtaps); + BUG_ON(vlan->numqueues); + /* guarantee that any future macvtap_set_queue will fail */ + vlan->numvtaps = MAX_MACVTAP_QUEUES; +} + +rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *dev = skb->dev; + struct macvlan_dev *vlan; + struct macvtap_queue *q; + netdev_features_t features = TAP_FEATURES; + + vlan = macvtap_get_vlan_rcu(dev); + if (!vlan) + return RX_HANDLER_PASS; + + q = macvtap_get_queue(dev, skb); + if (!q) + return RX_HANDLER_PASS; + + if (__skb_array_full(&q->skb_array)) + goto drop; + + skb_push(skb, ETH_HLEN); + + /* Apply the forward feature mask so that we perform segmentation + * according to users wishes. This only works if VNET_HDR is + * enabled. + */ + if (q->flags & IFF_VNET_HDR) + features |= vlan->tap_features; + if (netif_needs_gso(skb, features)) { + struct sk_buff *segs = __skb_gso_segment(skb, features, false); + + if (IS_ERR(segs)) + goto drop; + + if (!segs) { + if (skb_array_produce(&q->skb_array, skb)) + goto drop; + goto wake_up; + } + + consume_skb(skb); + while (segs) { + struct sk_buff *nskb = segs->next; + + segs->next = NULL; + if (skb_array_produce(&q->skb_array, segs)) { + kfree_skb(segs); + kfree_skb_list(nskb); + break; + } + segs = nskb; + } + } else { + /* If we receive a partial checksum and the tap side + * doesn't support checksum offload, compute the checksum. + * Note: it doesn't matter which checksum feature to + * check, we either support them all or none. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL && + !(features & NETIF_F_CSUM_MASK) && + skb_checksum_help(skb)) + goto drop; + if (skb_array_produce(&q->skb_array, skb)) + goto drop; + } + +wake_up: + wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); + return RX_HANDLER_CONSUMED; + +drop: + /* Count errors/drops only here, thus don't care about args. */ + macvlan_count_rx(vlan, 0, 0, 0); + kfree_skb(skb); + return RX_HANDLER_CONSUMED; +} + +int macvtap_get_minor(struct macvlan_dev *vlan) +{ + int retval = -ENOMEM; + + mutex_lock(&minor_lock); + retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL); + if (retval >= 0) { + vlan->minor = retval; + } else if (retval == -ENOSPC) { + netdev_err(vlan->dev, "Too many macvtap devices\n"); + retval = -EINVAL; + } + mutex_unlock(&minor_lock); + return retval < 0 ? 
retval : 0; +} + +void macvtap_free_minor(struct macvlan_dev *vlan) +{ + mutex_lock(&minor_lock); + if (vlan->minor) { + idr_remove(&minor_idr, vlan->minor); + vlan->minor = 0; + } + mutex_unlock(&minor_lock); +} + +static struct net_device *dev_get_by_macvtap_minor(int minor) +{ + struct net_device *dev = NULL; + struct macvlan_dev *vlan; + + mutex_lock(&minor_lock); + vlan = idr_find(&minor_idr, minor); + if (vlan) { + dev = vlan->dev; + dev_hold(dev); + } + mutex_unlock(&minor_lock); + return dev; +} + +static void macvtap_sock_write_space(struct sock *sk) +{ + wait_queue_head_t *wqueue; + + if (!sock_writeable(sk) || + !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) + return; + + wqueue = sk_sleep(sk); + if (wqueue && waitqueue_active(wqueue)) + wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); +} + +static void macvtap_sock_destruct(struct sock *sk) +{ + struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); + + skb_array_cleanup(&q->skb_array); +} + +static int macvtap_open(struct inode *inode, struct file *file) +{ + struct net *net = current->nsproxy->net_ns; + struct net_device *dev; + struct macvtap_queue *q; + int err = -ENODEV; + + rtnl_lock(); + dev = dev_get_by_macvtap_minor(iminor(inode)); + if (!dev) + goto err; + + err = -ENOMEM; + q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, + &macvtap_proto, 0); + if (!q) + goto err; + + RCU_INIT_POINTER(q->sock.wq, &q->wq); + init_waitqueue_head(&q->wq.wait); + q->sock.type = SOCK_RAW; + q->sock.state = SS_CONNECTED; + q->sock.file = file; + q->sock.ops = &macvtap_socket_ops; + sock_init_data(&q->sock, &q->sk); + q->sk.sk_write_space = macvtap_sock_write_space; + q->sk.sk_destruct = macvtap_sock_destruct; + q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; + q->vnet_hdr_sz = sizeof(struct virtio_net_hdr); + + /* + * so far only KVM virtio_net uses macvtap, enable zero copy between + * guest kernel and host kernel when lower device supports zerocopy + * + * The macvlan supports zerocopy iff the lower device supports zero + * copy so we don't have to look at the lower device directly. + */ + if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG)) + sock_set_flag(&q->sk, SOCK_ZEROCOPY); + + err = -ENOMEM; + if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL)) + goto err_array; + + err = macvtap_set_queue(dev, file, q); + if (err) + goto err_queue; + + dev_put(dev); + + rtnl_unlock(); + return err; + +err_queue: + skb_array_cleanup(&q->skb_array); +err_array: + sock_put(&q->sk); +err: + if (dev) + dev_put(dev); + + rtnl_unlock(); + return err; +} + +static int macvtap_release(struct inode *inode, struct file *file) +{ + struct macvtap_queue *q = file->private_data; + macvtap_put_queue(q); + return 0; +} + +static unsigned int macvtap_poll(struct file *file, poll_table * wait) +{ + struct macvtap_queue *q = file->private_data; + unsigned int mask = POLLERR; + + if (!q) + goto out; + + mask = 0; + poll_wait(file, &q->wq.wait, wait); + + if (!skb_array_empty(&q->skb_array)) + mask |= POLLIN | POLLRDNORM; + + if (sock_writeable(&q->sk) || + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && + sock_writeable(&q->sk))) + mask |= POLLOUT | POLLWRNORM; + +out: + return mask; +} + +static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, + size_t len, size_t linear, + int noblock, int *err) +{ + struct sk_buff *skb; + + /* Under a page? Don't bother with paged skb. 
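+ * Otherwise only the first 'linear' bytes go into the skb head;
+ * sock_alloc_send_pskb() places the remaining len - linear bytes in
+ * page fragments, which is why data_len/len are adjusted below.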
*/ + if (prepad + len < PAGE_SIZE || !linear) + linear = len; + + skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, + err, 0); + if (!skb) + return NULL; + + skb_reserve(skb, prepad); + skb_put(skb, linear); + skb->data_len = len - linear; + skb->len += len - linear; + + return skb; +} + +/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ +#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) + +/* Get packet from user space buffer */ +static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, + struct iov_iter *from, int noblock) +{ + int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); + struct sk_buff *skb; + struct macvlan_dev *vlan; + unsigned long total_len = iov_iter_count(from); + unsigned long len = total_len; + int err; + struct virtio_net_hdr vnet_hdr = { 0 }; + int vnet_hdr_len = 0; + int copylen = 0; + int depth; + bool zerocopy = false; + size_t linear; + + if (q->flags & IFF_VNET_HDR) { + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); + + err = -EINVAL; + if (len < vnet_hdr_len) + goto err; + len -= vnet_hdr_len; + + err = -EFAULT; + if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from)) + goto err; + iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); + if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && + macvtap16_to_cpu(q, vnet_hdr.csum_start) + + macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > + macvtap16_to_cpu(q, vnet_hdr.hdr_len)) + vnet_hdr.hdr_len = cpu_to_macvtap16(q, + macvtap16_to_cpu(q, vnet_hdr.csum_start) + + macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2); + err = -EINVAL; + if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len) + goto err; + } + + err = -EINVAL; + if (unlikely(len < ETH_HLEN)) + goto err; + + if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { + struct iov_iter i; + + copylen = vnet_hdr.hdr_len ? 
+ macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; + if (copylen > good_linear) + copylen = good_linear; + else if (copylen < ETH_HLEN) + copylen = ETH_HLEN; + linear = copylen; + i = *from; + iov_iter_advance(&i, copylen); + if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) + zerocopy = true; + } + + if (!zerocopy) { + copylen = len; + linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); + if (linear > good_linear) + linear = good_linear; + else if (linear < ETH_HLEN) + linear = ETH_HLEN; + } + + skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, + linear, noblock, &err); + if (!skb) + goto err; + + if (zerocopy) + err = zerocopy_sg_from_iter(skb, from); + else + err = skb_copy_datagram_from_iter(skb, 0, from, len); + + if (err) + goto err_kfree; + + skb_set_network_header(skb, ETH_HLEN); + skb_reset_mac_header(skb); + skb->protocol = eth_hdr(skb)->h_proto; + + if (vnet_hdr_len) { + err = virtio_net_hdr_to_skb(skb, &vnet_hdr, + macvtap_is_little_endian(q)); + if (err) + goto err_kfree; + } + + skb_probe_transport_header(skb, ETH_HLEN); + + /* Move network header to the right position for VLAN tagged packets */ + if ((skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) && + __vlan_get_protocol(skb, skb->protocol, &depth) != 0) + skb_set_network_header(skb, depth); + + rcu_read_lock(); + vlan = rcu_dereference(q->vlan); + /* copy skb_ubuf_info for callback when skb has no error */ + if (zerocopy) { + skb_shinfo(skb)->destructor_arg = m->msg_control; + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; + } else if (m && m->msg_control) { + struct ubuf_info *uarg = m->msg_control; + uarg->callback(uarg, false); + } + + if (vlan) { + skb->dev = vlan->dev; + dev_queue_xmit(skb); + } else { + kfree_skb(skb); + } + rcu_read_unlock(); + + return total_len; + +err_kfree: + kfree_skb(skb); + +err: + rcu_read_lock(); + vlan = rcu_dereference(q->vlan); + if (vlan) + this_cpu_inc(vlan->pcpu_stats->tx_dropped); + rcu_read_unlock(); + + return err; +} + +static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct macvtap_queue *q = file->private_data; + + return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); +} + +/* Put packet to the user space buffer */ +static ssize_t macvtap_put_user(struct macvtap_queue *q, + const struct sk_buff *skb, + struct iov_iter *iter) +{ + int ret; + int vnet_hdr_len = 0; + int vlan_offset = 0; + int total; + + if (q->flags & IFF_VNET_HDR) { + struct virtio_net_hdr vnet_hdr; + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); + if (iov_iter_count(iter) < vnet_hdr_len) + return -EINVAL; + + if (virtio_net_hdr_from_skb(skb, &vnet_hdr, + macvtap_is_little_endian(q), true)) + BUG(); + + if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != + sizeof(vnet_hdr)) + return -EFAULT; + + iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr)); + } + total = vnet_hdr_len; + total += skb->len; + + if (skb_vlan_tag_present(skb)) { + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + } veth; + veth.h_vlan_proto = skb->vlan_proto; + veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); + + vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); + total += VLAN_HLEN; + + ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); + if (ret || !iov_iter_count(iter)) + goto done; + + ret = copy_to_iter(&veth, sizeof(veth), iter); + if (ret != sizeof(veth) || !iov_iter_count(iter)) + goto done; + } + + ret = skb_copy_datagram_iter(skb, 
vlan_offset, iter, + skb->len - vlan_offset); + +done: + return ret ? ret : total; +} + +static ssize_t macvtap_do_read(struct macvtap_queue *q, + struct iov_iter *to, + int noblock) +{ + DEFINE_WAIT(wait); + struct sk_buff *skb; + ssize_t ret = 0; + + if (!iov_iter_count(to)) + return 0; + + while (1) { + if (!noblock) + prepare_to_wait(sk_sleep(&q->sk), &wait, + TASK_INTERRUPTIBLE); + + /* Read frames from the queue */ + skb = skb_array_consume(&q->skb_array); + if (skb) + break; + if (noblock) { + ret = -EAGAIN; + break; + } + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + /* Nothing to read, let's sleep */ + schedule(); + } + if (!noblock) + finish_wait(sk_sleep(&q->sk), &wait); + + if (skb) { + ret = macvtap_put_user(q, skb, to); + if (unlikely(ret < 0)) + kfree_skb(skb); + else + consume_skb(skb); + } + return ret; +} + +static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + struct file *file = iocb->ki_filp; + struct macvtap_queue *q = file->private_data; + ssize_t len = iov_iter_count(to), ret; + + ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK); + ret = min_t(ssize_t, ret, len); + if (ret > 0) + iocb->ki_pos = ret; + return ret; +} + +static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q) +{ + struct macvlan_dev *vlan; + + ASSERT_RTNL(); + vlan = rtnl_dereference(q->vlan); + if (vlan) + dev_hold(vlan->dev); + + return vlan; +} + +static void macvtap_put_vlan(struct macvlan_dev *vlan) +{ + dev_put(vlan->dev); +} + +static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) +{ + struct macvtap_queue *q = file->private_data; + struct macvlan_dev *vlan; + int ret; + + vlan = macvtap_get_vlan(q); + if (!vlan) + return -EINVAL; + + if (flags & IFF_ATTACH_QUEUE) + ret = macvtap_enable_queue(vlan->dev, file, q); + else if (flags & IFF_DETACH_QUEUE) + ret = macvtap_disable_queue(q); + else + ret = -EINVAL; + + macvtap_put_vlan(vlan); + return ret; +} + +static int set_offload(struct macvtap_queue *q, unsigned long arg) +{ + struct macvlan_dev *vlan; + netdev_features_t features; + netdev_features_t feature_mask = 0; + + vlan = rtnl_dereference(q->vlan); + if (!vlan) + return -ENOLINK; + + features = vlan->dev->features; + + if (arg & TUN_F_CSUM) { + feature_mask = NETIF_F_HW_CSUM; + + if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) { + if (arg & TUN_F_TSO_ECN) + feature_mask |= NETIF_F_TSO_ECN; + if (arg & TUN_F_TSO4) + feature_mask |= NETIF_F_TSO; + if (arg & TUN_F_TSO6) + feature_mask |= NETIF_F_TSO6; + } + + if (arg & TUN_F_UFO) + feature_mask |= NETIF_F_UFO; + } + + /* tun/tap driver inverts the usage for TSO offloads, where + * setting the TSO bit means that the userspace wants to + * accept TSO frames and turning it off means that user space + * does not support TSO. + * For macvtap, we have to invert it to mean the same thing. + * When user space turns off TSO, we turn off GSO/LRO so that + * user-space will not receive TSO frames. + */ + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) + features |= RX_OFFLOADS; + else + features &= ~RX_OFFLOADS; + + /* tap_features are the same as features on tun/tap and + * reflect user expectations. 
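+ * They are consulted in macvtap_handle_frame() (only when the queue
+ * has IFF_VNET_HDR set) to decide whether a frame still needs to be
+ * segmented or checksummed in software before it is queued.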
+ */ + vlan->tap_features = feature_mask; + vlan->set_features = features; + netdev_update_features(vlan->dev); + + return 0; +} + +/* + * provide compatibility with generic tun/tap interface + */ +static long macvtap_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct macvtap_queue *q = file->private_data; + struct macvlan_dev *vlan; + void __user *argp = (void __user *)arg; + struct ifreq __user *ifr = argp; + unsigned int __user *up = argp; + unsigned short u; + int __user *sp = argp; + struct sockaddr sa; + int s; + int ret; + + switch (cmd) { + case TUNSETIFF: + /* ignore the name, just look at flags */ + if (get_user(u, &ifr->ifr_flags)) + return -EFAULT; + + ret = 0; + if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP)) + ret = -EINVAL; + else + q->flags = (q->flags & ~MACVTAP_FEATURES) | u; + + return ret; + + case TUNGETIFF: + rtnl_lock(); + vlan = macvtap_get_vlan(q); + if (!vlan) { + rtnl_unlock(); + return -ENOLINK; + } + + ret = 0; + u = q->flags; + if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || + put_user(u, &ifr->ifr_flags)) + ret = -EFAULT; + macvtap_put_vlan(vlan); + rtnl_unlock(); + return ret; + + case TUNSETQUEUE: + if (get_user(u, &ifr->ifr_flags)) + return -EFAULT; + rtnl_lock(); + ret = macvtap_ioctl_set_queue(file, u); + rtnl_unlock(); + return ret; + + case TUNGETFEATURES: + if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up)) + return -EFAULT; + return 0; + + case TUNSETSNDBUF: + if (get_user(s, sp)) + return -EFAULT; + + q->sk.sk_sndbuf = s; + return 0; + + case TUNGETVNETHDRSZ: + s = q->vnet_hdr_sz; + if (put_user(s, sp)) + return -EFAULT; + return 0; + + case TUNSETVNETHDRSZ: + if (get_user(s, sp)) + return -EFAULT; + if (s < (int)sizeof(struct virtio_net_hdr)) + return -EINVAL; + + q->vnet_hdr_sz = s; + return 0; + + case TUNGETVNETLE: + s = !!(q->flags & MACVTAP_VNET_LE); + if (put_user(s, sp)) + return -EFAULT; + return 0; + + case TUNSETVNETLE: + if (get_user(s, sp)) + return -EFAULT; + if (s) + q->flags |= MACVTAP_VNET_LE; + else + q->flags &= ~MACVTAP_VNET_LE; + return 0; + + case TUNGETVNETBE: + return macvtap_get_vnet_be(q, sp); + + case TUNSETVNETBE: + return macvtap_set_vnet_be(q, sp); + + case TUNSETOFFLOAD: + /* let the user check for future flags */ + if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | + TUN_F_TSO_ECN | TUN_F_UFO)) + return -EINVAL; + + rtnl_lock(); + ret = set_offload(q, arg); + rtnl_unlock(); + return ret; + + case SIOCGIFHWADDR: + rtnl_lock(); + vlan = macvtap_get_vlan(q); + if (!vlan) { + rtnl_unlock(); + return -ENOLINK; + } + ret = 0; + u = vlan->dev->type; + if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || + copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) || + put_user(u, &ifr->ifr_hwaddr.sa_family)) + ret = -EFAULT; + macvtap_put_vlan(vlan); + rtnl_unlock(); + return ret; + + case SIOCSIFHWADDR: + if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa))) + return -EFAULT; + rtnl_lock(); + vlan = macvtap_get_vlan(q); + if (!vlan) { + rtnl_unlock(); + return -ENOLINK; + } + ret = dev_set_mac_address(vlan->dev, &sa); + macvtap_put_vlan(vlan); + rtnl_unlock(); + return ret; + + default: + return -EINVAL; + } +} + +#ifdef CONFIG_COMPAT +static long macvtap_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +const struct file_operations macvtap_fops = { + .owner = THIS_MODULE, + .open = macvtap_open, + .release = macvtap_release, + .read_iter = 
macvtap_read_iter, + .write_iter = macvtap_write_iter, + .poll = macvtap_poll, + .llseek = no_llseek, + .unlocked_ioctl = macvtap_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = macvtap_compat_ioctl, +#endif +}; + +static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, + size_t total_len) +{ + struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); + return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); +} + +static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, + size_t total_len, int flags) +{ + struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); + int ret; + if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) + return -EINVAL; + ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT); + if (ret > total_len) { + m->msg_flags |= MSG_TRUNC; + ret = flags & MSG_TRUNC ? ret : total_len; + } + return ret; +} + +static int macvtap_peek_len(struct socket *sock) +{ + struct macvtap_queue *q = container_of(sock, struct macvtap_queue, + sock); + return skb_array_peek_len(&q->skb_array); +} + +/* Ops structure to mimic raw sockets with tun */ +static const struct proto_ops macvtap_socket_ops = { + .sendmsg = macvtap_sendmsg, + .recvmsg = macvtap_recvmsg, + .peek_len = macvtap_peek_len, +}; + +/* Get an underlying socket object from tun file. Returns error unless file is + * attached to a device. The returned object works like a packet socket, it + * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for + * holding a reference to the file for as long as the socket is in use. */ +struct socket *macvtap_get_socket(struct file *file) +{ + struct macvtap_queue *q; + if (file->f_op != &macvtap_fops) + return ERR_PTR(-EINVAL); + q = file->private_data; + if (!q) + return ERR_PTR(-EBADFD); + return &q->sock; +} +EXPORT_SYMBOL_GPL(macvtap_get_socket); + +int macvtap_queue_resize(struct macvlan_dev *vlan) +{ + struct net_device *dev = vlan->dev; + struct macvtap_queue *q; + struct skb_array **arrays; + int n = vlan->numqueues; + int ret, i = 0; + + arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); + if (!arrays) + return -ENOMEM; + + list_for_each_entry(q, &vlan->queue_list, next) + arrays[i++] = &q->skb_array; + + ret = skb_array_resize_multiple(arrays, n, + dev->tx_queue_len, GFP_KERNEL); + + kfree(arrays); + return ret; +} diff --git a/include/linux/if_macvtap.h b/include/linux/if_macvtap.h new file mode 100644 index 000000000000..c9bf84b75b27 --- /dev/null +++ b/include/linux/if_macvtap.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_IF_MACVTAP_H_ +#define _LINUX_IF_MACVTAP_H_ + +rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb); +void macvtap_del_queues(struct net_device *dev); +int macvtap_get_minor(struct macvlan_dev *vlan); +void macvtap_free_minor(struct macvlan_dev *vlan); +int macvtap_queue_resize(struct macvlan_dev *vlan); + +#endif /*_LINUX_IF_MACVTAP_H_*/ -- cgit v1.2.3 From 635b8c8ecdd27142d7fdab0df334b2e9201481cf Mon Sep 17 00:00:00 2001 From: Sainath Grandhi Date: Fri, 10 Feb 2017 16:03:47 -0800 Subject: tap: Renaming tap related APIs, data structures, macros Renaming tap related APIs, data structures and macros in tap.c from macvtap_.* to tap_.* Signed-off-by: Sainath Grandhi Signed-off-by: David S. 
Miller --- drivers/net/macvtap_main.c | 18 +-- drivers/net/tap.c | 332 ++++++++++++++++++++++----------------------- drivers/vhost/net.c | 3 +- include/linux/if_macvlan.h | 17 +-- include/linux/if_macvtap.h | 10 -- include/linux/if_tap.h | 23 ++++ 6 files changed, 202 insertions(+), 201 deletions(-) delete mode 100644 include/linux/if_macvtap.h create mode 100644 include/linux/if_tap.h diff --git a/drivers/net/macvtap_main.c b/drivers/net/macvtap_main.c index 96ffa60c5a36..548f339a75bd 100644 --- a/drivers/net/macvtap_main.c +++ b/drivers/net/macvtap_main.c @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -62,7 +62,7 @@ static int macvtap_newlink(struct net *src_net, */ vlan->tap_features = TUN_OFFLOADS; - err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan); + err = netdev_rx_handler_register(dev, tap_handle_frame, vlan); if (err) return err; @@ -82,7 +82,7 @@ static void macvtap_dellink(struct net_device *dev, struct list_head *head) { netdev_rx_handler_unregister(dev); - macvtap_del_queues(dev); + tap_del_queues(dev); macvlan_dellink(dev, head); } @@ -121,7 +121,7 @@ static int macvtap_device_event(struct notifier_block *unused, * been registered but before register_netdevice has * finished running. */ - err = macvtap_get_minor(vlan); + err = tap_get_minor(vlan); if (err) return notifier_from_errno(err); @@ -129,7 +129,7 @@ static int macvtap_device_event(struct notifier_block *unused, classdev = device_create(&macvtap_class, &dev->dev, devt, dev, tap_name); if (IS_ERR(classdev)) { - macvtap_free_minor(vlan); + tap_free_minor(vlan); return notifier_from_errno(PTR_ERR(classdev)); } err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, @@ -144,10 +144,10 @@ static int macvtap_device_event(struct notifier_block *unused, sysfs_remove_link(&dev->dev.kobj, tap_name); devt = MKDEV(MAJOR(macvtap_major), vlan->minor); device_destroy(&macvtap_class, devt); - macvtap_free_minor(vlan); + tap_free_minor(vlan); break; case NETDEV_CHANGE_TX_QUEUE_LEN: - if (macvtap_queue_resize(vlan)) + if (tap_queue_resize(vlan)) return NOTIFY_BAD; break; } @@ -159,7 +159,7 @@ static struct notifier_block macvtap_notifier_block __read_mostly = { .notifier_call = macvtap_device_event, }; -extern struct file_operations macvtap_fops; +extern struct file_operations tap_fops; static int macvtap_init(void) { int err; @@ -169,7 +169,7 @@ static int macvtap_init(void) if (err) goto out1; - cdev_init(&macvtap_cdev, &macvtap_fops); + cdev_init(&macvtap_cdev, &tap_fops); err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS); if (err) goto out2; diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 6f6228e4fd3f..15ca2d531d05 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -24,16 +24,16 @@ #include /* - * A macvtap queue is the central object of this driver, it connects + * A tap queue is the central object of this driver, it connects * an open character device to a macvlan interface. There can be * multiple queues on one interface, which map back to queues * implemented in hardware on the underlying device. * - * macvtap_proto is used to allocate queues through the sock allocation + * tap_proto is used to allocate queues through the sock allocation * mechanism. 
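 * Each open of the character device allocates one such queue;
 * tap_open() does this via
 *	sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tap_proto, 0);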
* */ -struct macvtap_queue { +struct tap_queue { struct sock sk; struct socket sock; struct socket_wq wq; @@ -47,21 +47,21 @@ struct macvtap_queue { struct skb_array skb_array; }; -#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) +#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) -#define MACVTAP_VNET_LE 0x80000000 -#define MACVTAP_VNET_BE 0x40000000 +#define TAP_VNET_LE 0x80000000 +#define TAP_VNET_BE 0x40000000 #ifdef CONFIG_TUN_VNET_CROSS_LE -static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) +static inline bool tap_legacy_is_little_endian(struct tap_queue *q) { - return q->flags & MACVTAP_VNET_BE ? false : + return q->flags & TAP_VNET_BE ? false : virtio_legacy_is_little_endian(); } -static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp) +static long tap_get_vnet_be(struct tap_queue *q, int __user *sp) { - int s = !!(q->flags & MACVTAP_VNET_BE); + int s = !!(q->flags & TAP_VNET_BE); if (put_user(s, sp)) return -EFAULT; @@ -69,7 +69,7 @@ static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp) return 0; } -static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp) +static long tap_set_vnet_be(struct tap_queue *q, int __user *sp) { int s; @@ -77,77 +77,77 @@ static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp) return -EFAULT; if (s) - q->flags |= MACVTAP_VNET_BE; + q->flags |= TAP_VNET_BE; else - q->flags &= ~MACVTAP_VNET_BE; + q->flags &= ~TAP_VNET_BE; return 0; } #else -static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) +static inline bool tap_legacy_is_little_endian(struct tap_queue *q) { return virtio_legacy_is_little_endian(); } -static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp) +static long tap_get_vnet_be(struct tap_queue *q, int __user *argp) { return -EINVAL; } -static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp) +static long tap_set_vnet_be(struct tap_queue *q, int __user *argp) { return -EINVAL; } #endif /* CONFIG_TUN_VNET_CROSS_LE */ -static inline bool macvtap_is_little_endian(struct macvtap_queue *q) +static inline bool tap_is_little_endian(struct tap_queue *q) { - return q->flags & MACVTAP_VNET_LE || - macvtap_legacy_is_little_endian(q); + return q->flags & TAP_VNET_LE || + tap_legacy_is_little_endian(q); } -static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val) +static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val) { - return __virtio16_to_cpu(macvtap_is_little_endian(q), val); + return __virtio16_to_cpu(tap_is_little_endian(q), val); } -static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val) +static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val) { - return __cpu_to_virtio16(macvtap_is_little_endian(q), val); + return __cpu_to_virtio16(tap_is_little_endian(q), val); } -static struct proto macvtap_proto = { - .name = "macvtap", +static struct proto tap_proto = { + .name = "tap", .owner = THIS_MODULE, - .obj_size = sizeof (struct macvtap_queue), + .obj_size = sizeof(struct tap_queue), }; -#define MACVTAP_NUM_DEVS (1U << MINORBITS) +#define TAP_NUM_DEVS (1U << MINORBITS) static DEFINE_MUTEX(minor_lock); DEFINE_IDR(minor_idr); #define GOODCOPY_LEN 128 -static const struct proto_ops macvtap_socket_ops; +static const struct proto_ops tap_socket_ops; #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) -static struct macvlan_dev *macvtap_get_vlan_rcu(const struct 
net_device *dev) +static struct macvlan_dev *tap_get_vlan_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); } /* * RCU usage: - * The macvtap_queue and the macvlan_dev are loosely coupled, the + * The tap_queue and the macvlan_dev are loosely coupled, the * pointers from one to the other can only be read while rcu_read_lock * or rtnl is held. * - * Both the file and the macvlan_dev hold a reference on the macvtap_queue + * Both the file and the macvlan_dev hold a reference on the tap_queue * through sock_hold(&q->sk). When the macvlan_dev goes away first, * q->vlan becomes inaccessible. When the files gets closed, - * macvtap_get_queue() fails. + * tap_get_queue() fails. * * There may still be references to the struct sock inside of the * queue from outbound SKBs, but these never reference back to the @@ -155,8 +155,8 @@ static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev) * when both our references and any pending SKBs are gone. */ -static int macvtap_enable_queue(struct net_device *dev, struct file *file, - struct macvtap_queue *q) +static int tap_enable_queue(struct net_device *dev, struct file *file, + struct tap_queue *q) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; @@ -177,12 +177,12 @@ out: } /* Requires RTNL */ -static int macvtap_set_queue(struct net_device *dev, struct file *file, - struct macvtap_queue *q) +static int tap_set_queue(struct net_device *dev, struct file *file, + struct tap_queue *q) { struct macvlan_dev *vlan = netdev_priv(dev); - if (vlan->numqueues == MAX_MACVTAP_QUEUES) + if (vlan->numqueues == MAX_TAP_QUEUES) return -EBUSY; rcu_assign_pointer(q->vlan, vlan); @@ -201,10 +201,10 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, return 0; } -static int macvtap_disable_queue(struct macvtap_queue *q) +static int tap_disable_queue(struct tap_queue *q) { struct macvlan_dev *vlan; - struct macvtap_queue *nq; + struct tap_queue *nq; ASSERT_RTNL(); if (!q->enabled) @@ -236,7 +236,7 @@ static int macvtap_disable_queue(struct macvtap_queue *q) * Using the spinlock makes sure that we don't get * to the queue again after destroying it. */ -static void macvtap_put_queue(struct macvtap_queue *q) +static void tap_put_queue(struct tap_queue *q) { struct macvlan_dev *vlan; @@ -245,7 +245,7 @@ static void macvtap_put_queue(struct macvtap_queue *q) if (vlan) { if (q->enabled) - BUG_ON(macvtap_disable_queue(q)); + BUG_ON(tap_disable_queue(q)); vlan->numqueues--; RCU_INIT_POINTER(q->vlan, NULL); @@ -266,11 +266,11 @@ static void macvtap_put_queue(struct macvtap_queue *q) * Cache vlan->numvtaps since it can become zero during the execution * of this function. */ -static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, - struct sk_buff *skb) +static struct tap_queue *tap_get_queue(struct net_device *dev, + struct sk_buff *skb) { struct macvlan_dev *vlan = netdev_priv(dev); - struct macvtap_queue *tap = NULL; + struct tap_queue *tap = NULL; /* Access to taps array is protected by rcu, but access to numvtaps * isn't. Below we use it to lookup a queue, but treat it as a hint * and validate that the result isn't NULL - in case we are @@ -313,10 +313,10 @@ out: * that it holds on all queues and safely set the pointer * from the queues to NULL. 
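 * Files that still have such a queue open keep their own reference on
 * the sock and drop it from tap_put_queue() when they are closed.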
*/ -void macvtap_del_queues(struct net_device *dev) +void tap_del_queues(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); - struct macvtap_queue *q, *tmp; + struct tap_queue *q, *tmp; ASSERT_RTNL(); list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) { @@ -329,23 +329,23 @@ void macvtap_del_queues(struct net_device *dev) } BUG_ON(vlan->numvtaps); BUG_ON(vlan->numqueues); - /* guarantee that any future macvtap_set_queue will fail */ - vlan->numvtaps = MAX_MACVTAP_QUEUES; + /* guarantee that any future tap_set_queue will fail */ + vlan->numvtaps = MAX_TAP_QUEUES; } -rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) +rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct net_device *dev = skb->dev; struct macvlan_dev *vlan; - struct macvtap_queue *q; + struct tap_queue *q; netdev_features_t features = TAP_FEATURES; - vlan = macvtap_get_vlan_rcu(dev); + vlan = tap_get_vlan_rcu(dev); if (!vlan) return RX_HANDLER_PASS; - q = macvtap_get_queue(dev, skb); + q = tap_get_queue(dev, skb); if (!q) return RX_HANDLER_PASS; @@ -409,23 +409,23 @@ drop: return RX_HANDLER_CONSUMED; } -int macvtap_get_minor(struct macvlan_dev *vlan) +int tap_get_minor(struct macvlan_dev *vlan) { int retval = -ENOMEM; mutex_lock(&minor_lock); - retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL); + retval = idr_alloc(&minor_idr, vlan, 1, TAP_NUM_DEVS, GFP_KERNEL); if (retval >= 0) { vlan->minor = retval; } else if (retval == -ENOSPC) { - netdev_err(vlan->dev, "Too many macvtap devices\n"); + netdev_err(vlan->dev, "Too many tap devices\n"); retval = -EINVAL; } mutex_unlock(&minor_lock); return retval < 0 ? retval : 0; } -void macvtap_free_minor(struct macvlan_dev *vlan) +void tap_free_minor(struct macvlan_dev *vlan) { mutex_lock(&minor_lock); if (vlan->minor) { @@ -435,7 +435,7 @@ void macvtap_free_minor(struct macvlan_dev *vlan) mutex_unlock(&minor_lock); } -static struct net_device *dev_get_by_macvtap_minor(int minor) +static struct net_device *dev_get_by_tap_minor(int minor) { struct net_device *dev = NULL; struct macvlan_dev *vlan; @@ -450,7 +450,7 @@ static struct net_device *dev_get_by_macvtap_minor(int minor) return dev; } -static void macvtap_sock_write_space(struct sock *sk) +static void tap_sock_write_space(struct sock *sk) { wait_queue_head_t *wqueue; @@ -463,28 +463,28 @@ static void macvtap_sock_write_space(struct sock *sk) wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); } -static void macvtap_sock_destruct(struct sock *sk) +static void tap_sock_destruct(struct sock *sk) { - struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); + struct tap_queue *q = container_of(sk, struct tap_queue, sk); skb_array_cleanup(&q->skb_array); } -static int macvtap_open(struct inode *inode, struct file *file) +static int tap_open(struct inode *inode, struct file *file) { struct net *net = current->nsproxy->net_ns; struct net_device *dev; - struct macvtap_queue *q; + struct tap_queue *q; int err = -ENODEV; rtnl_lock(); - dev = dev_get_by_macvtap_minor(iminor(inode)); + dev = dev_get_by_tap_minor(iminor(inode)); if (!dev) goto err; err = -ENOMEM; - q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, - &macvtap_proto, 0); + q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, + &tap_proto, 0); if (!q) goto err; @@ -493,15 +493,15 @@ static int macvtap_open(struct inode *inode, struct file *file) q->sock.type = SOCK_RAW; q->sock.state = SS_CONNECTED; q->sock.file = file; - 
q->sock.ops = &macvtap_socket_ops; + q->sock.ops = &tap_socket_ops; sock_init_data(&q->sock, &q->sk); - q->sk.sk_write_space = macvtap_sock_write_space; - q->sk.sk_destruct = macvtap_sock_destruct; + q->sk.sk_write_space = tap_sock_write_space; + q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; q->vnet_hdr_sz = sizeof(struct virtio_net_hdr); /* - * so far only KVM virtio_net uses macvtap, enable zero copy between + * so far only KVM virtio_net uses tap, enable zero copy between * guest kernel and host kernel when lower device supports zerocopy * * The macvlan supports zerocopy iff the lower device supports zero @@ -514,7 +514,7 @@ static int macvtap_open(struct inode *inode, struct file *file) if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL)) goto err_array; - err = macvtap_set_queue(dev, file, q); + err = tap_set_queue(dev, file, q); if (err) goto err_queue; @@ -535,16 +535,16 @@ err: return err; } -static int macvtap_release(struct inode *inode, struct file *file) +static int tap_release(struct inode *inode, struct file *file) { - struct macvtap_queue *q = file->private_data; - macvtap_put_queue(q); + struct tap_queue *q = file->private_data; + tap_put_queue(q); return 0; } -static unsigned int macvtap_poll(struct file *file, poll_table * wait) +static unsigned int tap_poll(struct file *file, poll_table *wait) { - struct macvtap_queue *q = file->private_data; + struct tap_queue *q = file->private_data; unsigned int mask = POLLERR; if (!q) @@ -565,8 +565,8 @@ out: return mask; } -static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, - size_t len, size_t linear, +static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad, + size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; @@ -589,13 +589,13 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, } /* Neighbour code has some assumptions on HH_DATA_MOD alignment */ -#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) +#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN) /* Get packet from user space buffer */ -static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, - struct iov_iter *from, int noblock) +static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, + struct iov_iter *from, int noblock) { - int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); + int good_linear = SKB_MAX_HEAD(TAP_RESERVE); struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long total_len = iov_iter_count(from); @@ -621,14 +621,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, goto err; iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && - macvtap16_to_cpu(q, vnet_hdr.csum_start) + - macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > - macvtap16_to_cpu(q, vnet_hdr.hdr_len)) - vnet_hdr.hdr_len = cpu_to_macvtap16(q, - macvtap16_to_cpu(q, vnet_hdr.csum_start) + - macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2); + tap16_to_cpu(q, vnet_hdr.csum_start) + + tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > + tap16_to_cpu(q, vnet_hdr.hdr_len)) + vnet_hdr.hdr_len = cpu_to_tap16(q, + tap16_to_cpu(q, vnet_hdr.csum_start) + + tap16_to_cpu(q, vnet_hdr.csum_offset) + 2); err = -EINVAL; - if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len) + if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len) goto err; } @@ -640,7 +640,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, struct iov_iter i; copylen = vnet_hdr.hdr_len ? 
- macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; + tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; if (copylen > good_linear) copylen = good_linear; else if (copylen < ETH_HLEN) @@ -654,15 +654,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (!zerocopy) { copylen = len; - linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); + linear = tap16_to_cpu(q, vnet_hdr.hdr_len); if (linear > good_linear) linear = good_linear; else if (linear < ETH_HLEN) linear = ETH_HLEN; } - skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, - linear, noblock, &err); + skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen, + linear, noblock, &err); if (!skb) goto err; @@ -680,7 +680,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (vnet_hdr_len) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, - macvtap_is_little_endian(q)); + tap_is_little_endian(q)); if (err) goto err_kfree; } @@ -728,18 +728,18 @@ err: return err; } -static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from) +static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; - struct macvtap_queue *q = file->private_data; + struct tap_queue *q = file->private_data; - return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); + return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); } /* Put packet to the user space buffer */ -static ssize_t macvtap_put_user(struct macvtap_queue *q, - const struct sk_buff *skb, - struct iov_iter *iter) +static ssize_t tap_put_user(struct tap_queue *q, + const struct sk_buff *skb, + struct iov_iter *iter) { int ret; int vnet_hdr_len = 0; @@ -753,7 +753,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, return -EINVAL; if (virtio_net_hdr_from_skb(skb, &vnet_hdr, - macvtap_is_little_endian(q), true)) + tap_is_little_endian(q), true)) BUG(); if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != @@ -792,9 +792,9 @@ done: return ret ? 
ret : total; } -static ssize_t macvtap_do_read(struct macvtap_queue *q, - struct iov_iter *to, - int noblock) +static ssize_t tap_do_read(struct tap_queue *q, + struct iov_iter *to, + int noblock) { DEFINE_WAIT(wait); struct sk_buff *skb; @@ -827,7 +827,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, finish_wait(sk_sleep(&q->sk), &wait); if (skb) { - ret = macvtap_put_user(q, skb, to); + ret = tap_put_user(q, skb, to); if (unlikely(ret < 0)) kfree_skb(skb); else @@ -836,20 +836,20 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, return ret; } -static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to) +static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; - struct macvtap_queue *q = file->private_data; + struct tap_queue *q = file->private_data; ssize_t len = iov_iter_count(to), ret; - ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK); + ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; return ret; } -static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q) +static struct macvlan_dev *tap_get_vlan(struct tap_queue *q) { struct macvlan_dev *vlan; @@ -861,33 +861,33 @@ static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q) return vlan; } -static void macvtap_put_vlan(struct macvlan_dev *vlan) +static void tap_put_vlan(struct macvlan_dev *vlan) { dev_put(vlan->dev); } -static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) +static int tap_ioctl_set_queue(struct file *file, unsigned int flags) { - struct macvtap_queue *q = file->private_data; + struct tap_queue *q = file->private_data; struct macvlan_dev *vlan; int ret; - vlan = macvtap_get_vlan(q); + vlan = tap_get_vlan(q); if (!vlan) return -EINVAL; if (flags & IFF_ATTACH_QUEUE) - ret = macvtap_enable_queue(vlan->dev, file, q); + ret = tap_enable_queue(vlan->dev, file, q); else if (flags & IFF_DETACH_QUEUE) - ret = macvtap_disable_queue(q); + ret = tap_disable_queue(q); else ret = -EINVAL; - macvtap_put_vlan(vlan); + tap_put_vlan(vlan); return ret; } -static int set_offload(struct macvtap_queue *q, unsigned long arg) +static int set_offload(struct tap_queue *q, unsigned long arg) { struct macvlan_dev *vlan; netdev_features_t features; @@ -919,7 +919,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) * setting the TSO bit means that the userspace wants to * accept TSO frames and turning it off means that user space * does not support TSO. - * For macvtap, we have to invert it to mean the same thing. + * For tap, we have to invert it to mean the same thing. * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. 
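 * (RX_OFFLOADS expands to NETIF_F_GRO | NETIF_F_LRO, see the
 * definition near the top of this file.)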
*/ @@ -941,10 +941,10 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) /* * provide compatibility with generic tun/tap interface */ -static long macvtap_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long tap_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { - struct macvtap_queue *q = file->private_data; + struct tap_queue *q = file->private_data; struct macvlan_dev *vlan; void __user *argp = (void __user *)arg; struct ifreq __user *ifr = argp; @@ -962,16 +962,16 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, return -EFAULT; ret = 0; - if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP)) + if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP)) ret = -EINVAL; else - q->flags = (q->flags & ~MACVTAP_FEATURES) | u; + q->flags = (q->flags & ~TAP_IFFEATURES) | u; return ret; case TUNGETIFF: rtnl_lock(); - vlan = macvtap_get_vlan(q); + vlan = tap_get_vlan(q); if (!vlan) { rtnl_unlock(); return -ENOLINK; @@ -982,7 +982,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || put_user(u, &ifr->ifr_flags)) ret = -EFAULT; - macvtap_put_vlan(vlan); + tap_put_vlan(vlan); rtnl_unlock(); return ret; @@ -990,12 +990,12 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, if (get_user(u, &ifr->ifr_flags)) return -EFAULT; rtnl_lock(); - ret = macvtap_ioctl_set_queue(file, u); + ret = tap_ioctl_set_queue(file, u); rtnl_unlock(); return ret; case TUNGETFEATURES: - if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up)) + if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up)) return -EFAULT; return 0; @@ -1022,7 +1022,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, return 0; case TUNGETVNETLE: - s = !!(q->flags & MACVTAP_VNET_LE); + s = !!(q->flags & TAP_VNET_LE); if (put_user(s, sp)) return -EFAULT; return 0; @@ -1031,16 +1031,16 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, if (get_user(s, sp)) return -EFAULT; if (s) - q->flags |= MACVTAP_VNET_LE; + q->flags |= TAP_VNET_LE; else - q->flags &= ~MACVTAP_VNET_LE; + q->flags &= ~TAP_VNET_LE; return 0; case TUNGETVNETBE: - return macvtap_get_vnet_be(q, sp); + return tap_get_vnet_be(q, sp); case TUNSETVNETBE: - return macvtap_set_vnet_be(q, sp); + return tap_set_vnet_be(q, sp); case TUNSETOFFLOAD: /* let the user check for future flags */ @@ -1055,7 +1055,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case SIOCGIFHWADDR: rtnl_lock(); - vlan = macvtap_get_vlan(q); + vlan = tap_get_vlan(q); if (!vlan) { rtnl_unlock(); return -ENOLINK; @@ -1066,7 +1066,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) || put_user(u, &ifr->ifr_hwaddr.sa_family)) ret = -EFAULT; - macvtap_put_vlan(vlan); + tap_put_vlan(vlan); rtnl_unlock(); return ret; @@ -1074,13 +1074,13 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa))) return -EFAULT; rtnl_lock(); - vlan = macvtap_get_vlan(q); + vlan = tap_get_vlan(q); if (!vlan) { rtnl_unlock(); return -ENOLINK; } ret = dev_set_mac_address(vlan->dev, &sa); - macvtap_put_vlan(vlan); + tap_put_vlan(vlan); rtnl_unlock(); return ret; @@ -1090,42 +1090,42 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, } #ifdef CONFIG_COMPAT -static long macvtap_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long tap_compat_ioctl(struct 
file *file, unsigned int cmd, + unsigned long arg) { - return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); + return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); } #endif -const struct file_operations macvtap_fops = { +const struct file_operations tap_fops = { .owner = THIS_MODULE, - .open = macvtap_open, - .release = macvtap_release, - .read_iter = macvtap_read_iter, - .write_iter = macvtap_write_iter, - .poll = macvtap_poll, + .open = tap_open, + .release = tap_release, + .read_iter = tap_read_iter, + .write_iter = tap_write_iter, + .poll = tap_poll, .llseek = no_llseek, - .unlocked_ioctl = macvtap_ioctl, + .unlocked_ioctl = tap_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = macvtap_compat_ioctl, + .compat_ioctl = tap_compat_ioctl, #endif }; -static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, - size_t total_len) +static int tap_sendmsg(struct socket *sock, struct msghdr *m, + size_t total_len) { - struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); - return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); + struct tap_queue *q = container_of(sock, struct tap_queue, sock); + return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); } -static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, - size_t total_len, int flags) +static int tap_recvmsg(struct socket *sock, struct msghdr *m, + size_t total_len, int flags) { - struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); + struct tap_queue *q = container_of(sock, struct tap_queue, sock); int ret; if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) return -EINVAL; - ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT); + ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; @@ -1133,40 +1133,40 @@ static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, return ret; } -static int macvtap_peek_len(struct socket *sock) +static int tap_peek_len(struct socket *sock) { - struct macvtap_queue *q = container_of(sock, struct macvtap_queue, + struct tap_queue *q = container_of(sock, struct tap_queue, sock); return skb_array_peek_len(&q->skb_array); } /* Ops structure to mimic raw sockets with tun */ -static const struct proto_ops macvtap_socket_ops = { - .sendmsg = macvtap_sendmsg, - .recvmsg = macvtap_recvmsg, - .peek_len = macvtap_peek_len, +static const struct proto_ops tap_socket_ops = { + .sendmsg = tap_sendmsg, + .recvmsg = tap_recvmsg, + .peek_len = tap_peek_len, }; /* Get an underlying socket object from tun file. Returns error unless file is * attached to a device. The returned object works like a packet socket, it * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for * holding a reference to the file for as long as the socket is in use. 
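 * Typical use, as in get_tap_socket() in drivers/vhost/net.c:
 *
 *	sock = tap_get_socket(file);
 *	if (IS_ERR(sock))
 *		fput(file);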
*/ -struct socket *macvtap_get_socket(struct file *file) +struct socket *tap_get_socket(struct file *file) { - struct macvtap_queue *q; - if (file->f_op != &macvtap_fops) + struct tap_queue *q; + if (file->f_op != &tap_fops) return ERR_PTR(-EINVAL); q = file->private_data; if (!q) return ERR_PTR(-EBADFD); return &q->sock; } -EXPORT_SYMBOL_GPL(macvtap_get_socket); +EXPORT_SYMBOL_GPL(tap_get_socket); -int macvtap_queue_resize(struct macvlan_dev *vlan) +int tap_queue_resize(struct macvlan_dev *vlan) { struct net_device *dev = vlan->dev; - struct macvtap_queue *q; + struct tap_queue *q; struct skb_array **arrays; int n = vlan->numqueues; int ret, i = 0; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index c42e9c305134..2fe35354f20e 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -960,7 +961,7 @@ static struct socket *get_tap_socket(int fd) sock = tun_get_socket(file); if (!IS_ERR(sock)) return sock; - sock = macvtap_get_socket(file); + sock = tap_get_socket(file); if (IS_ERR(sock)) fput(file); return sock; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index a4ccc3122f93..c9ec1343d187 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -9,19 +9,6 @@ #include #include -#if IS_ENABLED(CONFIG_MACVTAP) -struct socket *macvtap_get_socket(struct file *); -#else -#include -#include -struct file; -struct socket; -static inline struct socket *macvtap_get_socket(struct file *f) -{ - return ERR_PTR(-EINVAL); -} -#endif /* CONFIG_MACVTAP */ - struct macvlan_port; struct macvtap_queue; @@ -29,7 +16,7 @@ struct macvtap_queue; * Maximum times a macvtap device can be opened. This can be used to * configure the number of receive queue, e.g. for multiqueue virtio. */ -#define MAX_MACVTAP_QUEUES 256 +#define MAX_TAP_QUEUES 256 #define MACVLAN_MC_FILTER_BITS 8 #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) @@ -49,7 +36,7 @@ struct macvlan_dev { enum macvlan_mode mode; u16 flags; /* This array tracks active taps. 
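 * Slots 0..numvtaps-1 hold the enabled queues and are dereferenced
 * under RCU by tap_get_queue(); disabled queues are only reachable
 * through queue_list below.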
*/ - struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES]; + struct tap_queue __rcu *taps[MAX_TAP_QUEUES]; /* This list tracks all taps (both enabled and disabled) */ struct list_head queue_list; int numvtaps; diff --git a/include/linux/if_macvtap.h b/include/linux/if_macvtap.h deleted file mode 100644 index c9bf84b75b27..000000000000 --- a/include/linux/if_macvtap.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _LINUX_IF_MACVTAP_H_ -#define _LINUX_IF_MACVTAP_H_ - -rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb); -void macvtap_del_queues(struct net_device *dev); -int macvtap_get_minor(struct macvlan_dev *vlan); -void macvtap_free_minor(struct macvlan_dev *vlan); -int macvtap_queue_resize(struct macvlan_dev *vlan); - -#endif /*_LINUX_IF_MACVTAP_H_*/ diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h new file mode 100644 index 000000000000..97d27b8ebd55 --- /dev/null +++ b/include/linux/if_tap.h @@ -0,0 +1,23 @@ +#ifndef _LINUX_IF_TAP_H_ +#define _LINUX_IF_TAP_H_ + +#if IS_ENABLED(CONFIG_MACVTAP) +struct socket *tap_get_socket(struct file *); +#else +#include +#include +struct file; +struct socket; +static inline struct socket *tap_get_socket(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_MACVTAP */ + +rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); +void tap_del_queues(struct net_device *dev); +int tap_get_minor(struct macvlan_dev *vlan); +void tap_free_minor(struct macvlan_dev *vlan); +int tap_queue_resize(struct macvlan_dev *vlan); + +#endif /*_LINUX_IF_TAP_H_*/ -- cgit v1.2.3 From ebc05ba7e8600b52a2a0c87a43105143368aca2a Mon Sep 17 00:00:00 2001 From: Sainath Grandhi Date: Fri, 10 Feb 2017 16:03:48 -0800 Subject: tap: Tap character device creation/destroy API This patch provides tap device create/destroy APIs in tap.c. Signed-off-by: Sainath Grandhi Signed-off-by: David S. Miller --- drivers/net/macvtap_main.c | 30 +++++++--------------- drivers/net/tap.c | 62 ++++++++++++++++++++++++++++++++++++++-------- include/linux/if_tap.h | 3 +++ 3 files changed, 63 insertions(+), 32 deletions(-) diff --git a/drivers/net/macvtap_main.c b/drivers/net/macvtap_main.c index 548f339a75bd..215ab7abae89 100644 --- a/drivers/net/macvtap_main.c +++ b/drivers/net/macvtap_main.c @@ -28,7 +28,6 @@ * Variables for dealing with macvtaps device numbers. 
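 * With tap_create_cdev()/tap_destroy_cdev() the chrdev region and the
 * minor idr are managed from drivers/net/tap.c; only the dev_t and
 * the cdev itself remain here.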
*/ static dev_t macvtap_major; -#define MACVTAP_NUM_DEVS (1U << MINORBITS) static const void *macvtap_net_namespace(struct device *d) { @@ -159,57 +158,46 @@ static struct notifier_block macvtap_notifier_block __read_mostly = { .notifier_call = macvtap_device_event, }; -extern struct file_operations tap_fops; static int macvtap_init(void) { int err; - err = alloc_chrdev_region(&macvtap_major, 0, - MACVTAP_NUM_DEVS, "macvtap"); - if (err) - goto out1; + err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap"); - cdev_init(&macvtap_cdev, &tap_fops); - err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS); if (err) - goto out2; + goto out1; err = class_register(&macvtap_class); if (err) - goto out3; + goto out2; err = register_netdevice_notifier(&macvtap_notifier_block); if (err) - goto out4; + goto out3; err = macvlan_link_register(&macvtap_link_ops); if (err) - goto out5; + goto out4; return 0; -out5: - unregister_netdevice_notifier(&macvtap_notifier_block); out4: - class_unregister(&macvtap_class); + unregister_netdevice_notifier(&macvtap_notifier_block); out3: - cdev_del(&macvtap_cdev); + class_unregister(&macvtap_class); out2: - unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); + tap_destroy_cdev(macvtap_major, &macvtap_cdev); out1: return err; } module_init(macvtap_init); -extern struct idr minor_idr; static void macvtap_exit(void) { rtnl_link_unregister(&macvtap_link_ops); unregister_netdevice_notifier(&macvtap_notifier_block); class_unregister(&macvtap_class); - cdev_del(&macvtap_cdev); - unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); - idr_destroy(&minor_idr); + tap_destroy_cdev(macvtap_major, &macvtap_cdev); } module_exit(macvtap_exit); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 15ca2d531d05..04ba9782c2f3 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -123,8 +123,12 @@ static struct proto tap_proto = { }; #define TAP_NUM_DEVS (1U << MINORBITS) -static DEFINE_MUTEX(minor_lock); -DEFINE_IDR(minor_idr); +struct major_info { + dev_t major; + struct idr minor_idr; + struct mutex minor_lock; + const char *device_name; +} macvtap_major; #define GOODCOPY_LEN 128 @@ -413,26 +417,26 @@ int tap_get_minor(struct macvlan_dev *vlan) { int retval = -ENOMEM; - mutex_lock(&minor_lock); - retval = idr_alloc(&minor_idr, vlan, 1, TAP_NUM_DEVS, GFP_KERNEL); + mutex_lock(&macvtap_major.minor_lock); + retval = idr_alloc(&macvtap_major.minor_idr, vlan, 1, TAP_NUM_DEVS, GFP_KERNEL); if (retval >= 0) { vlan->minor = retval; } else if (retval == -ENOSPC) { netdev_err(vlan->dev, "Too many tap devices\n"); retval = -EINVAL; } - mutex_unlock(&minor_lock); + mutex_unlock(&macvtap_major.minor_lock); return retval < 0 ? 
retval : 0; } void tap_free_minor(struct macvlan_dev *vlan) { - mutex_lock(&minor_lock); + mutex_lock(&macvtap_major.minor_lock); if (vlan->minor) { - idr_remove(&minor_idr, vlan->minor); + idr_remove(&macvtap_major.minor_idr, vlan->minor); vlan->minor = 0; } - mutex_unlock(&minor_lock); + mutex_unlock(&macvtap_major.minor_lock); } static struct net_device *dev_get_by_tap_minor(int minor) @@ -440,13 +444,13 @@ static struct net_device *dev_get_by_tap_minor(int minor) struct net_device *dev = NULL; struct macvlan_dev *vlan; - mutex_lock(&minor_lock); - vlan = idr_find(&minor_idr, minor); + mutex_lock(&macvtap_major.minor_lock); + vlan = idr_find(&macvtap_major.minor_idr, minor); if (vlan) { dev = vlan->dev; dev_hold(dev); } - mutex_unlock(&minor_lock); + mutex_unlock(&macvtap_major.minor_lock); return dev; } @@ -1184,3 +1188,39 @@ int tap_queue_resize(struct macvlan_dev *vlan) kfree(arrays); return ret; } + +int tap_create_cdev(struct cdev *tap_cdev, + dev_t *tap_major, const char *device_name) +{ + int err; + + err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name); + if (err) + goto out1; + + cdev_init(tap_cdev, &tap_fops); + err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS); + if (err) + goto out2; + + macvtap_major.major = MAJOR(*tap_major); + + idr_init(&macvtap_major.minor_idr); + mutex_init(&macvtap_major.minor_lock); + + macvtap_major.device_name = device_name; + + return 0; + +out2: + unregister_chrdev_region(*tap_major, TAP_NUM_DEVS); +out1: + return err; +} + +void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev) +{ + cdev_del(tap_cdev); + unregister_chrdev_region(major, TAP_NUM_DEVS); + idr_destroy(&macvtap_major.minor_idr); +} diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 97d27b8ebd55..a2dfd9063a6c 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -19,5 +19,8 @@ void tap_del_queues(struct net_device *dev); int tap_get_minor(struct macvlan_dev *vlan); void tap_free_minor(struct macvlan_dev *vlan); int tap_queue_resize(struct macvlan_dev *vlan); +int tap_create_cdev(struct cdev *tap_cdev, + dev_t *tap_major, const char *device_name); +void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); #endif /*_LINUX_IF_TAP_H_*/ -- cgit v1.2.3 From 6fe3faf86757eb7f078ff06b23b206f17dc4fb36 Mon Sep 17 00:00:00 2001 From: Sainath Grandhi Date: Fri, 10 Feb 2017 16:03:49 -0800 Subject: tap: Abstract type of virtual interface from tap implementation macvlan object is re-structured to hold tap related elements in a separate entity, tap_dev. Upon NETDEV_REGISTER device_event, tap_dev is registered with idr and fetched again on tap_open. Few of the tap functions are modified to accepted tap_dev as argument. tap_dev object includes callbacks to be used by underlying virtual interface to take care of tx and rx accounting. Signed-off-by: Sainath Grandhi Signed-off-by: David S. 
Miller --- drivers/net/macvlan.c | 2 +- drivers/net/macvtap_main.c | 71 +++++++++--- drivers/net/tap.c | 264 ++++++++++++++++++++------------------------- include/linux/if_tap.h | 57 +++++++++- 4 files changed, 229 insertions(+), 165 deletions(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cbfc1be23a0e..9261722960a7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1525,7 +1525,6 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { int macvlan_link_register(struct rtnl_link_ops *ops) { /* common fields */ - ops->priv_size = sizeof(struct macvlan_dev); ops->validate = macvlan_validate; ops->maxtype = IFLA_MACVLAN_MAX; ops->policy = macvlan_policy; @@ -1548,6 +1547,7 @@ static struct rtnl_link_ops macvlan_link_ops = { .newlink = macvlan_newlink, .dellink = macvlan_dellink, .get_link_net = macvlan_get_link_net, + .priv_size = sizeof(struct macvlan_dev), }; static int macvlan_device_event(struct notifier_block *unused, diff --git a/drivers/net/macvtap_main.c b/drivers/net/macvtap_main.c index 215ab7abae89..0238df62bf45 100644 --- a/drivers/net/macvtap_main.c +++ b/drivers/net/macvtap_main.c @@ -24,6 +24,11 @@ #include #include +struct macvtap_dev { + struct macvlan_dev vlan; + struct tap_dev tap; +}; + /* * Variables for dealing with macvtaps device numbers. */ @@ -46,22 +51,55 @@ static struct cdev macvtap_cdev; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ NETIF_F_TSO6 | NETIF_F_UFO) +static void macvtap_count_tx_dropped(struct tap_dev *tap) +{ + struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); + struct macvlan_dev *vlan = &vlantap->vlan; + + this_cpu_inc(vlan->pcpu_stats->tx_dropped); +} + +static void macvtap_count_rx_dropped(struct tap_dev *tap) +{ + struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); + struct macvlan_dev *vlan = &vlantap->vlan; + + macvlan_count_rx(vlan, 0, 0, 0); +} + +static void macvtap_update_features(struct tap_dev *tap, + netdev_features_t features) +{ + struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); + struct macvlan_dev *vlan = &vlantap->vlan; + + vlan->set_features = features; + netdev_update_features(vlan->dev); +} + static int macvtap_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct macvlan_dev *vlan = netdev_priv(dev); + struct macvtap_dev *vlantap = netdev_priv(dev); int err; - INIT_LIST_HEAD(&vlan->queue_list); + INIT_LIST_HEAD(&vlantap->tap.queue_list); /* Since macvlan supports all offloads by default, make * tap support all offloads also. 
*/ - vlan->tap_features = TUN_OFFLOADS; + vlantap->tap.tap_features = TUN_OFFLOADS; - err = netdev_rx_handler_register(dev, tap_handle_frame, vlan); + /* Register callbacks for rx/tx drops accounting and updating + * net_device features + */ + vlantap->tap.count_tx_dropped = macvtap_count_tx_dropped; + vlantap->tap.count_rx_dropped = macvtap_count_rx_dropped; + vlantap->tap.update_features = macvtap_update_features; + + err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap); if (err) return err; @@ -74,14 +112,18 @@ static int macvtap_newlink(struct net *src_net, return err; } + vlantap->tap.dev = vlantap->vlan.dev; + return 0; } static void macvtap_dellink(struct net_device *dev, struct list_head *head) { + struct macvtap_dev *vlantap = netdev_priv(dev); + netdev_rx_handler_unregister(dev); - tap_del_queues(dev); + tap_del_queues(&vlantap->tap); macvlan_dellink(dev, head); } @@ -96,13 +138,14 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = { .setup = macvtap_setup, .newlink = macvtap_newlink, .dellink = macvtap_dellink, + .priv_size = sizeof(struct macvtap_dev), }; static int macvtap_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct macvlan_dev *vlan; + struct macvtap_dev *vlantap; struct device *classdev; dev_t devt; int err; @@ -112,7 +155,7 @@ static int macvtap_device_event(struct notifier_block *unused, return NOTIFY_DONE; snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); - vlan = netdev_priv(dev); + vlantap = netdev_priv(dev); switch (event) { case NETDEV_REGISTER: @@ -120,15 +163,15 @@ static int macvtap_device_event(struct notifier_block *unused, * been registered but before register_netdevice has * finished running. */ - err = tap_get_minor(vlan); + err = tap_get_minor(&vlantap->tap); if (err) return notifier_from_errno(err); - devt = MKDEV(MAJOR(macvtap_major), vlan->minor); + devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); classdev = device_create(&macvtap_class, &dev->dev, devt, dev, tap_name); if (IS_ERR(classdev)) { - tap_free_minor(vlan); + tap_free_minor(&vlantap->tap); return notifier_from_errno(PTR_ERR(classdev)); } err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, @@ -138,15 +181,15 @@ static int macvtap_device_event(struct notifier_block *unused, break; case NETDEV_UNREGISTER: /* vlan->minor == 0 if NETDEV_REGISTER above failed */ - if (vlan->minor == 0) + if (vlantap->tap.minor == 0) break; sysfs_remove_link(&dev->dev.kobj, tap_name); - devt = MKDEV(MAJOR(macvtap_major), vlan->minor); + devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); device_destroy(&macvtap_class, devt); - tap_free_minor(vlan); + tap_free_minor(&vlantap->tap); break; case NETDEV_CHANGE_TX_QUEUE_LEN: - if (tap_queue_resize(vlan)) + if (tap_queue_resize(&vlantap->tap)) return NOTIFY_BAD; break; } diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 04ba9782c2f3..7d3e8b18f5e6 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1,5 +1,5 @@ #include -#include +#include #include #include #include @@ -23,30 +23,6 @@ #include #include -/* - * A tap queue is the central object of this driver, it connects - * an open character device to a macvlan interface. There can be - * multiple queues on one interface, which map back to queues - * implemented in hardware on the underlying device. - * - * tap_proto is used to allocate queues through the sock allocation - * mechanism. 
- * - */ -struct tap_queue { - struct sock sk; - struct socket sock; - struct socket_wq wq; - int vnet_hdr_sz; - struct macvlan_dev __rcu *vlan; - struct file *file; - unsigned int flags; - u16 queue_index; - bool enabled; - struct list_head next; - struct skb_array skb_array; -}; - #define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) #define TAP_VNET_LE 0x80000000 @@ -137,7 +113,7 @@ static const struct proto_ops tap_socket_ops; #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) -static struct macvlan_dev *tap_get_vlan_rcu(const struct net_device *dev) +static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); } @@ -159,10 +135,9 @@ static struct macvlan_dev *tap_get_vlan_rcu(const struct net_device *dev) * when both our references and any pending SKBs are gone. */ -static int tap_enable_queue(struct net_device *dev, struct file *file, +static int tap_enable_queue(struct tap_dev *tap, struct file *file, struct tap_queue *q) { - struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; ASSERT_RTNL(); @@ -171,62 +146,60 @@ static int tap_enable_queue(struct net_device *dev, struct file *file, goto out; err = 0; - rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); - q->queue_index = vlan->numvtaps; + rcu_assign_pointer(tap->taps[tap->numvtaps], q); + q->queue_index = tap->numvtaps; q->enabled = true; - vlan->numvtaps++; + tap->numvtaps++; out: return err; } /* Requires RTNL */ -static int tap_set_queue(struct net_device *dev, struct file *file, +static int tap_set_queue(struct tap_dev *tap, struct file *file, struct tap_queue *q) { - struct macvlan_dev *vlan = netdev_priv(dev); - - if (vlan->numqueues == MAX_TAP_QUEUES) + if (tap->numqueues == MAX_TAP_QUEUES) return -EBUSY; - rcu_assign_pointer(q->vlan, vlan); - rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); + rcu_assign_pointer(q->tap, tap); + rcu_assign_pointer(tap->taps[tap->numvtaps], q); sock_hold(&q->sk); q->file = file; - q->queue_index = vlan->numvtaps; + q->queue_index = tap->numvtaps; q->enabled = true; file->private_data = q; - list_add_tail(&q->next, &vlan->queue_list); + list_add_tail(&q->next, &tap->queue_list); - vlan->numvtaps++; - vlan->numqueues++; + tap->numvtaps++; + tap->numqueues++; return 0; } static int tap_disable_queue(struct tap_queue *q) { - struct macvlan_dev *vlan; + struct tap_dev *tap; struct tap_queue *nq; ASSERT_RTNL(); if (!q->enabled) return -EINVAL; - vlan = rtnl_dereference(q->vlan); + tap = rtnl_dereference(q->tap); - if (vlan) { + if (tap) { int index = q->queue_index; - BUG_ON(index >= vlan->numvtaps); - nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]); + BUG_ON(index >= tap->numvtaps); + nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]); nq->queue_index = index; - rcu_assign_pointer(vlan->taps[index], nq); - RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL); + rcu_assign_pointer(tap->taps[index], nq); + RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL); q->enabled = false; - vlan->numvtaps--; + tap->numvtaps--; } return 0; @@ -242,17 +215,17 @@ static int tap_disable_queue(struct tap_queue *q) */ static void tap_put_queue(struct tap_queue *q) { - struct macvlan_dev *vlan; + struct tap_dev *tap; rtnl_lock(); - vlan = rtnl_dereference(q->vlan); + tap = rtnl_dereference(q->tap); - if (vlan) { + if (tap) { if (q->enabled) BUG_ON(tap_disable_queue(q)); - vlan->numqueues--; - RCU_INIT_POINTER(q->vlan, NULL); + tap->numqueues--; + RCU_INIT_POINTER(q->tap, 
NULL); sock_put(&q->sk); list_del_init(&q->next); } @@ -270,17 +243,16 @@ static void tap_put_queue(struct tap_queue *q) * Cache vlan->numvtaps since it can become zero during the execution * of this function. */ -static struct tap_queue *tap_get_queue(struct net_device *dev, +static struct tap_queue *tap_get_queue(struct tap_dev *tap, struct sk_buff *skb) { - struct macvlan_dev *vlan = netdev_priv(dev); - struct tap_queue *tap = NULL; + struct tap_queue *queue = NULL; /* Access to taps array is protected by rcu, but access to numvtaps * isn't. Below we use it to lookup a queue, but treat it as a hint * and validate that the result isn't NULL - in case we are * racing against queue removal. */ - int numvtaps = ACCESS_ONCE(vlan->numvtaps); + int numvtaps = ACCESS_ONCE(tap->numvtaps); __u32 rxq; if (!numvtaps) @@ -292,7 +264,7 @@ static struct tap_queue *tap_get_queue(struct net_device *dev, /* Check if we can use flow to select a queue */ rxq = skb_get_hash(skb); if (rxq) { - tap = rcu_dereference(vlan->taps[rxq % numvtaps]); + queue = rcu_dereference(tap->taps[rxq % numvtaps]); goto out; } @@ -302,14 +274,14 @@ static struct tap_queue *tap_get_queue(struct net_device *dev, while (unlikely(rxq >= numvtaps)) rxq -= numvtaps; - tap = rcu_dereference(vlan->taps[rxq]); + queue = rcu_dereference(tap->taps[rxq]); goto out; } single: - tap = rcu_dereference(vlan->taps[0]); + queue = rcu_dereference(tap->taps[0]); out: - return tap; + return queue; } /* @@ -317,39 +289,38 @@ out: * that it holds on all queues and safely set the pointer * from the queues to NULL. */ -void tap_del_queues(struct net_device *dev) +void tap_del_queues(struct tap_dev *tap) { - struct macvlan_dev *vlan = netdev_priv(dev); struct tap_queue *q, *tmp; ASSERT_RTNL(); - list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) { + list_for_each_entry_safe(q, tmp, &tap->queue_list, next) { list_del_init(&q->next); - RCU_INIT_POINTER(q->vlan, NULL); + RCU_INIT_POINTER(q->tap, NULL); if (q->enabled) - vlan->numvtaps--; - vlan->numqueues--; + tap->numvtaps--; + tap->numqueues--; sock_put(&q->sk); } - BUG_ON(vlan->numvtaps); - BUG_ON(vlan->numqueues); + BUG_ON(tap->numvtaps); + BUG_ON(tap->numqueues); /* guarantee that any future tap_set_queue will fail */ - vlan->numvtaps = MAX_TAP_QUEUES; + tap->numvtaps = MAX_TAP_QUEUES; } rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct net_device *dev = skb->dev; - struct macvlan_dev *vlan; + struct tap_dev *tap; struct tap_queue *q; netdev_features_t features = TAP_FEATURES; - vlan = tap_get_vlan_rcu(dev); - if (!vlan) + tap = tap_dev_get_rcu(dev); + if (!tap) return RX_HANDLER_PASS; - q = tap_get_queue(dev, skb); + q = tap_get_queue(tap, skb); if (!q) return RX_HANDLER_PASS; @@ -363,7 +334,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) * enabled. */ if (q->flags & IFF_VNET_HDR) - features |= vlan->tap_features; + features |= tap->tap_features; if (netif_needs_gso(skb, features)) { struct sk_buff *segs = __skb_gso_segment(skb, features, false); @@ -408,50 +379,51 @@ wake_up: drop: /* Count errors/drops only here, thus don't care about args. 
*/ - macvlan_count_rx(vlan, 0, 0, 0); + if (tap->count_rx_dropped) + tap->count_rx_dropped(tap); kfree_skb(skb); return RX_HANDLER_CONSUMED; } -int tap_get_minor(struct macvlan_dev *vlan) +int tap_get_minor(struct tap_dev *tap) { int retval = -ENOMEM; mutex_lock(&macvtap_major.minor_lock); - retval = idr_alloc(&macvtap_major.minor_idr, vlan, 1, TAP_NUM_DEVS, GFP_KERNEL); + retval = idr_alloc(&macvtap_major.minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL); if (retval >= 0) { - vlan->minor = retval; + tap->minor = retval; } else if (retval == -ENOSPC) { - netdev_err(vlan->dev, "Too many tap devices\n"); + netdev_err(tap->dev, "Too many tap devices\n"); retval = -EINVAL; } mutex_unlock(&macvtap_major.minor_lock); return retval < 0 ? retval : 0; } -void tap_free_minor(struct macvlan_dev *vlan) +void tap_free_minor(struct tap_dev *tap) { mutex_lock(&macvtap_major.minor_lock); - if (vlan->minor) { - idr_remove(&macvtap_major.minor_idr, vlan->minor); - vlan->minor = 0; + if (tap->minor) { + idr_remove(&macvtap_major.minor_idr, tap->minor); + tap->minor = 0; } mutex_unlock(&macvtap_major.minor_lock); } -static struct net_device *dev_get_by_tap_minor(int minor) +static struct tap_dev *dev_get_by_tap_minor(int minor) { struct net_device *dev = NULL; - struct macvlan_dev *vlan; + struct tap_dev *tap; mutex_lock(&macvtap_major.minor_lock); - vlan = idr_find(&macvtap_major.minor_idr, minor); - if (vlan) { - dev = vlan->dev; + tap = idr_find(&macvtap_major.minor_idr, minor); + if (tap) { + dev = tap->dev; dev_hold(dev); } mutex_unlock(&macvtap_major.minor_lock); - return dev; + return tap; } static void tap_sock_write_space(struct sock *sk) @@ -477,13 +449,13 @@ static void tap_sock_destruct(struct sock *sk) static int tap_open(struct inode *inode, struct file *file) { struct net *net = current->nsproxy->net_ns; - struct net_device *dev; + struct tap_dev *tap; struct tap_queue *q; int err = -ENODEV; rtnl_lock(); - dev = dev_get_by_tap_minor(iminor(inode)); - if (!dev) + tap = dev_get_by_tap_minor(iminor(inode)); + if (!tap) goto err; err = -ENOMEM; @@ -511,18 +483,18 @@ static int tap_open(struct inode *inode, struct file *file) * The macvlan supports zerocopy iff the lower device supports zero * copy so we don't have to look at the lower device directly. 
*/ - if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG)) + if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG)) sock_set_flag(&q->sk, SOCK_ZEROCOPY); err = -ENOMEM; - if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL)) + if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) goto err_array; - err = tap_set_queue(dev, file, q); + err = tap_set_queue(tap, file, q); if (err) goto err_queue; - dev_put(dev); + dev_put(tap->dev); rtnl_unlock(); return err; @@ -532,8 +504,8 @@ err_queue: err_array: sock_put(&q->sk); err: - if (dev) - dev_put(dev); + if (tap) + dev_put(tap->dev); rtnl_unlock(); return err; @@ -601,7 +573,7 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, { int good_linear = SKB_MAX_HEAD(TAP_RESERVE); struct sk_buff *skb; - struct macvlan_dev *vlan; + struct tap_dev *tap; unsigned long total_len = iov_iter_count(from); unsigned long len = total_len; int err; @@ -698,7 +670,7 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, skb_set_network_header(skb, depth); rcu_read_lock(); - vlan = rcu_dereference(q->vlan); + tap = rcu_dereference(q->tap); /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_shinfo(skb)->destructor_arg = m->msg_control; @@ -709,8 +681,8 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, uarg->callback(uarg, false); } - if (vlan) { - skb->dev = vlan->dev; + if (tap) { + skb->dev = tap->dev; dev_queue_xmit(skb); } else { kfree_skb(skb); @@ -724,9 +696,9 @@ err_kfree: err: rcu_read_lock(); - vlan = rcu_dereference(q->vlan); - if (vlan) - this_cpu_inc(vlan->pcpu_stats->tx_dropped); + tap = rcu_dereference(q->tap); + if (tap && tap->count_tx_dropped) + tap->count_tx_dropped(tap); rcu_read_unlock(); return err; @@ -853,55 +825,55 @@ static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to) return ret; } -static struct macvlan_dev *tap_get_vlan(struct tap_queue *q) +static struct tap_dev *tap_get_tap_dev(struct tap_queue *q) { - struct macvlan_dev *vlan; + struct tap_dev *tap; ASSERT_RTNL(); - vlan = rtnl_dereference(q->vlan); - if (vlan) - dev_hold(vlan->dev); + tap = rtnl_dereference(q->tap); + if (tap) + dev_hold(tap->dev); - return vlan; + return tap; } -static void tap_put_vlan(struct macvlan_dev *vlan) +static void tap_put_tap_dev(struct tap_dev *tap) { - dev_put(vlan->dev); + dev_put(tap->dev); } static int tap_ioctl_set_queue(struct file *file, unsigned int flags) { struct tap_queue *q = file->private_data; - struct macvlan_dev *vlan; + struct tap_dev *tap; int ret; - vlan = tap_get_vlan(q); - if (!vlan) + tap = tap_get_tap_dev(q); + if (!tap) return -EINVAL; if (flags & IFF_ATTACH_QUEUE) - ret = tap_enable_queue(vlan->dev, file, q); + ret = tap_enable_queue(tap, file, q); else if (flags & IFF_DETACH_QUEUE) ret = tap_disable_queue(q); else ret = -EINVAL; - tap_put_vlan(vlan); + tap_put_tap_dev(tap); return ret; } static int set_offload(struct tap_queue *q, unsigned long arg) { - struct macvlan_dev *vlan; + struct tap_dev *tap; netdev_features_t features; netdev_features_t feature_mask = 0; - vlan = rtnl_dereference(q->vlan); - if (!vlan) + tap = rtnl_dereference(q->tap); + if (!tap) return -ENOLINK; - features = vlan->dev->features; + features = tap->dev->features; if (arg & TUN_F_CSUM) { feature_mask = NETIF_F_HW_CSUM; @@ -935,9 +907,9 @@ static int set_offload(struct tap_queue *q, unsigned long arg) /* tap_features are the same as features on tun/tap and * reflect user expectations. 
*/ - vlan->tap_features = feature_mask; - vlan->set_features = features; - netdev_update_features(vlan->dev); + tap->tap_features = feature_mask; + if (tap->update_features) + tap->update_features(tap, features); return 0; } @@ -949,7 +921,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct tap_queue *q = file->private_data; - struct macvlan_dev *vlan; + struct tap_dev *tap; void __user *argp = (void __user *)arg; struct ifreq __user *ifr = argp; unsigned int __user *up = argp; @@ -975,18 +947,18 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNGETIFF: rtnl_lock(); - vlan = tap_get_vlan(q); - if (!vlan) { + tap = tap_get_tap_dev(q); + if (!tap) { rtnl_unlock(); return -ENOLINK; } ret = 0; u = q->flags; - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || + if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) || put_user(u, &ifr->ifr_flags)) ret = -EFAULT; - tap_put_vlan(vlan); + tap_put_tap_dev(tap); rtnl_unlock(); return ret; @@ -1059,18 +1031,18 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case SIOCGIFHWADDR: rtnl_lock(); - vlan = tap_get_vlan(q); - if (!vlan) { + tap = tap_get_tap_dev(q); + if (!tap) { rtnl_unlock(); return -ENOLINK; } ret = 0; - u = vlan->dev->type; - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || - copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) || + u = tap->dev->type; + if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) || + copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) || put_user(u, &ifr->ifr_hwaddr.sa_family)) ret = -EFAULT; - tap_put_vlan(vlan); + tap_put_tap_dev(tap); rtnl_unlock(); return ret; @@ -1078,13 +1050,13 @@ static long tap_ioctl(struct file *file, unsigned int cmd, if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa))) return -EFAULT; rtnl_lock(); - vlan = tap_get_vlan(q); - if (!vlan) { + tap = tap_get_tap_dev(q); + if (!tap) { rtnl_unlock(); return -ENOLINK; } - ret = dev_set_mac_address(vlan->dev, &sa); - tap_put_vlan(vlan); + ret = dev_set_mac_address(tap->dev, &sa); + tap_put_tap_dev(tap); rtnl_unlock(); return ret; @@ -1167,19 +1139,19 @@ struct socket *tap_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tap_get_socket); -int tap_queue_resize(struct macvlan_dev *vlan) +int tap_queue_resize(struct tap_dev *tap) { - struct net_device *dev = vlan->dev; + struct net_device *dev = tap->dev; struct tap_queue *q; struct skb_array **arrays; - int n = vlan->numqueues; + int n = tap->numqueues; int ret, i = 0; arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); if (!arrays) return -ENOMEM; - list_for_each_entry(q, &vlan->queue_list, next) + list_for_each_entry(q, &tap->queue_list, next) arrays[i++] = &q->skb_array; ret = skb_array_resize_multiple(arrays, n, diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index a2dfd9063a6c..75031e5d0a65 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -14,11 +14,60 @@ static inline struct socket *tap_get_socket(struct file *f) } #endif /* CONFIG_MACVTAP */ +#include +#include + +#define MAX_TAP_QUEUES 256 + +struct tap_queue; + +struct tap_dev { + struct net_device *dev; + u16 flags; + /* This array tracks active taps. 
*/ + struct tap_queue __rcu *taps[MAX_TAP_QUEUES]; + /* This list tracks all taps (both enabled and disabled) */ + struct list_head queue_list; + int numvtaps; + int numqueues; + netdev_features_t tap_features; + int minor; + + void (*update_features)(struct tap_dev *tap, netdev_features_t features); + void (*count_tx_dropped)(struct tap_dev *tap); + void (*count_rx_dropped)(struct tap_dev *tap); +}; + +/* + * A tap queue is the central object of tap module, it connects + * an open character device to virtual interface. There can be + * multiple queues on one interface, which map back to queues + * implemented in hardware on the underlying device. + * + * tap_proto is used to allocate queues through the sock allocation + * mechanism. + * + */ + +struct tap_queue { + struct sock sk; + struct socket sock; + struct socket_wq wq; + int vnet_hdr_sz; + struct tap_dev __rcu *tap; + struct file *file; + unsigned int flags; + u16 queue_index; + bool enabled; + struct list_head next; + struct skb_array skb_array; +}; + rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); -void tap_del_queues(struct net_device *dev); -int tap_get_minor(struct macvlan_dev *vlan); -void tap_free_minor(struct macvlan_dev *vlan); -int tap_queue_resize(struct macvlan_dev *vlan); +void tap_del_queues(struct tap_dev *tap); +int tap_get_minor(struct tap_dev *tap); +void tap_free_minor(struct tap_dev *tap); +int tap_queue_resize(struct tap_dev *tap); int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, const char *device_name); void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); -- cgit v1.2.3 From d9f1f61c0801a73ff36d416a7ede54229b231e1d Mon Sep 17 00:00:00 2001 From: Sainath Grandhi Date: Fri, 10 Feb 2017 16:03:50 -0800 Subject: tap: Extending tap device create/destroy APIs Extending tap APIs get/free_minor and create/destroy_cdev to handle more than one type of virtual interface. Signed-off-by: Sainath Grandhi Signed-off-by: David S. Miller --- drivers/net/macvtap_main.c | 6 +-- drivers/net/tap.c | 118 +++++++++++++++++++++++++++++++++++++-------- include/linux/if_tap.h | 4 +- 3 files changed, 102 insertions(+), 26 deletions(-) diff --git a/drivers/net/macvtap_main.c b/drivers/net/macvtap_main.c index 0238df62bf45..a4bfc10b61dd 100644 --- a/drivers/net/macvtap_main.c +++ b/drivers/net/macvtap_main.c @@ -163,7 +163,7 @@ static int macvtap_device_event(struct notifier_block *unused, * been registered but before register_netdevice has * finished running. 
*/ - err = tap_get_minor(&vlantap->tap); + err = tap_get_minor(macvtap_major, &vlantap->tap); if (err) return notifier_from_errno(err); @@ -171,7 +171,7 @@ static int macvtap_device_event(struct notifier_block *unused, classdev = device_create(&macvtap_class, &dev->dev, devt, dev, tap_name); if (IS_ERR(classdev)) { - tap_free_minor(&vlantap->tap); + tap_free_minor(macvtap_major, &vlantap->tap); return notifier_from_errno(PTR_ERR(classdev)); } err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, @@ -186,7 +186,7 @@ static int macvtap_device_event(struct notifier_block *unused, sysfs_remove_link(&dev->dev.kobj, tap_name); devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); device_destroy(&macvtap_class, devt); - tap_free_minor(&vlantap->tap); + tap_free_minor(macvtap_major, &vlantap->tap); break; case NETDEV_CHANGE_TX_QUEUE_LEN: if (tap_queue_resize(&vlantap->tap)) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 7d3e8b18f5e6..71bbf0b6327d 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -99,12 +99,17 @@ static struct proto tap_proto = { }; #define TAP_NUM_DEVS (1U << MINORBITS) + +static LIST_HEAD(major_list); + struct major_info { + struct rcu_head rcu; dev_t major; struct idr minor_idr; struct mutex minor_lock; const char *device_name; -} macvtap_major; + struct list_head next; +}; #define GOODCOPY_LEN 128 @@ -385,44 +390,89 @@ drop: return RX_HANDLER_CONSUMED; } -int tap_get_minor(struct tap_dev *tap) +static struct major_info *tap_get_major(int major) +{ + struct major_info *tap_major; + + list_for_each_entry_rcu(tap_major, &major_list, next) { + if (tap_major->major == major) + return tap_major; + } + + return NULL; +} + +int tap_get_minor(dev_t major, struct tap_dev *tap) { int retval = -ENOMEM; + struct major_info *tap_major; + + rcu_read_lock(); + tap_major = tap_get_major(MAJOR(major)); + if (!tap_major) { + retval = -EINVAL; + goto unlock; + } - mutex_lock(&macvtap_major.minor_lock); - retval = idr_alloc(&macvtap_major.minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL); + mutex_lock(&tap_major->minor_lock); + retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL); if (retval >= 0) { tap->minor = retval; } else if (retval == -ENOSPC) { netdev_err(tap->dev, "Too many tap devices\n"); retval = -EINVAL; } - mutex_unlock(&macvtap_major.minor_lock); + mutex_unlock(&tap_major->minor_lock); + +unlock: + rcu_read_unlock(); return retval < 0 ? 
retval : 0; } -void tap_free_minor(struct tap_dev *tap) +void tap_free_minor(dev_t major, struct tap_dev *tap) { - mutex_lock(&macvtap_major.minor_lock); + struct major_info *tap_major; + + rcu_read_lock(); + tap_major = tap_get_major(MAJOR(major)); + if (!tap_major) { + goto unlock; + } + + mutex_lock(&tap_major->minor_lock); if (tap->minor) { - idr_remove(&macvtap_major.minor_idr, tap->minor); + idr_remove(&tap_major->minor_idr, tap->minor); tap->minor = 0; } - mutex_unlock(&macvtap_major.minor_lock); + mutex_unlock(&tap_major->minor_lock); + +unlock: + rcu_read_unlock(); } -static struct tap_dev *dev_get_by_tap_minor(int minor) +static struct tap_dev *dev_get_by_tap_file(int major, int minor) { struct net_device *dev = NULL; struct tap_dev *tap; + struct major_info *tap_major; - mutex_lock(&macvtap_major.minor_lock); - tap = idr_find(&macvtap_major.minor_idr, minor); + rcu_read_lock(); + tap_major = tap_get_major(major); + if (!tap_major) { + tap = NULL; + goto unlock; + } + + mutex_lock(&tap_major->minor_lock); + tap = idr_find(&tap_major->minor_idr, minor); if (tap) { dev = tap->dev; dev_hold(dev); } - mutex_unlock(&macvtap_major.minor_lock); + mutex_unlock(&tap_major->minor_lock); + +unlock: + rcu_read_unlock(); return tap; } @@ -454,7 +504,7 @@ static int tap_open(struct inode *inode, struct file *file) int err = -ENODEV; rtnl_lock(); - tap = dev_get_by_tap_minor(iminor(inode)); + tap = dev_get_by_tap_file(imajor(inode), iminor(inode)); if (!tap) goto err; @@ -1161,6 +1211,25 @@ int tap_queue_resize(struct tap_dev *tap) return ret; } +static int tap_list_add(dev_t major, const char *device_name) +{ + struct major_info *tap_major; + + tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC); + if (!tap_major) + return -ENOMEM; + + tap_major->major = MAJOR(major); + + idr_init(&tap_major->minor_idr); + mutex_init(&tap_major->minor_lock); + + tap_major->device_name = device_name; + + list_add_tail_rcu(&tap_major->next, &major_list); + return 0; +} + int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, const char *device_name) { @@ -1175,15 +1244,14 @@ int tap_create_cdev(struct cdev *tap_cdev, if (err) goto out2; - macvtap_major.major = MAJOR(*tap_major); - - idr_init(&macvtap_major.minor_idr); - mutex_init(&macvtap_major.minor_lock); - - macvtap_major.device_name = device_name; + err = tap_list_add(*tap_major, device_name); + if (err) + goto out3; return 0; +out3: + cdev_del(tap_cdev); out2: unregister_chrdev_region(*tap_major, TAP_NUM_DEVS); out1: @@ -1192,7 +1260,15 @@ out1: void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev) { + struct major_info *tap_major, *tmp; + cdev_del(tap_cdev); unregister_chrdev_region(major, TAP_NUM_DEVS); - idr_destroy(&macvtap_major.minor_idr); + list_for_each_entry_safe(tap_major, tmp, &major_list, next) { + if (tap_major->major == MAJOR(major)) { + idr_destroy(&tap_major->minor_idr); + list_del_rcu(&tap_major->next); + kfree_rcu(tap_major, rcu); + } + } } diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 75031e5d0a65..362e71c16efb 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -65,8 +65,8 @@ struct tap_queue { rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); void tap_del_queues(struct tap_dev *tap); -int tap_get_minor(struct tap_dev *tap); -void tap_free_minor(struct tap_dev *tap); +int tap_get_minor(dev_t major, struct tap_dev *tap); +void tap_free_minor(dev_t major, struct tap_dev *tap); int tap_queue_resize(struct tap_dev *tap); int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, 
const char *device_name); -- cgit v1.2.3 From 9a393b5d5988ea4eaa3e0da138321abe0dc03a68 Mon Sep 17 00:00:00 2001 From: Sainath Grandhi Date: Fri, 10 Feb 2017 16:03:51 -0800 Subject: tap: tap as an independent module This patch makes tap a separate module for other types of virtual interfaces, for example, ipvlan to use. Signed-off-by: Sainath Grandhi Signed-off-by: David S. Miller --- drivers/net/Kconfig | 7 ++ drivers/net/Makefile | 3 +- drivers/net/macvtap.c | 249 +++++++++++++++++++++++++++++++++++++++++++++ drivers/net/macvtap_main.c | 249 --------------------------------------------- drivers/net/tap.c | 11 ++ drivers/vhost/Kconfig | 2 +- include/linux/if_tap.h | 4 +- 7 files changed, 271 insertions(+), 254 deletions(-) create mode 100644 drivers/net/macvtap.c delete mode 100644 drivers/net/macvtap_main.c diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index a993cbeb9e0c..5763503fe4e6 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -135,6 +135,7 @@ config MACVTAP tristate "MAC-VLAN based tap driver" depends on MACVLAN depends on INET + select TAP help This adds a specialized tap character device driver that is based on the MAC-VLAN network interface, called macvtap. A macvtap device @@ -287,6 +288,12 @@ config TUN If you don't know what to use this for, you don't need it. +config TAP + tristate + ---help--- + This option is selected by any driver implementing tap user space + interface for a virtual interface to re-use core tap functionality. + config TUN_VNET_CROSS_LE bool "Support for cross-endian vnet headers on little-endian kernels" default n diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 19b03a9fe0f6..7dd86ca02d0d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_PHYLIB) += phy/ obj-$(CONFIG_RIONET) += rionet.o obj-$(CONFIG_NET_TEAM) += team/ obj-$(CONFIG_TUN) += tun.o +obj-$(CONFIG_TAP) += tap.o obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_VXLAN) += vxlan.o @@ -29,8 +30,6 @@ obj-$(CONFIG_GTP) += gtp.o obj-$(CONFIG_NLMON) += nlmon.o obj-$(CONFIG_NET_VRF) += vrf.o -macvtap-objs := macvtap_main.o tap.o - # # Networking Drivers # diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c new file mode 100644 index 000000000000..a4bfc10b61dd --- /dev/null +++ b/drivers/net/macvtap.c @@ -0,0 +1,249 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +struct macvtap_dev { + struct macvlan_dev vlan; + struct tap_dev tap; +}; + +/* + * Variables for dealing with macvtaps device numbers. 
+ */ +static dev_t macvtap_major; + +static const void *macvtap_net_namespace(struct device *d) +{ + struct net_device *dev = to_net_dev(d->parent); + return dev_net(dev); +} + +static struct class macvtap_class = { + .name = "macvtap", + .owner = THIS_MODULE, + .ns_type = &net_ns_type_operations, + .namespace = macvtap_net_namespace, +}; +static struct cdev macvtap_cdev; + +#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ + NETIF_F_TSO6 | NETIF_F_UFO) + +static void macvtap_count_tx_dropped(struct tap_dev *tap) +{ + struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); + struct macvlan_dev *vlan = &vlantap->vlan; + + this_cpu_inc(vlan->pcpu_stats->tx_dropped); +} + +static void macvtap_count_rx_dropped(struct tap_dev *tap) +{ + struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); + struct macvlan_dev *vlan = &vlantap->vlan; + + macvlan_count_rx(vlan, 0, 0, 0); +} + +static void macvtap_update_features(struct tap_dev *tap, + netdev_features_t features) +{ + struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); + struct macvlan_dev *vlan = &vlantap->vlan; + + vlan->set_features = features; + netdev_update_features(vlan->dev); +} + +static int macvtap_newlink(struct net *src_net, + struct net_device *dev, + struct nlattr *tb[], + struct nlattr *data[]) +{ + struct macvtap_dev *vlantap = netdev_priv(dev); + int err; + + INIT_LIST_HEAD(&vlantap->tap.queue_list); + + /* Since macvlan supports all offloads by default, make + * tap support all offloads also. + */ + vlantap->tap.tap_features = TUN_OFFLOADS; + + /* Register callbacks for rx/tx drops accounting and updating + * net_device features + */ + vlantap->tap.count_tx_dropped = macvtap_count_tx_dropped; + vlantap->tap.count_rx_dropped = macvtap_count_rx_dropped; + vlantap->tap.update_features = macvtap_update_features; + + err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap); + if (err) + return err; + + /* Don't put anything that may fail after macvlan_common_newlink + * because we can't undo what it does. + */ + err = macvlan_common_newlink(src_net, dev, tb, data); + if (err) { + netdev_rx_handler_unregister(dev); + return err; + } + + vlantap->tap.dev = vlantap->vlan.dev; + + return 0; +} + +static void macvtap_dellink(struct net_device *dev, + struct list_head *head) +{ + struct macvtap_dev *vlantap = netdev_priv(dev); + + netdev_rx_handler_unregister(dev); + tap_del_queues(&vlantap->tap); + macvlan_dellink(dev, head); +} + +static void macvtap_setup(struct net_device *dev) +{ + macvlan_common_setup(dev); + dev->tx_queue_len = TUN_READQ_SIZE; +} + +static struct rtnl_link_ops macvtap_link_ops __read_mostly = { + .kind = "macvtap", + .setup = macvtap_setup, + .newlink = macvtap_newlink, + .dellink = macvtap_dellink, + .priv_size = sizeof(struct macvtap_dev), +}; + +static int macvtap_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct macvtap_dev *vlantap; + struct device *classdev; + dev_t devt; + int err; + char tap_name[IFNAMSIZ]; + + if (dev->rtnl_link_ops != &macvtap_link_ops) + return NOTIFY_DONE; + + snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); + vlantap = netdev_priv(dev); + + switch (event) { + case NETDEV_REGISTER: + /* Create the device node here after the network device has + * been registered but before register_netdevice has + * finished running. 
+ */ + err = tap_get_minor(macvtap_major, &vlantap->tap); + if (err) + return notifier_from_errno(err); + + devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); + classdev = device_create(&macvtap_class, &dev->dev, devt, + dev, tap_name); + if (IS_ERR(classdev)) { + tap_free_minor(macvtap_major, &vlantap->tap); + return notifier_from_errno(PTR_ERR(classdev)); + } + err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, + tap_name); + if (err) + return notifier_from_errno(err); + break; + case NETDEV_UNREGISTER: + /* vlan->minor == 0 if NETDEV_REGISTER above failed */ + if (vlantap->tap.minor == 0) + break; + sysfs_remove_link(&dev->dev.kobj, tap_name); + devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); + device_destroy(&macvtap_class, devt); + tap_free_minor(macvtap_major, &vlantap->tap); + break; + case NETDEV_CHANGE_TX_QUEUE_LEN: + if (tap_queue_resize(&vlantap->tap)) + return NOTIFY_BAD; + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block macvtap_notifier_block __read_mostly = { + .notifier_call = macvtap_device_event, +}; + +static int macvtap_init(void) +{ + int err; + + err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap"); + + if (err) + goto out1; + + err = class_register(&macvtap_class); + if (err) + goto out2; + + err = register_netdevice_notifier(&macvtap_notifier_block); + if (err) + goto out3; + + err = macvlan_link_register(&macvtap_link_ops); + if (err) + goto out4; + + return 0; + +out4: + unregister_netdevice_notifier(&macvtap_notifier_block); +out3: + class_unregister(&macvtap_class); +out2: + tap_destroy_cdev(macvtap_major, &macvtap_cdev); +out1: + return err; +} +module_init(macvtap_init); + +static void macvtap_exit(void) +{ + rtnl_link_unregister(&macvtap_link_ops); + unregister_netdevice_notifier(&macvtap_notifier_block); + class_unregister(&macvtap_class); + tap_destroy_cdev(macvtap_major, &macvtap_cdev); +} +module_exit(macvtap_exit); + +MODULE_ALIAS_RTNL_LINK("macvtap"); +MODULE_AUTHOR("Arnd Bergmann "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/macvtap_main.c b/drivers/net/macvtap_main.c deleted file mode 100644 index a4bfc10b61dd..000000000000 --- a/drivers/net/macvtap_main.c +++ /dev/null @@ -1,249 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -struct macvtap_dev { - struct macvlan_dev vlan; - struct tap_dev tap; -}; - -/* - * Variables for dealing with macvtaps device numbers. 
- */ -static dev_t macvtap_major; - -static const void *macvtap_net_namespace(struct device *d) -{ - struct net_device *dev = to_net_dev(d->parent); - return dev_net(dev); -} - -static struct class macvtap_class = { - .name = "macvtap", - .owner = THIS_MODULE, - .ns_type = &net_ns_type_operations, - .namespace = macvtap_net_namespace, -}; -static struct cdev macvtap_cdev; - -#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) - -static void macvtap_count_tx_dropped(struct tap_dev *tap) -{ - struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); - struct macvlan_dev *vlan = &vlantap->vlan; - - this_cpu_inc(vlan->pcpu_stats->tx_dropped); -} - -static void macvtap_count_rx_dropped(struct tap_dev *tap) -{ - struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); - struct macvlan_dev *vlan = &vlantap->vlan; - - macvlan_count_rx(vlan, 0, 0, 0); -} - -static void macvtap_update_features(struct tap_dev *tap, - netdev_features_t features) -{ - struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); - struct macvlan_dev *vlan = &vlantap->vlan; - - vlan->set_features = features; - netdev_update_features(vlan->dev); -} - -static int macvtap_newlink(struct net *src_net, - struct net_device *dev, - struct nlattr *tb[], - struct nlattr *data[]) -{ - struct macvtap_dev *vlantap = netdev_priv(dev); - int err; - - INIT_LIST_HEAD(&vlantap->tap.queue_list); - - /* Since macvlan supports all offloads by default, make - * tap support all offloads also. - */ - vlantap->tap.tap_features = TUN_OFFLOADS; - - /* Register callbacks for rx/tx drops accounting and updating - * net_device features - */ - vlantap->tap.count_tx_dropped = macvtap_count_tx_dropped; - vlantap->tap.count_rx_dropped = macvtap_count_rx_dropped; - vlantap->tap.update_features = macvtap_update_features; - - err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap); - if (err) - return err; - - /* Don't put anything that may fail after macvlan_common_newlink - * because we can't undo what it does. - */ - err = macvlan_common_newlink(src_net, dev, tb, data); - if (err) { - netdev_rx_handler_unregister(dev); - return err; - } - - vlantap->tap.dev = vlantap->vlan.dev; - - return 0; -} - -static void macvtap_dellink(struct net_device *dev, - struct list_head *head) -{ - struct macvtap_dev *vlantap = netdev_priv(dev); - - netdev_rx_handler_unregister(dev); - tap_del_queues(&vlantap->tap); - macvlan_dellink(dev, head); -} - -static void macvtap_setup(struct net_device *dev) -{ - macvlan_common_setup(dev); - dev->tx_queue_len = TUN_READQ_SIZE; -} - -static struct rtnl_link_ops macvtap_link_ops __read_mostly = { - .kind = "macvtap", - .setup = macvtap_setup, - .newlink = macvtap_newlink, - .dellink = macvtap_dellink, - .priv_size = sizeof(struct macvtap_dev), -}; - -static int macvtap_device_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct macvtap_dev *vlantap; - struct device *classdev; - dev_t devt; - int err; - char tap_name[IFNAMSIZ]; - - if (dev->rtnl_link_ops != &macvtap_link_ops) - return NOTIFY_DONE; - - snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); - vlantap = netdev_priv(dev); - - switch (event) { - case NETDEV_REGISTER: - /* Create the device node here after the network device has - * been registered but before register_netdevice has - * finished running. 
- */ - err = tap_get_minor(macvtap_major, &vlantap->tap); - if (err) - return notifier_from_errno(err); - - devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); - classdev = device_create(&macvtap_class, &dev->dev, devt, - dev, tap_name); - if (IS_ERR(classdev)) { - tap_free_minor(macvtap_major, &vlantap->tap); - return notifier_from_errno(PTR_ERR(classdev)); - } - err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, - tap_name); - if (err) - return notifier_from_errno(err); - break; - case NETDEV_UNREGISTER: - /* vlan->minor == 0 if NETDEV_REGISTER above failed */ - if (vlantap->tap.minor == 0) - break; - sysfs_remove_link(&dev->dev.kobj, tap_name); - devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); - device_destroy(&macvtap_class, devt); - tap_free_minor(macvtap_major, &vlantap->tap); - break; - case NETDEV_CHANGE_TX_QUEUE_LEN: - if (tap_queue_resize(&vlantap->tap)) - return NOTIFY_BAD; - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block macvtap_notifier_block __read_mostly = { - .notifier_call = macvtap_device_event, -}; - -static int macvtap_init(void) -{ - int err; - - err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap"); - - if (err) - goto out1; - - err = class_register(&macvtap_class); - if (err) - goto out2; - - err = register_netdevice_notifier(&macvtap_notifier_block); - if (err) - goto out3; - - err = macvlan_link_register(&macvtap_link_ops); - if (err) - goto out4; - - return 0; - -out4: - unregister_netdevice_notifier(&macvtap_notifier_block); -out3: - class_unregister(&macvtap_class); -out2: - tap_destroy_cdev(macvtap_major, &macvtap_cdev); -out1: - return err; -} -module_init(macvtap_init); - -static void macvtap_exit(void) -{ - rtnl_link_unregister(&macvtap_link_ops); - unregister_netdevice_notifier(&macvtap_notifier_block); - class_unregister(&macvtap_class); - tap_destroy_cdev(macvtap_major, &macvtap_cdev); -} -module_exit(macvtap_exit); - -MODULE_ALIAS_RTNL_LINK("macvtap"); -MODULE_AUTHOR("Arnd Bergmann "); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 71bbf0b6327d..35b55a2fa1a1 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -312,6 +312,7 @@ void tap_del_queues(struct tap_dev *tap) /* guarantee that any future tap_set_queue will fail */ tap->numvtaps = MAX_TAP_QUEUES; } +EXPORT_SYMBOL_GPL(tap_del_queues); rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) { @@ -389,6 +390,7 @@ drop: kfree_skb(skb); return RX_HANDLER_CONSUMED; } +EXPORT_SYMBOL_GPL(tap_handle_frame); static struct major_info *tap_get_major(int major) { @@ -428,6 +430,7 @@ unlock: rcu_read_unlock(); return retval < 0 ? 
retval : 0; } +EXPORT_SYMBOL_GPL(tap_get_minor); void tap_free_minor(dev_t major, struct tap_dev *tap) { @@ -449,6 +452,7 @@ void tap_free_minor(dev_t major, struct tap_dev *tap) unlock: rcu_read_unlock(); } +EXPORT_SYMBOL_GPL(tap_free_minor); static struct tap_dev *dev_get_by_tap_file(int major, int minor) { @@ -1210,6 +1214,7 @@ int tap_queue_resize(struct tap_dev *tap) kfree(arrays); return ret; } +EXPORT_SYMBOL_GPL(tap_queue_resize); static int tap_list_add(dev_t major, const char *device_name) { @@ -1257,6 +1262,7 @@ out2: out1: return err; } +EXPORT_SYMBOL_GPL(tap_create_cdev); void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev) { @@ -1272,3 +1278,8 @@ void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev) } } } +EXPORT_SYMBOL_GPL(tap_destroy_cdev); + +MODULE_AUTHOR("Arnd Bergmann "); +MODULE_AUTHOR("Sainath Grandhi "); +MODULE_LICENSE("GPL"); diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index 40764ecad9ce..cfdecea5078f 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -1,6 +1,6 @@ config VHOST_NET tristate "Host kernel accelerator for virtio net" - depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP) + depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) select VHOST ---help--- This kernel module can be loaded in host kernel to accelerate diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 362e71c16efb..3482c3c2037d 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -1,7 +1,7 @@ #ifndef _LINUX_IF_TAP_H_ #define _LINUX_IF_TAP_H_ -#if IS_ENABLED(CONFIG_MACVTAP) +#if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); #else #include @@ -12,7 +12,7 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -#endif /* CONFIG_MACVTAP */ +#endif /* CONFIG_TAP */ #include #include -- cgit v1.2.3 From 235a9d89da976e2975b3de9afc0bed7b72557983 Mon Sep 17 00:00:00 2001 From: Sainath Grandhi Date: Fri, 10 Feb 2017 16:03:52 -0800 Subject: ipvtap: IP-VLAN based tap driver This patch adds a tap character device driver that is based on the IP-VLAN network interface, called ipvtap. An ipvtap device can be created in the same way as an ipvlan device, using 'type ipvtap', and then accessed using the tap user space interface. Signed-off-by: Sainath Grandhi Signed-off-by: David S. Miller --- drivers/net/Kconfig | 13 +++ drivers/net/Makefile | 1 + drivers/net/ipvlan/Makefile | 1 + drivers/net/ipvlan/ipvlan.h | 7 ++ drivers/net/ipvlan/ipvlan_core.c | 3 +- drivers/net/ipvlan/ipvlan_main.c | 27 +++-- drivers/net/ipvlan/ipvtap.c | 241 +++++++++++++++++++++++++++++++++++++++ 7 files changed, 280 insertions(+), 13 deletions(-) create mode 100644 drivers/net/ipvlan/ipvtap.c diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5763503fe4e6..823bc2fd201f 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -166,6 +166,19 @@ config IPVLAN To compile this driver as a module, choose M here: the module will be called ipvlan. +config IPVTAP + tristate "IP-VLAN based tap driver" + depends on IPVLAN + depends on INET + select TAP + ---help--- + This adds a specialized tap character device driver that is based + on the IP-VLAN network interface, called ipvtap. An ipvtap device + can be added in the same way as a ipvlan device, using 'type + ipvtap', and then be accessed through the tap user space interface. + + To compile this driver as a module, choose M here: the module + will be called ipvtap. 
config VXLAN tristate "Virtual eXtensible Local Area Network (VXLAN)" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 7dd86ca02d0d..98ed4d96987c 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -7,6 +7,7 @@ # obj-$(CONFIG_BONDING) += bonding/ obj-$(CONFIG_IPVLAN) += ipvlan/ +obj-$(CONFIG_IPVTAP) += ipvlan/ obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_IFB) += ifb.o diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile index df79910192d6..8a2c64dc9641 100644 --- a/drivers/net/ipvlan/Makefile +++ b/drivers/net/ipvlan/Makefile @@ -3,5 +3,6 @@ # obj-$(CONFIG_IPVLAN) += ipvlan.o +obj-$(CONFIG_IPVTAP) += ipvtap.o ipvlan-objs := ipvlan_core.o ipvlan_main.o diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 406ae4ff0ae8..800a46c8d26c 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -135,4 +135,11 @@ struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb, u16 proto); unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); +void ipvlan_count_rx(const struct ipvl_dev *ipvlan, + unsigned int len, bool success, bool mcast); +int ipvlan_link_new(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]); +void ipvlan_link_delete(struct net_device *dev, struct list_head *head); +void ipvlan_link_setup(struct net_device *dev); +int ipvlan_link_register(struct rtnl_link_ops *ops); #endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8ae335d73d38..1f3295e274d0 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -16,7 +16,7 @@ void ipvlan_init_secret(void) net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret)); } -static void ipvlan_count_rx(const struct ipvl_dev *ipvlan, +void ipvlan_count_rx(const struct ipvl_dev *ipvlan, unsigned int len, bool success, bool mcast) { if (likely(success)) { @@ -33,6 +33,7 @@ static void ipvlan_count_rx(const struct ipvl_dev *ipvlan, this_cpu_inc(ipvlan->pcpu_stats->rx_errs); } } +EXPORT_SYMBOL_GPL(ipvlan_count_rx); static u8 ipvlan_get_v6_hash(const void *iaddr) { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 95b18f4602cf..aa8575ccbce3 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -496,8 +496,8 @@ err: return ret; } -static int ipvlan_link_new(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) +int ipvlan_link_new(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) { struct ipvl_dev *ipvlan = netdev_priv(dev); struct ipvl_port *port; @@ -594,8 +594,9 @@ destroy_ipvlan_port: ipvlan_port_destroy(phy_dev); return err; } +EXPORT_SYMBOL_GPL(ipvlan_link_new); -static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) +void ipvlan_link_delete(struct net_device *dev, struct list_head *head) { struct ipvl_dev *ipvlan = netdev_priv(dev); struct ipvl_addr *addr, *next; @@ -611,8 +612,9 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(ipvlan->phy_dev, dev); } +EXPORT_SYMBOL_GPL(ipvlan_link_delete); -static void ipvlan_link_setup(struct net_device *dev) +void ipvlan_link_setup(struct net_device *dev) { ether_setup(dev); @@ -623,6 +625,7 @@ static void ipvlan_link_setup(struct 
net_device *dev) dev->header_ops = &ipvlan_header_ops; dev->ethtool_ops = &ipvlan_ethtool_ops; } +EXPORT_SYMBOL_GPL(ipvlan_link_setup); static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] = { @@ -633,22 +636,22 @@ static struct rtnl_link_ops ipvlan_link_ops = { .kind = "ipvlan", .priv_size = sizeof(struct ipvl_dev), - .get_size = ipvlan_nl_getsize, - .policy = ipvlan_nl_policy, - .validate = ipvlan_nl_validate, - .fill_info = ipvlan_nl_fillinfo, - .changelink = ipvlan_nl_changelink, - .maxtype = IFLA_IPVLAN_MAX, - .setup = ipvlan_link_setup, .newlink = ipvlan_link_new, .dellink = ipvlan_link_delete, }; -static int ipvlan_link_register(struct rtnl_link_ops *ops) +int ipvlan_link_register(struct rtnl_link_ops *ops) { + ops->get_size = ipvlan_nl_getsize; + ops->policy = ipvlan_nl_policy; + ops->validate = ipvlan_nl_validate; + ops->fill_info = ipvlan_nl_fillinfo; + ops->changelink = ipvlan_nl_changelink; + ops->maxtype = IFLA_IPVLAN_MAX; return rtnl_link_register(ops); } +EXPORT_SYMBOL_GPL(ipvlan_link_register); static int ipvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c new file mode 100644 index 000000000000..2b713b63b62c --- /dev/null +++ b/drivers/net/ipvlan/ipvtap.c @@ -0,0 +1,241 @@ +#include +#include "ipvlan.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ + NETIF_F_TSO6 | NETIF_F_UFO) + +static dev_t ipvtap_major; +static struct cdev ipvtap_cdev; + +static const void *ipvtap_net_namespace(struct device *d) +{ + struct net_device *dev = to_net_dev(d->parent); + return dev_net(dev); +} + +static struct class ipvtap_class = { + .name = "ipvtap", + .owner = THIS_MODULE, + .ns_type = &net_ns_type_operations, + .namespace = ipvtap_net_namespace, +}; + +struct ipvtap_dev { + struct ipvl_dev vlan; + struct tap_dev tap; +}; + +static void ipvtap_count_tx_dropped(struct tap_dev *tap) +{ + struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap); + struct ipvl_dev *vlan = &vlantap->vlan; + + this_cpu_inc(vlan->pcpu_stats->tx_drps); +} + +static void ipvtap_count_rx_dropped(struct tap_dev *tap) +{ + struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap); + struct ipvl_dev *vlan = &vlantap->vlan; + + ipvlan_count_rx(vlan, 0, 0, 0); +} + +static void ipvtap_update_features(struct tap_dev *tap, + netdev_features_t features) +{ + struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap); + struct ipvl_dev *vlan = &vlantap->vlan; + + vlan->sfeatures = features; + netdev_update_features(vlan->dev); +} + +static int ipvtap_newlink(struct net *src_net, + struct net_device *dev, + struct nlattr *tb[], + struct nlattr *data[]) +{ + struct ipvtap_dev *vlantap = netdev_priv(dev); + int err; + + INIT_LIST_HEAD(&vlantap->tap.queue_list); + + /* Since macvlan supports all offloads by default, make + * tap support all offloads also. 
+ */ + vlantap->tap.tap_features = TUN_OFFLOADS; + vlantap->tap.count_tx_dropped = ipvtap_count_tx_dropped; + vlantap->tap.update_features = ipvtap_update_features; + vlantap->tap.count_rx_dropped = ipvtap_count_rx_dropped; + + err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap); + if (err) + return err; + + /* Don't put anything that may fail after macvlan_common_newlink + * because we can't undo what it does. + */ + err = ipvlan_link_new(src_net, dev, tb, data); + if (err) { + netdev_rx_handler_unregister(dev); + return err; + } + + vlantap->tap.dev = vlantap->vlan.dev; + + return err; +} + +static void ipvtap_dellink(struct net_device *dev, + struct list_head *head) +{ + struct ipvtap_dev *vlan = netdev_priv(dev); + + netdev_rx_handler_unregister(dev); + tap_del_queues(&vlan->tap); + ipvlan_link_delete(dev, head); +} + +static void ipvtap_setup(struct net_device *dev) +{ + ipvlan_link_setup(dev); + dev->tx_queue_len = TUN_READQ_SIZE; + dev->priv_flags &= ~IFF_NO_QUEUE; +} + +static struct rtnl_link_ops ipvtap_link_ops __read_mostly = { + .kind = "ipvtap", + .setup = ipvtap_setup, + .newlink = ipvtap_newlink, + .dellink = ipvtap_dellink, + .priv_size = sizeof(struct ipvtap_dev), +}; + +static int ipvtap_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ipvtap_dev *vlantap; + struct device *classdev; + dev_t devt; + int err; + char tap_name[IFNAMSIZ]; + + if (dev->rtnl_link_ops != &ipvtap_link_ops) + return NOTIFY_DONE; + + snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); + vlantap = netdev_priv(dev); + + switch (event) { + case NETDEV_REGISTER: + /* Create the device node here after the network device has + * been registered but before register_netdevice has + * finished running. 
+ */ + err = tap_get_minor(ipvtap_major, &vlantap->tap); + if (err) + return notifier_from_errno(err); + + devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor); + classdev = device_create(&ipvtap_class, &dev->dev, devt, + dev, tap_name); + if (IS_ERR(classdev)) { + tap_free_minor(ipvtap_major, &vlantap->tap); + return notifier_from_errno(PTR_ERR(classdev)); + } + err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, + tap_name); + if (err) + return notifier_from_errno(err); + break; + case NETDEV_UNREGISTER: + /* vlan->minor == 0 if NETDEV_REGISTER above failed */ + if (vlantap->tap.minor == 0) + break; + sysfs_remove_link(&dev->dev.kobj, tap_name); + devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor); + device_destroy(&ipvtap_class, devt); + tap_free_minor(ipvtap_major, &vlantap->tap); + break; + case NETDEV_CHANGE_TX_QUEUE_LEN: + if (tap_queue_resize(&vlantap->tap)) + return NOTIFY_BAD; + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block ipvtap_notifier_block __read_mostly = { + .notifier_call = ipvtap_device_event, +}; + +static int ipvtap_init(void) +{ + int err; + + err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap"); + + if (err) + goto out1; + + err = class_register(&ipvtap_class); + if (err) + goto out2; + + err = register_netdevice_notifier(&ipvtap_notifier_block); + if (err) + goto out3; + + err = ipvlan_link_register(&ipvtap_link_ops); + if (err) + goto out4; + + return 0; + +out4: + unregister_netdevice_notifier(&ipvtap_notifier_block); +out3: + class_unregister(&ipvtap_class); +out2: + tap_destroy_cdev(ipvtap_major, &ipvtap_cdev); +out1: + return err; +} +module_init(ipvtap_init); + +static void ipvtap_exit(void) +{ + rtnl_link_unregister(&ipvtap_link_ops); + unregister_netdevice_notifier(&ipvtap_notifier_block); + class_unregister(&ipvtap_class); + tap_destroy_cdev(ipvtap_major, &ipvtap_cdev); +} +module_exit(ipvtap_exit); +MODULE_ALIAS_RTNL_LINK("ipvtap"); +MODULE_AUTHOR("Sainath Grandhi "); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 90d2ea9f66d3f9f96bc0f41d94b4830c0b2d0a50 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Fri, 10 Feb 2017 17:34:59 +0000 Subject: sfc: fix swapped arguments to efx_ef10_handle_rx_event_errors Fixes: a0ee35414837 ("sfc: process RX event inner checksum flags") Reported-by: Colin Ian King Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 6bba2d206d82..761ccc64eee9 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3356,8 +3356,9 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, EFX_AND_QWORD(errors, *event, errors); if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, + rx_encap_hdr, rx_l3_class, rx_l4_class, - rx_encap_hdr, event); + event); } else { bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP || rx_l4_class == ESE_DZ_L4_CLASS_UDP; -- cgit v1.2.3 From c16ec18599c8c1722d476011786fd9e2529888f7 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sat, 11 Feb 2017 13:49:20 +0200 Subject: net: rename dst_neigh_output back to neigh_output After the dst->pending_confirm flag was removed, we do not need anymore to provide dst arg to dst_neigh_output. So, rename it to neigh_output as before commit 5110effee8fd ("net: Do delayed neigh confirmation."). Signed-off-by: Julian Anastasov Signed-off-by: David S. 
Miller --- drivers/net/vrf.c | 4 ++-- include/net/dst.h | 12 ------------ include/net/neighbour.h | 10 ++++++++++ net/ipv4/ip_output.c | 2 +- net/ipv6/ip6_output.c | 2 +- 5 files changed, 14 insertions(+), 16 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 630eafdb79e8..22379da63400 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -379,7 +379,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); - ret = dst_neigh_output(dst, neigh, skb); + ret = neigh_output(neigh, skb); rcu_read_unlock_bh(); return ret; } @@ -577,7 +577,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); - ret = dst_neigh_output(dst, neigh, skb); + ret = neigh_output(neigh, skb); } rcu_read_unlock_bh(); diff --git a/include/net/dst.h b/include/net/dst.h index 84a1043dd6a1..049af33da3b6 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -442,18 +442,6 @@ static inline void dst_confirm(struct dst_entry *dst) { } -static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, - struct sk_buff *skb) -{ - const struct hh_cache *hh; - - hh = &n->hh; - if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) - return neigh_hh_output(hh, skb); - else - return n->output(n, skb); -} - static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) { struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 8b683841e574..5ebf69491160 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -468,6 +468,16 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb return dev_queue_xmit(skb); } +static inline int neigh_output(struct neighbour *n, struct sk_buff *skb) +{ + const struct hh_cache *hh = &n->hh; + + if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) + return neigh_hh_output(hh, skb); + else + return n->output(n, skb); +} + static inline struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) { diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 7a719f1ae556..737ce826d7ec 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -225,7 +225,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s int res; sock_confirm_neigh(skb, neigh); - res = dst_neigh_output(dst, neigh, skb); + res = neigh_output(neigh, skb); rcu_read_unlock_bh(); return res; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index d299040613a0..a75871c62328 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -120,7 +120,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); - ret = dst_neigh_output(dst, neigh, skb); + ret = neigh_output(neigh, skb); rcu_read_unlock_bh(); return ret; } -- cgit v1.2.3 From 98eb253cbde7e860fad83fa759058269ca69916d Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Fri, 10 Feb 2017 21:38:36 -0800 Subject: vxlan: remove vni zero check and drop for COLLECT_METADATA This patch drops the vni zero check for COLLECT_METADATA mode. It is not really needed, vni zero is a valid vni. 
Fixes: 3ad7a4b141eb ("vxlan: support fdb and learning in COLLECT_METADATA mode" Reported-by: Joe Stringer Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2374a75dcb55..4e27c5b09600 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1333,9 +1333,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); - if ((vs->flags & VXLAN_F_COLLECT_METADATA) && !vni) - goto drop; - vxlan = vxlan_vs_find_vni(vs, vni); if (!vxlan) goto drop; -- cgit v1.2.3 From 1bf960502e4c94425c3a3a985200834fb59839c6 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Sat, 11 Feb 2017 03:49:57 +0200 Subject: net: ethernet: ti: cpsw: return NET_XMIT_DROP if skb_padto failed If skb_padto failed the skb has been dropped already, so it was consumed, but it doesn't mean it was sent, thus no need to update queue tx time, etc. So, return NET_XMIT_DROP as more appropriate. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4d1c0c3042c7..503fa8af37d7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1604,7 +1604,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); ndev->stats.tx_dropped++; - return NETDEV_TX_OK; + return NET_XMIT_DROP; } if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && -- cgit v1.2.3 From 87b60cfacf9f17cf71933c6e33b66e68160af71d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 10 Feb 2017 10:31:49 -0800 Subject: net_sched: fix error recovery at qdisc creation Dmitry reported uses after free in qdisc code [1] The problem here is that ops->init() can return an error. qdisc_create_dflt() then call ops->destroy(), while qdisc_create() does _not_ call it. Four qdisc chose to call their own ops->destroy(), assuming their caller would not. This patch makes sure qdisc_create() calls ops->destroy() and fixes the four qdisc to avoid double free. 
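A minimal stand-alone sketch of the ownership rule this fix settles on, using invented names (fake_qdisc, fake_ops, fake_create) rather than the kernel's structures: the code that calls ->init() is the code that calls ->destroy() on failure, so ->init() never cleans up after itself and ->destroy() must tolerate a partially initialised private area.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_qdisc {
        int *table;                     /* allocated by init, freed by destroy */
};

struct fake_ops {
        int  (*init)(struct fake_qdisc *q);
        void (*destroy)(struct fake_qdisc *q);
};

static int fake_init(struct fake_qdisc *q)
{
        q->table = calloc(16, sizeof(*q->table));
        if (!q->table)
                return -ENOMEM;         /* no cleanup here: the creator calls ->destroy() */
        return 0;
}

static void fake_destroy(struct fake_qdisc *q)
{
        free(q->table);                 /* must cope with table == NULL */
        q->table = NULL;
}

/* Mirrors what qdisc_create() and qdisc_create_dflt() now both do. */
static struct fake_qdisc *fake_create(const struct fake_ops *ops)
{
        struct fake_qdisc *q = calloc(1, sizeof(*q));

        if (!q)
                return NULL;
        if (ops->init(q)) {
                ops->destroy(q);        /* single point of cleanup */
                free(q);
                return NULL;
        }
        return q;
}

int main(void)
{
        const struct fake_ops ops = { fake_init, fake_destroy };
        struct fake_qdisc *q = fake_create(&ops);

        printf("create %s\n", q ? "succeeded" : "failed");
        if (q) {
                fake_destroy(q);
                free(q);
        }
        return 0;
}

Under this rule the four affected qdiscs simply return the error on failure, as the hunks below show.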
[1] BUG: KASAN: use-after-free in mq_destroy+0x242/0x290 net/sched/sch_mq.c:33 at addr ffff8801d415d440 Read of size 8 by task syz-executor2/5030 CPU: 0 PID: 5030 Comm: syz-executor2 Not tainted 4.3.5-smp-DEV #119 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 0000000000000046 ffff8801b435b870 ffffffff81bbbed4 ffff8801db000400 ffff8801d415d440 ffff8801d415dc40 ffff8801c4988510 ffff8801b435b898 ffffffff816682b1 ffff8801b435b928 ffff8801d415d440 ffff8801c49880c0 Call Trace: [] __dump_stack lib/dump_stack.c:15 [inline] [] dump_stack+0x6c/0x98 lib/dump_stack.c:51 [] kasan_object_err+0x21/0x70 mm/kasan/report.c:158 [] print_address_description mm/kasan/report.c:196 [inline] [] kasan_report_error+0x1b4/0x4b0 mm/kasan/report.c:285 [] kasan_report mm/kasan/report.c:305 [inline] [] __asan_report_load8_noabort+0x43/0x50 mm/kasan/report.c:326 [] mq_destroy+0x242/0x290 net/sched/sch_mq.c:33 [] qdisc_destroy+0x12d/0x290 net/sched/sch_generic.c:953 [] qdisc_create_dflt+0xf0/0x120 net/sched/sch_generic.c:848 [] attach_default_qdiscs net/sched/sch_generic.c:1029 [inline] [] dev_activate+0x6ad/0x880 net/sched/sch_generic.c:1064 [] __dev_open+0x221/0x320 net/core/dev.c:1403 [] __dev_change_flags+0x15e/0x3e0 net/core/dev.c:6858 [] dev_change_flags+0x8e/0x140 net/core/dev.c:6926 [] dev_ifsioc+0x446/0x890 net/core/dev_ioctl.c:260 [] dev_ioctl+0x1ba/0xb80 net/core/dev_ioctl.c:546 [] sock_do_ioctl+0x99/0xb0 net/socket.c:879 [] sock_ioctl+0x2a0/0x390 net/socket.c:958 [] vfs_ioctl fs/ioctl.c:44 [inline] [] do_vfs_ioctl+0x8a8/0xe50 fs/ioctl.c:611 [] SYSC_ioctl fs/ioctl.c:626 [inline] [] SyS_ioctl+0x94/0xc0 fs/ioctl.c:617 [] entry_SYSCALL_64_fastpath+0x12/0x17 Signed-off-by: Eric Dumazet Reported-by: Dmitry Vyukov Signed-off-by: David S. Miller --- net/sched/sch_api.c | 2 ++ net/sched/sch_hhf.c | 8 ++++++-- net/sched/sch_mq.c | 10 +++------- net/sched/sch_mqprio.c | 19 ++++++------------- net/sched/sch_sfq.c | 3 ++- 5 files changed, 19 insertions(+), 23 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index adeabaec0d0b..a13c15e8f087 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1019,6 +1019,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev, return sch; } + /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */ + ops->destroy(sch); err_out3: dev_put(dev); kfree((char *) sch - sch->padded); diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index e3d0458af17b..2fae8b5f1b80 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -627,7 +627,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * sizeof(u32)); if (!q->hhf_arrays[i]) { - hhf_destroy(sch); + /* Note: hhf_destroy() will be called + * by our caller. + */ return -ENOMEM; } } @@ -638,7 +640,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / BITS_PER_BYTE); if (!q->hhf_valid_bits[i]) { - hhf_destroy(sch); + /* Note: hhf_destroy() will be called + * by our caller. 
+ */ return -ENOMEM; } } diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 2bc8d7f8df16..20b7f1646f69 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) /* pre-allocate qdiscs, attachment can't fail */ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), GFP_KERNEL); - if (priv->qdiscs == NULL) + if (!priv->qdiscs) return -ENOMEM; for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { @@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx), TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(ntx + 1))); - if (qdisc == NULL) - goto err; + if (!qdisc) + return -ENOMEM; priv->qdiscs[ntx] = qdisc; qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } sch->flags |= TCQ_F_MQROOT; return 0; - -err: - mq_destroy(sch); - return -ENOMEM; } static void mq_attach(struct Qdisc *sch) diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index b5c502c78143..922683418e53 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -118,10 +118,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) /* pre-allocate qdisc, attachment can't fail */ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), GFP_KERNEL); - if (priv->qdiscs == NULL) { - err = -ENOMEM; - goto err; - } + if (!priv->qdiscs) + return -ENOMEM; for (i = 0; i < dev->num_tx_queues; i++) { dev_queue = netdev_get_tx_queue(dev, i); @@ -129,10 +127,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) get_default_qdisc_ops(dev, i), TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(i + 1))); - if (qdisc == NULL) { - err = -ENOMEM; - goto err; - } + if (!qdisc) + return -ENOMEM; + priv->qdiscs[i] = qdisc; qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } @@ -148,7 +145,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) priv->hw_owned = 1; err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); if (err) - goto err; + return err; } else { netdev_set_num_tc(dev, qopt->num_tc); for (i = 0; i < qopt->num_tc; i++) @@ -162,10 +159,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) sch->flags |= TCQ_F_MQROOT; return 0; - -err: - mqprio_destroy(sch); - return err; } static void mqprio_attach(struct Qdisc *sch) diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 83d06e251b93..42e8c8615e65 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -743,9 +743,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); if (!q->ht || !q->slots) { - sfq_destroy(sch); + /* Note: sfq_destroy() will be called by our caller */ return -ENOMEM; } + for (i = 0; i < q->divisor; i++) q->ht[i] = SFQ_EMPTY_SLOT; -- cgit v1.2.3 From d7ce6422d6e64502d9f6acff9a4466e2ddb64721 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 9 Feb 2017 23:29:13 -0800 Subject: i40e: don't check params until after checking for client instance We can avoid the minor bit of work by calling check params after we check for the client instance, since we're about to return early in cases where we do not have a client. 
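The i40e change below amounts to hoisting the cheap "is this instance ours?" test in front of the parameter gathering. A small illustrative sketch, with invented types (struct instance, struct params) standing in for the driver's client instance and parameter block, and get_params() standing in for i40e_client_get_params():

#include <stdio.h>
#include <string.h>

struct params { int mtu; };
struct instance { int owned_by_us; };

static void get_params(struct params *p)        /* stand-in for i40e_client_get_params() */
{
        p->mtu = 1500;
}

static void notify_l2_param_changes(const struct instance *inst, int n)
{
        struct params params;
        int i;

        for (i = 0; i < n; i++) {
                if (!inst[i].owned_by_us)
                        continue;               /* skip before doing any work */

                memset(&params, 0, sizeof(params));
                get_params(&params);            /* previously done once, unconditionally */
                printf("notify instance %d, mtu %d\n", i, params.mtu);
        }
}

int main(void)
{
        struct instance insts[2] = { { 0 }, { 1 } };

        notify_l2_param_changes(insts, 2);
        return 0;
}

Matching instances see the same parameters as before; non-matching ones no longer pay for the memset and the parameter read.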
Change-ID: I56f8ea2ba48d4f571fa331c9ace50819a022fa1c Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_client.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 7ca048f0b159..f6409f9b3a3a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -174,8 +174,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) if (!vsi) return; - memset(¶ms, 0, sizeof(params)); - i40e_client_get_params(vsi, ¶ms); mutex_lock(&i40e_client_instance_mutex); list_for_each_entry(cdev, &i40e_client_instances, list) { if (cdev->lan_info.pf == vsi->back) { @@ -186,6 +184,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) "Cannot locate client instance l2_param_change routine\n"); continue; } + memset(¶ms, 0, sizeof(params)); + i40e_client_get_params(vsi, ¶ms); if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); -- cgit v1.2.3 From e5f77f4a2ff2d6ce2d973ebc62084d92f203c9f6 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 9 Feb 2017 23:35:18 -0800 Subject: i40evf: track outstanding client request The iWarp client cannot continue until this operation has been completed by the PF driver. Sleep (with timeout) until the reply from the PF driver has been received. Change-ID: I5dc41b857bba32d0218b7ce167b5da122dadf349 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 1 + drivers/net/ethernet/intel/i40evf/i40evf.h | 1 + drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index fc374f833aa9..d38a2b2aea2b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -81,6 +81,7 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index fffe4cf2c20b..00c42d803276 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -195,6 +195,7 @@ struct i40evf_adapter { u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; + u32 client_pending; struct msix_entry *msix_entries; u32 flags; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 2059a8e88908..bee58af390e1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -999,6 +999,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; + case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + adapter->client_pending &= + ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); + break; case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct i40e_virtchnl_rss_hena *vrh = (struct i40e_virtchnl_rss_hena *)msg; -- cgit v1.2.3 From 
7987dcd7b95111a5acbf5abdbf155eedacd3546b Mon Sep 17 00:00:00 2001 From: Scott Peterson Date: Thu, 9 Feb 2017 23:37:28 -0800 Subject: i40e/i40evf: Limit DMA sync of RX buffers to actual packet size On packet RX, we perform a DMA sync for CPU before passing the packet up. Here we limit that sync to the actual length of the incoming packet, rather than always syncing the entire buffer. Change-ID: I626aaf6c37275a8ce9e81efcaa773f327b331487 Signed-off-by: Scott Peterson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 16 +++++++++------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 16 +++++++++------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f5baeb154d39..4852445122d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1525,7 +1525,7 @@ static inline bool i40e_page_is_reserved(struct page *page) * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware + * @size: packet length from rx_desc * @skb: sk_buff to place the data into * * This function will add the data contained in rx_buffer->page to the skb. @@ -1538,13 +1538,10 @@ static inline bool i40e_page_is_reserved(struct page *page) **/ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, - union i40e_rx_desc *rx_desc, + unsigned int size, struct sk_buff *skb) { struct page *page = rx_buffer->page; - u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; #if (PAGE_SIZE < 8192) unsigned int truesize = I40E_RXBUFFER_2048; #else @@ -1613,6 +1610,11 @@ static inline struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc) { + u64 local_status_error_len = + le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = + (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; struct i40e_rx_buffer *rx_buffer; struct sk_buff *skb; struct page *page; @@ -1654,11 +1656,11 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - I40E_RXBUFFER_2048, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index d4e488267988..4264d9133366 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1003,7 +1003,7 @@ static inline bool i40e_page_is_reserved(struct page *page) * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware + * @size: packet length from rx_desc * @skb: sk_buff to place the data into * * This function will add the data contained in rx_buffer->page to the 
skb. @@ -1016,13 +1016,10 @@ static inline bool i40e_page_is_reserved(struct page *page) **/ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, - union i40e_rx_desc *rx_desc, + unsigned int size, struct sk_buff *skb) { struct page *page = rx_buffer->page; - u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; #if (PAGE_SIZE < 8192) unsigned int truesize = I40E_RXBUFFER_2048; #else @@ -1091,6 +1088,11 @@ static inline struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc) { + u64 local_status_error_len = + le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = + (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; struct i40e_rx_buffer *rx_buffer; struct sk_buff *skb; struct page *page; @@ -1132,11 +1134,11 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - I40E_RXBUFFER_2048, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; -- cgit v1.2.3 From e72e56597ba15ce70f4fc1eb2ceeaa8da0d4ab5e Mon Sep 17 00:00:00 2001 From: Scott Peterson Date: Thu, 9 Feb 2017 23:40:25 -0800 Subject: i40e/i40evf: Moves skb from i40e_rx_buffer to i40e_ring This patch reduces the size of struct i40e_rx_buffer by one pointer, and makes the i40e driver a little more consistent with the igb driver in terms of packets that span buffers. We do this by moving the skb field from struct i40e_rx_buffer to struct i40e_ring. We pass the skb we already have (or NULL if we don't) to i40e_fetch_rx_buffer(), which skips the skb allocation if we already have one for this packet. 
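A compact sketch of the hand-off this change introduces, using invented types (fake_ring, fake_skb) and a stub descriptor fetcher in place of the real Rx path: a packet that has not reached EOP when the poll loop stops is parked in the ring and picked up again on the next call.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_skb { int frags; };

struct fake_ring {
        struct fake_skb *skb;           /* packet in progress, NULL otherwise */
        int next_desc;
};

/* Stub: each "descriptor" adds one fragment; every packet spans three. */
static void fetch_desc(struct fake_ring *r, struct fake_skb **skb, bool *eop)
{
        if (!*skb)
                *skb = calloc(1, sizeof(**skb));
        *eop = false;
        if (!*skb)
                return;
        (*skb)->frags++;
        *eop = (++r->next_desc % 3) == 0;
}

static int fake_clean_rx_irq(struct fake_ring *ring, int descs_ready)
{
        struct fake_skb *skb = ring->skb;       /* resume the parked packet, if any */
        int pkts = 0;

        while (descs_ready--) {
                bool eop;

                fetch_desc(ring, &skb, &eop);
                if (!skb)
                        break;                  /* allocation failed, retry next poll */
                if (!eop)
                        continue;               /* more fragments still to come */
                printf("delivered skb with %d frags\n", skb->frags);
                free(skb);                      /* the stack owns it from here */
                skb = NULL;
                pkts++;
        }

        ring->skb = skb;                        /* park an unfinished packet */
        return pkts;
}

int main(void)
{
        struct fake_ring ring = { NULL, 0 };

        fake_clean_rx_irq(&ring, 2);    /* stops mid-packet: 2 of 3 fragments seen */
        fake_clean_rx_irq(&ring, 1);    /* resumes and delivers the 3-fragment skb */
        return 0;
}

Dropping the skb pointer from struct i40e_rx_buffer is what shrinks the per-buffer state; the i40e_txrx.h hunks below move that single pointer to struct i40e_ring.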
Change-ID: I4ad48a531844494ba0c5d8e1a62209a057f661b0 Signed-off-by: Scott Peterson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 31 +++++++++++++-------------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 9 +++++++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 30 +++++++++++++------------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 9 +++++++- 4 files changed, 46 insertions(+), 33 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4852445122d9..aa9755b55d48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1013,14 +1013,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->skb) { - dev_kfree_skb(rx_bi->skb); - rx_bi->skb = NULL; - } if (!rx_bi->page) continue; @@ -1608,7 +1609,8 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, */ static inline struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc) + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) { u64 local_status_error_len = le64_to_cpu(rx_desc->wb.qword1.status_error_len); @@ -1616,15 +1618,12 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; struct i40e_rx_buffer *rx_buffer; - struct sk_buff *skb; struct page *page; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; page = rx_buffer->page; prefetchw(page); - skb = rx_buffer->skb; - if (likely(!skb)) { void *page_addr = page_address(page) + rx_buffer->page_offset; @@ -1648,8 +1647,6 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, * it now to avoid a possible cache miss */ prefetchw(skb->data); - } else { - rx_buffer->skb = NULL; } /* we are reusing so sync this buffer for CPU use */ @@ -1702,7 +1699,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, #define staterrlen rx_desc->wb.qword1.status_error_len if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) { i40e_clean_programming_status(rx_ring, rx_desc); - rx_ring->rx_bi[ntc].skb = skb; return true; } /* if we are the last buffer then there is nothing else to do */ @@ -1710,8 +1706,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) return false; - /* place skb in next buffer to be received */ - rx_ring->rx_bi[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; return true; @@ -1732,12 +1726,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < budget)) { union i40e_rx_desc *rx_desc; - struct sk_buff *skb; u16 vlan_tag; u8 rx_ptype; u64 qword; @@ -1766,7 +1760,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - skb = i40e_fetch_rx_buffer(rx_ring, rx_desc); + skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb); if (!skb) break; @@ -1785,8 +1779,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) continue; } 
- if (i40e_cleanup_headers(rx_ring, skb)) + if (i40e_cleanup_headers(rx_ring, skb)) { + skb = NULL; continue; + } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1811,11 +1807,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; i40e_receive_skb(rx_ring, skb, vlan_tag); + skb = NULL; /* update budget accounting */ total_rx_packets++; } + rx_ring->skb = skb; + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 1ea820e9debe..f80979025c01 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -253,7 +253,6 @@ struct i40e_tx_buffer { }; struct i40e_rx_buffer { - struct sk_buff *skb; dma_addr_t dma; struct page *page; unsigned int page_offset; @@ -354,6 +353,14 @@ struct i40e_ring { struct rcu_head rcu; /* to avoid race on free */ u16 next_to_alloc; + struct sk_buff *skb; /* When i40e_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * i40e_clean_rx_ring_irq() is called + * for this ring. + */ } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 4264d9133366..7e9c425fc149 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -501,14 +501,15 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->skb) { - dev_kfree_skb(rx_bi->skb); - rx_bi->skb = NULL; - } if (!rx_bi->page) continue; @@ -1086,7 +1087,8 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, */ static inline struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc) + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) { u64 local_status_error_len = le64_to_cpu(rx_desc->wb.qword1.status_error_len); @@ -1094,15 +1096,12 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; struct i40e_rx_buffer *rx_buffer; - struct sk_buff *skb; struct page *page; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; page = rx_buffer->page; prefetchw(page); - skb = rx_buffer->skb; - if (likely(!skb)) { void *page_addr = page_address(page) + rx_buffer->page_offset; @@ -1126,8 +1125,6 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, * it now to avoid a possible cache miss */ prefetchw(skb->data); - } else { - rx_buffer->skb = NULL; } /* we are reusing so sync this buffer for CPU use */ @@ -1182,8 +1179,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) return false; - /* place skb in next buffer to be received */ - rx_ring->rx_bi[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; return true; @@ -1204,12 +1199,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { 
unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < budget)) { union i40e_rx_desc *rx_desc; - struct sk_buff *skb; u16 vlan_tag; u8 rx_ptype; u64 qword; @@ -1238,7 +1233,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc); + skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb); if (!skb) break; @@ -1257,8 +1252,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) continue; } - if (i40e_cleanup_headers(rx_ring, skb)) + if (i40e_cleanup_headers(rx_ring, skb)) { + skb = NULL; continue; + } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1275,11 +1272,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; i40e_receive_skb(rx_ring, skb, vlan_tag); + skb = NULL; /* update budget accounting */ total_rx_packets++; } + rx_ring->skb = skb; + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index a5fc789f78eb..8274ba68bd32 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -239,7 +239,6 @@ struct i40e_tx_buffer { }; struct i40e_rx_buffer { - struct sk_buff *skb; dma_addr_t dma; struct page *page; unsigned int page_offset; @@ -340,6 +339,14 @@ struct i40e_ring { struct rcu_head rcu; /* to avoid race on free */ u16 next_to_alloc; + struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * i40evf_clean_rx_ring_irq() is called + * for this ring. + */ } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { -- cgit v1.2.3 From 9b37c937313bf6769d0b018ca35180b379d40862 Mon Sep 17 00:00:00 2001 From: Scott Peterson Date: Thu, 9 Feb 2017 23:43:30 -0800 Subject: i40e/i40evf: eliminate i40e_pull_tail() Reorganize the i40e_pull_tail() logic, doing it in i40e_add_rx_frag() where it's cheaper. The igb driver does this the same way. Also renames i40e_page_is_reserved() to reflect what it actually tests. Change-ID: Icd9cc507aae1fcdc02308b3a09034111b4c24071 Signed-off-by: Scott Peterson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 169 ++++++++++++++------------ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 169 ++++++++++++++------------ 2 files changed, 186 insertions(+), 152 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index aa9755b55d48..1202b4c74da7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1425,45 +1425,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, skb_record_rx_queue(skb, rx_ring->queue_index); } -/** - * i40e_pull_tail - i40e specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an i40e specific version of __pskb_pull_tail. 
The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - */ -static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - /** * i40e_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on @@ -1479,10 +1440,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) **/ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) { - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - i40e_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -1514,12 +1471,78 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, } /** - * i40e_page_is_reserved - check if reuse is possible + * i40e_page_is_reusable - check if any reuse is possible * @page: page struct to check + * + * A page is not reusable if it was allocated under low memory + * conditions, or it's not in the same NUMA node as this CPU. */ -static inline bool i40e_page_is_reserved(struct page *page) +static inline bool i40e_page_is_reusable(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + return (page_to_nid(page) == numa_mem_id()) && + !page_is_pfmemalloc(page); +} + +/** + * i40e_can_reuse_rx_page - Determine if this page can be reused by + * the adapter for another receive + * + * @rx_buffer: buffer containing the page + * @page: page address from rx_buffer + * @truesize: actual size of the buffer in this page + * + * If page is reusable, rx_buffer->page_offset is adjusted to point to + * an unused region in the page. + * + * For small pages, @truesize will be a constant value, half the size + * of the memory at page. We'll attempt to alternate between high and + * low halves of the page, with one half ready for use by the hardware + * and the other half being consumed by the stack. We use the page + * ref count to determine whether the stack has finished consuming the + * portion of this page that was passed up with a previous packet. If + * the page ref count is >1, we'll assume the "other" half page is + * still busy, and this page cannot be reused. + * + * For larger pages, @truesize will be the actual space used by the + * received packet (adjusted upward to an even multiple of the cache + * line size). This will advance through the page by the amount + * actually consumed by the received packets while there is still + * space for a buffer. 
Each region of larger pages will be used at + * most once, after which the page will not be reused. + * + * In either case, if the page is reusable its refcount is increased. + **/ +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, + struct page *page, + const unsigned int truesize) +{ +#if (PAGE_SIZE >= 8192) + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; +#endif + + /* Is any reuse possible? */ + if (unlikely(!i40e_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Inc ref count on page before passing it up to the stack */ + get_page(page); + + return true; } /** @@ -1543,23 +1566,25 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsigned int truesize = I40E_RXBUFFER_2048; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; #endif + unsigned int pull_len; + + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; /* will the data fit in the skb we allocated? if so, just * copy it as it is pretty small anyway */ - if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - + if (size <= I40E_RX_HDR_SIZE) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!i40e_page_is_reserved(page))) + /* page is reusable, we can reuse buffer as-is */ + if (likely(i40e_page_is_reusable(page))) return true; /* this page cannot be reused so discard it */ @@ -1567,34 +1592,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, return false; } - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); - - /* avoid re-using remote pages */ - if (unlikely(i40e_page_is_reserved(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; + /* we need the header to contain the greater of either + * ETH_HLEN or 60 bytes if the skb->len is less than + * 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; + /* align pull length to size of long to optimize + * memcpy performance + */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); - if (rx_buffer->page_offset > last_offset) - return false; -#endif + /* update all of the pointers */ + va += pull_len; + size -= pull_len; - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. 
- */ - get_page(rx_buffer->page); +add_tail_frag: + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (unsigned long)va & ~PAGE_MASK, size, truesize); - return true; + return i40e_can_reuse_rx_page(rx_buffer, page, truesize); } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 7e9c425fc149..b758846d4dc5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -903,45 +903,6 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, skb_record_rx_queue(skb, rx_ring->queue_index); } -/** - * i40e_pull_tail - i40e specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an i40e specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - */ -static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - /** * i40e_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on @@ -957,10 +918,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) **/ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) { - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - i40e_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -992,12 +949,78 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, } /** - * i40e_page_is_reserved - check if reuse is possible + * i40e_page_is_reusable - check if any reuse is possible * @page: page struct to check + * + * A page is not reusable if it was allocated under low memory + * conditions, or it's not in the same NUMA node as this CPU. 
*/ -static inline bool i40e_page_is_reserved(struct page *page) +static inline bool i40e_page_is_reusable(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + return (page_to_nid(page) == numa_mem_id()) && + !page_is_pfmemalloc(page); +} + +/** + * i40e_can_reuse_rx_page - Determine if this page can be reused by + * the adapter for another receive + * + * @rx_buffer: buffer containing the page + * @page: page address from rx_buffer + * @truesize: actual size of the buffer in this page + * + * If page is reusable, rx_buffer->page_offset is adjusted to point to + * an unused region in the page. + * + * For small pages, @truesize will be a constant value, half the size + * of the memory at page. We'll attempt to alternate between high and + * low halves of the page, with one half ready for use by the hardware + * and the other half being consumed by the stack. We use the page + * ref count to determine whether the stack has finished consuming the + * portion of this page that was passed up with a previous packet. If + * the page ref count is >1, we'll assume the "other" half page is + * still busy, and this page cannot be reused. + * + * For larger pages, @truesize will be the actual space used by the + * received packet (adjusted upward to an even multiple of the cache + * line size). This will advance through the page by the amount + * actually consumed by the received packets while there is still + * space for a buffer. Each region of larger pages will be used at + * most once, after which the page will not be reused. + * + * In either case, if the page is reusable its refcount is increased. + **/ +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, + struct page *page, + const unsigned int truesize) +{ +#if (PAGE_SIZE >= 8192) + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; +#endif + + /* Is any reuse possible? */ + if (unlikely(!i40e_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Inc ref count on page before passing it up to the stack */ + get_page(page); + + return true; } /** @@ -1021,23 +1044,25 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsigned int truesize = I40E_RXBUFFER_2048; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; #endif + unsigned int pull_len; + + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; /* will the data fit in the skb we allocated? 
if so, just * copy it as it is pretty small anyway */ - if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - + if (size <= I40E_RX_HDR_SIZE) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!i40e_page_is_reserved(page))) + /* page is reusable, we can reuse buffer as-is */ + if (likely(i40e_page_is_reusable(page))) return true; /* this page cannot be reused so discard it */ @@ -1045,34 +1070,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, return false; } - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); - - /* avoid re-using remote pages */ - if (unlikely(i40e_page_is_reserved(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; + /* we need the header to contain the greater of either + * ETH_HLEN or 60 bytes if the skb->len is less than + * 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; + /* align pull length to size of long to optimize + * memcpy performance + */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); - if (rx_buffer->page_offset > last_offset) - return false; -#endif + /* update all of the pointers */ + va += pull_len; + size -= pull_len; - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - get_page(rx_buffer->page); +add_tail_frag: + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (unsigned long)va & ~PAGE_MASK, size, truesize); - return true; + return i40e_can_reuse_rx_page(rx_buffer, page, truesize); } /** -- cgit v1.2.3 From a158aeaf5bb5b014240b360693751476aca7440b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 9 Feb 2017 23:44:27 -0800 Subject: i40e: update comment explaining where FDIR buffers are freed The original comment implies that the only location where the raw_packet buffer will be freed is in i40e_clean_tx_ring() which is incorrect. In fact this isn't even the normal case. Update the comment explaining where the memory is freed. Change-ID: Ie0defc35ed1c3af183f81fdc60b6d783707a5595 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1202b4c74da7..09f09ea7a5e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -432,7 +432,12 @@ unsupported_flow: ret = -EINVAL; } - /* The buffer allocated here is freed by the i40e_clean_tx_ring() */ + /* The buffer allocated here will be normally be freed by + * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit + * completion. In the event of an error adding the buffer to the FDIR + * ring, it will immediately be freed. It may also be freed by + * i40e_clean_tx_ring() when closing the VSI. 
+ */ return ret; } -- cgit v1.2.3 From cfffef76e7fa5b9ac3ec8298336d0265bc86af3c Mon Sep 17 00:00:00 2001 From: Bimmy Pujari Date: Mon, 28 Nov 2016 16:06:11 -0800 Subject: i40e/i40evf : Changed version from 1.6.25 to 1.6.27 Signed-off-by: Bimmy Pujari Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9f785c015a2f..0afb32b12e36 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -41,7 +41,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 25 +#define DRV_VERSION_BUILD 27 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 3fe87e021148..729d8fa91a2b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 25 +#define DRV_VERSION_BUILD 27 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ -- cgit v1.2.3 From 3bb83baf9ac9c73f4da051cae17042f264832e61 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 9 Feb 2017 23:46:50 -0800 Subject: i40e: Clean up dead code The function i40e_client_prepare() can never return an error. So make it void and quit checking its return value. 
Change-ID: I9ff311e2324dde329eb68648efb2c94aaff856db Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_client.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index f6409f9b3a3a..233627e84751 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -653,13 +653,11 @@ static int i40e_client_release(struct i40e_client *client) * i40e_client_prepare - prepare client specific resources * @client: pointer to the registered client * - * Return 0 on success or < 0 on error **/ -static int i40e_client_prepare(struct i40e_client *client) +static void i40e_client_prepare(struct i40e_client *client) { struct i40e_device *ldev; struct i40e_pf *pf; - int ret = 0; mutex_lock(&i40e_device_mutex); list_for_each_entry(ldev, &i40e_devices, list) { @@ -669,7 +667,6 @@ static int i40e_client_prepare(struct i40e_client *client) i40e_service_event_schedule(pf); } mutex_unlock(&i40e_device_mutex); - return ret; } /** @@ -926,13 +923,9 @@ int i40e_register_client(struct i40e_client *client) set_bit(__I40E_CLIENT_REGISTERED, &client->state); mutex_unlock(&i40e_client_mutex); - if (i40e_client_prepare(client)) { - ret = -EIO; - goto out; - } + i40e_client_prepare(client); - pr_info("i40e: Registered client %s with return code %d\n", - client->name, ret); + pr_info("i40e: Registered client %s\n", client->name); out: return ret; } -- cgit v1.2.3 From b3f028fc8abdf72c6ed1103e60e89dd60538f126 Mon Sep 17 00:00:00 2001 From: Sudheer Mogilappagari Date: Thu, 9 Feb 2017 23:58:22 -0800 Subject: i40e: Add bus number info to i40e_bus_info struct Currently i40e_bus_info has PCI device and function info only and log messages print device number as bus number. Added field to provide bus number info and modified log statements to print bus, device and function information. 
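The formatting change in the next patch is easier to see in isolation. A tiny sketch using an illustrative struct in place of i40e_bus_info (same field names, made-up values):

#include <stdio.h>

struct bus_info { unsigned int bus_id, device, func; };

int main(void)
{
        struct bus_info b = { 0x3b, 0x00, 0x1 };

        /* old messages printed only device.function, which reads like bus.function */
        printf("old: %02x.%x\n", b.device, b.func);
        /* new messages print the full bus:device.function triple */
        printf("new: %02x:%02x.%x\n", b.bus_id, b.device, b.func);
        return 0;
}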
Change-ID: I811617cee2714cc0d6bade8d369f57040990756f Signed-off-by: Sudheer Mogilappagari Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_client.c | 16 +++++++++------- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + drivers/net/ethernet/intel/i40e/i40e_osdep.h | 12 ++++++------ drivers/net/ethernet/intel/i40e/i40e_type.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_type.h | 1 + drivers/net/ethernet/intel/i40evf/i40evf_main.c | 1 + 6 files changed, 19 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 233627e84751..d570219efd9f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -510,9 +510,10 @@ void i40e_client_subtask(struct i40e_pf *pf) continue; if (!existing) { - dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", + dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", client->name, pf->hw.pf_id, - pf->hw.bus.device, pf->hw.bus.func); + pf->hw.bus.bus_id, pf->hw.bus.device, + pf->hw.bus.func); } mutex_lock(&i40e_client_instance_mutex); @@ -561,8 +562,9 @@ int i40e_lan_add_device(struct i40e_pf *pf) ldev->pf = pf; INIT_LIST_HEAD(&ldev->list); list_add(&ldev->list, &i40e_devices); - dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n", - pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); + dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", + pf->hw.pf_id, pf->hw.bus.bus_id, + pf->hw.bus.device, pf->hw.bus.func); /* Since in some cases register may have happened before a device gets * added, we can schedule a subtask to go initiate the clients if @@ -590,9 +592,9 @@ int i40e_lan_del_device(struct i40e_pf *pf) mutex_lock(&i40e_device_mutex); list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { if (ldev->pf == pf) { - dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n", - pf->hw.pf_id, pf->hw.bus.device, - pf->hw.bus.func); + dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", + pf->hw.pf_id, pf->hw.bus.bus_id, + pf->hw.bus.device, pf->hw.bus.func); list_del(&ldev->list); kfree(ldev); ret = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0afb32b12e36..0f00b1aa370d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10990,6 +10990,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + hw->bus.bus_id = pdev->bus->number; pf->instance = pfs_found; /* set up the locks for the AQ, do this only once in probe diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index be74bcf9c961..fea81ed065db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -69,12 +69,12 @@ struct i40e_virt_mem { #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) #define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) -#define i40e_debug(h, m, s, ...) \ -do { \ - if (((m) & (h)->debug_mask)) \ - pr_info("i40e %02x.%x " s, \ - (h)->bus.device, (h)->bus.func, \ - ##__VA_ARGS__); \ +#define i40e_debug(h, m, s, ...) 
\ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("i40e %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ } while (0) typedef enum i40e_status_code i40e_status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index b6cf8d2670a4..9e9ae9f84016 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -469,6 +469,7 @@ struct i40e_bus_info { u16 func; u16 device; u16 lan_id; + u16 bus_id; }; /* Flow control (FC) parameters */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 92ac60da5201..3f19dff964ea 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -442,6 +442,7 @@ struct i40e_bus_info { u16 func; u16 device; u16 lan_id; + u16 bus_id; }; /* Flow control (FC) parameters */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 729d8fa91a2b..920c1cb06a92 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2726,6 +2726,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + hw->bus.bus_id = pdev->bus->number; /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove -- cgit v1.2.3 From 3e03d7ccf4418c87a134bbfa79c05090296767f7 Mon Sep 17 00:00:00 2001 From: Henry Tieman Date: Fri, 2 Dec 2016 12:32:57 -0800 Subject: i40e: Save link FEC info from link up event Store the FEC status bits from the link up event into the hw_link_info structure. 
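For illustration, a minimal sketch of how the saved bits can later be turned into a readable FEC mode at link-up time, assuming an i40e_hw *hw and the I40E_AQ_CONFIG_FEC_KR_ENA/I40E_AQ_CONFIG_FEC_RS_ENA flags used in the hunks below:

	const char *fec = "None";

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		fec = "CL74 FC-FEC/BASE-R";
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		fec = "CL108 RS-FEC";
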
Change-ID: I9a7b256f6dfb0dce89c2f503075d0d383526832e Signed-off-by: Henry Tieman Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 2 ++ drivers/net/ethernet/intel/i40e/i40e_main.c | 21 +++++++++++++++++++-- drivers/net/ethernet/intel/i40e/i40e_type.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_type.h | 1 + 4 files changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 128735975caa..fc73e4ef27ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1838,6 +1838,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; hw_link_info->link_info = resp->link_info; hw_link_info->an_info = resp->an_info; + hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | + I40E_AQ_CONFIG_FEC_RS_ENA); hw_link_info->ext_info = resp->ext_info; hw_link_info->loopback = resp->loopback; hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0f00b1aa370d..fa4a04da47b7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5272,6 +5272,8 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) enum i40e_aq_link_speed new_speed; char *speed = "Unknown"; char *fc = "Unknown"; + char *fec = ""; + char *an = ""; new_speed = vsi->back->hw.phy.link_info.link_speed; @@ -5331,8 +5333,23 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) break; } - netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", - speed, fc); + if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { + fec = ", FEC: None"; + an = ", Autoneg: False"; + + if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) + an = ", Autoneg: True"; + + if (vsi->back->hw.phy.link_info.fec_info & + I40E_AQ_CONFIG_FEC_KR_ENA) + fec = ", FEC: CL74 FC-FEC/BASE-R"; + else if (vsi->back->hw.phy.link_info.fec_info & + I40E_AQ_CONFIG_FEC_RS_ENA) + fec = ", FEC: CL108 RS-FEC"; + } + + netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n", + speed, fec, an, fc); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 9e9ae9f84016..939f9fdc8f85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -184,6 +184,7 @@ struct i40e_link_status { enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; + u8 fec_info; u8 ext_info; u8 loopback; /* is Link Status Event notification to SW enabled */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3f19dff964ea..16bb88084bb9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -158,6 +158,7 @@ struct i40e_link_status { enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; + u8 fec_info; u8 ext_info; u8 loopback; /* is Link Status Event notification to SW enabled */ -- cgit v1.2.3 From e6e3fc2bd3ee03117e1ce388c897fc3b2da97d65 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 2 Dec 2016 12:32:58 -0800 Subject: i40e: don't warn every time we clear an Rx timestamp register The intent of this message was to indicate to a 
user that we might have missed a timestamp event for a valid packet. The original method of detecting the missed events relied on waiting until all 4 registers were filled. A recent commit d55458c0cd7a5 ("i40e: replace PTP Rx timestamp hang logic") replaced this logic with much better detection scheme that could detect a stalled Rx timestamp register even when other registers were still functional. The new logic means that a message will be displayed almost as soon as a timestamp for a dropped frame occurs. This new logic highlights that the hardware will attempt timestamp for frames which it later decides to drop. The most prominent example is when a multicast PTP frame is received on a multicast address that we are not subscribed to. Because the hardware initiates the Rx timestamp as soon as possible, it will latch an RXTIME register, but then drop the packet. This results in users being confused by the message as they are not expecting to see dropped timestamp messages unless their application also indicates that timestamps were missing. Resolve this by reducing the severity and frequency of the displayed message. We now only print the message if 3 or 4 of the RXTIME registers are stalled and get cleared within the same watchdog event. This ensures that the common case does not constantly display the message. Additionally, since the message is likely not as meaningful to most users, reduce the message to a dev_dbg instead of a dev_warn. Users can still get a count of the number of timestamps dropped by reading the ethtool statistics value, if necessary. Change-ID: I35494442226a444c418dfb4f91a3070d06c8435c Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 9e49ffafce28..2caee35528fa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -280,7 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - int i; + unsigned int i, cleared = 0; /* Since we cannot turn off the Rx timestamp logic if the device is * configured for Tx timestamping, we check if Rx timestamping is @@ -306,14 +306,25 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) time_is_before_jiffies(pf->latch_events[i] + HZ)) { rd32(hw, I40E_PRTTSYN_RXTIME_H(i)); pf->latch_event_flags &= ~BIT(i); - pf->rx_hwtstamp_cleared++; - dev_warn(&pf->pdev->dev, - "Clearing a missed Rx timestamp event for RXTIME[%d]\n", - i); + cleared++; } } spin_unlock_bh(&pf->ptp_rx_lock); + + /* Log a warning if more than 2 timestamps got dropped in the same + * check. We don't want to warn about all drops because it can occur + * in normal scenarios such as PTP frames on multicast addresses we + * aren't listening to. However, administrator should know if this is + * the reason packets aren't receiving timestamps. 
+ */ + if (cleared > 2) + dev_dbg(&pf->pdev->dev, + "Dropped %d missed RXTIME timestamp events\n", + cleared); + + /* Finally, update the rx_hwtstamp_cleared counter */ + pf->rx_hwtstamp_cleared += cleared; } /** -- cgit v1.2.3 From d88d40b01c5c0dad6a1dca3b18267849eef4a2a9 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 2 Dec 2016 12:32:59 -0800 Subject: i40e: allow i40e_update_filter_state to skip broadcast filters Fix a bug where we modified the mac_filter_hash while outside a lock, when handling addition of broadcast filters. Normally, we add filters to firmware by batching the additions into lists and issuing 1 update for every few filters. Broadcast filters are handled differently, by instead setting the broadcast promiscuous mode flags. In order to make sure the 1<->1 mapping of filters in our addition array lined up with filters in the hlist tmp_add_list, we had to remove the filter and move it back to the main hash. However, we didn't do this under lock, which could cause consistency problems for the list. Fix this by updating i40e_update_filter_state logic so that it knows to avoid broadcast filters. This ensures that we don't have to remove the filter separately, and can put it back using the normal flow. Change-ID: Id288fade80b3e3a9a54b68cc249188cb95147518 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 37 ++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index fa4a04da47b7..06c80d4162af 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1842,6 +1842,31 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi, } } +/** + * i40e_next_entry - Get the next non-broadcast filter from a list + * @f: pointer to filter in list + * + * Returns the next non-broadcast filter in the list. Required so that we + * ignore broadcast filters within the list, since these are not handled via + * the normal firmware update path. + */ +static struct i40e_mac_filter *i40e_next_filter(struct i40e_mac_filter *f) +{ + while (f) { + f = hlist_entry(f->hlist.next, + typeof(struct i40e_mac_filter), + hlist); + + /* keep going if we found a broadcast filter */ + if (f && is_broadcast_ether_addr(f->macaddr)) + continue; + + break; + } + + return f; +} + /** * i40e_update_filter_state - Update filter state based on return data * from firmware @@ -1874,9 +1899,9 @@ i40e_update_filter_state(int count, retval++; } - add_head = hlist_entry(add_head->hlist.next, - typeof(struct i40e_mac_filter), - hlist); + add_head = i40e_next_filter(add_head); + if (!add_head) + break; } return retval; @@ -2095,7 +2120,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cmd_flags = 0; /* handle broadcast filters by updating the broadcast - * promiscuous flag instead of deleting a MAC filter. + * promiscuous flag and release filter list. */ if (is_broadcast_ether_addr(f->macaddr)) { i40e_aqc_broadcast_filter(vsi, vsi_name, f); @@ -2164,11 +2189,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) * promiscuous flag instead of adding a MAC filter. 
*/ if (is_broadcast_ether_addr(f->macaddr)) { - u64 key = i40e_addr_to_hkey(f->macaddr); i40e_aqc_broadcast_filter(vsi, vsi_name, f); - - hlist_del(&f->hlist); - hash_add(vsi->mac_filter_hash, &f->hlist, key); continue; } -- cgit v1.2.3 From 671889e6740ac7ab84d1420525b50d1d47001102 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 2 Dec 2016 12:33:00 -0800 Subject: i40e: avoid race condition when sending filters to firmware for addition Refactor how we add new filters to firmware to avoid a race condition that can occur due to removing filters from the hash temporarily. To understand the race condition, suppose that you have a number of MAC filters, but have not yet added any VLANs. Now, add two VLANs in rapid succession. A possible resulting flow would look something like the following: (1) lock hash for add VLAN (2) add the new MAC/VLAN combos for each current MAC filter (3) unlock hash (4) lock hash for filter sync (5) notice that we have a VLAN, so prepare to update all MAC filters with VLAN=-1 to be VLAN=0. (6) move NEW and REMOVE filters to temporary list (7) unlock hash (8) lock hash for add VLAN (9) add new MAC/VLAN combos. Notice that no MAC filters are currently in the hash list, so we don't add any VLANs <--- BUG! (10) unlock hash (11) sync the temporary lists to firmware (12) lock hash for post-sync (13) move the temporary elements back to the main list .... Because we take filters out of the main hash into temporary lists, we introduce a narrow window where it is possible that other callers to the list will not see some of the filters which were previously added but have not yet been finalized. This results in sometimes dropping VLAN additions, and could also result in failing to add a MAC address on the newly added VLAN. One obvious way to avoid this race condition would be to lock the entire firmware process. Unfortunately this does not work because adminq firmware commands take a mutex which results in a sleep while atomic BUG(). So, we can't use the simplest approach. An alternative approach is to simply not remove the filters from the hash list while adding. Instead, add an i40e_new_mac_filter structure which we will use to track added filters. This avoids the need to remove the filter from the hash list. We'll store a pointer to the original i40e_mac_filter, along with our own copy of the state. We won't update the state directly, so as to avoid race with other code that may modify the state while under the lock. We are safe to read f->macaddr and f->vlan since these only change in two locations. The first is on filter creation, which must have already occurred. The second is inside i40e_correct_vlan_filters which was previously run after creation of this object and can't be run again until after. Thus, we should be safe to read the MAC address and VLAN while outside the lock. We also aren't going to run into a use-after-free issue because the only place where we free filters is when they are marked FAILED or when we remove them inside the sync subtask. Since the subtask has its own critical flag to prevent duplicate runs, we know this won't happen. We also know that the only location to transition a filter from NEW to FAILED is inside the subtask also, so we aren't worried about that either. Use the wrapper i40e_new_mac_filter for additions, and once we've finalized the addition to firmware, we will update the filter state inside a lock, and then free the wrapper structure. 
In order to avoid a possible race condition with filter deletion, we won't update the original filter state unless it is still I40E_FILTER_NEW when we finish the firmware sync. This approach is more complex, but avoids race conditions related to filters being temporarily removed from the list. We do not need the same behavior for deletion because we always unconditionally removed the filters from the list regardless of the firmware status. Change-Id: I14b74bc2301f8e69433fbe77ebca532db20c5317 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 16 +++ drivers/net/ethernet/intel/i40e/i40e_main.c | 147 ++++++++++++++++++---------- 2 files changed, 112 insertions(+), 51 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index fdd9069b6cec..7a23d3e47c6f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -467,6 +467,22 @@ struct i40e_mac_filter { enum i40e_filter_state state; }; +/* Wrapper structure to keep track of filters while we are preparing to send + * firmware commands. We cannot send firmware commands while holding a + * spinlock, since it might sleep. To avoid this, we wrap the added filters in + * a separate structure, which will track the state change and update the real + * filter while under lock. We can't simply hold the filters in a separate + * list, as this opens a window for a race condition when adding new MAC + * addresses to all VLANs, or when adding new VLANs to all MAC addresses. + */ +struct i40e_new_mac_filter { + struct hlist_node hlist; + struct i40e_mac_filter *f; + + /* Track future changes to state separately */ + enum i40e_filter_state state; +}; + struct i40e_veb { struct i40e_pf *pf; u16 idx; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 06c80d4162af..e83a8ca5dd65 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1255,6 +1255,7 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, int vlan_filters) { struct i40e_mac_filter *f, *add_head; + struct i40e_new_mac_filter *new; struct hlist_node *h; int bkt, new_vlan; @@ -1273,13 +1274,13 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, */ /* Update the filters about to be added in place */ - hlist_for_each_entry(f, tmp_add_list, hlist) { - if (vsi->info.pvid && f->vlan != vsi->info.pvid) - f->vlan = vsi->info.pvid; - else if (vlan_filters && f->vlan == I40E_VLAN_ANY) - f->vlan = 0; - else if (!vlan_filters && f->vlan == 0) - f->vlan = I40E_VLAN_ANY; + hlist_for_each_entry(new, tmp_add_list, hlist) { + if (vsi->info.pvid && new->f->vlan != vsi->info.pvid) + new->f->vlan = vsi->info.pvid; + else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) + new->f->vlan = 0; + else if (!vlan_filters && new->f->vlan == 0) + new->f->vlan = I40E_VLAN_ANY; } /* Update the remaining active filters */ @@ -1305,9 +1306,16 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, if (!add_head) return -ENOMEM; - /* Put the replacement filter into the add list */ - hash_del(&add_head->hlist); - hlist_add_head(&add_head->hlist, tmp_add_list); + /* Create a temporary i40e_new_mac_filter */ + new = kzalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + return -ENOMEM; + + new->f = add_head; + new->state = add_head->state; + + /* Add the new filter to the tmp list */ + hlist_add_head(&new->hlist, tmp_add_list); /* 
Put the original filter into the delete list */ f->state = I40E_FILTER_REMOVE; @@ -1819,16 +1827,15 @@ static void i40e_set_rx_mode(struct net_device *netdev) } /** - * i40e_undo_filter_entries - Undo the changes made to MAC filter entries + * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * @vsi: Pointer to VSI struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. * - * MAC filter entries from list were slated to be sent to firmware, either for - * addition or deletion. + * MAC filter entries from this list were slated for deletion. **/ -static void i40e_undo_filter_entries(struct i40e_vsi *vsi, - struct hlist_head *from) +static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, + struct hlist_head *from) { struct i40e_mac_filter *f; struct hlist_node *h; @@ -1842,29 +1849,51 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi, } } +/** + * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries + * @vsi: Pointer to vsi struct + * @from: Pointer to list which contains MAC filter entries - changes to + * those entries needs to be undone. + * + * MAC filter entries from this list were slated for addition. + **/ +static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, + struct hlist_head *from) +{ + struct i40e_new_mac_filter *new; + struct hlist_node *h; + + hlist_for_each_entry_safe(new, h, from, hlist) { + /* We can simply free the wrapper structure */ + hlist_del(&new->hlist); + kfree(new); + } +} + /** * i40e_next_entry - Get the next non-broadcast filter from a list - * @f: pointer to filter in list + * @next: pointer to filter in list * * Returns the next non-broadcast filter in the list. Required so that we * ignore broadcast filters within the list, since these are not handled via * the normal firmware update path. */ -static struct i40e_mac_filter *i40e_next_filter(struct i40e_mac_filter *f) +static +struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next) { - while (f) { - f = hlist_entry(f->hlist.next, - typeof(struct i40e_mac_filter), - hlist); + while (next) { + next = hlist_entry(next->hlist.next, + typeof(struct i40e_new_mac_filter), + hlist); /* keep going if we found a broadcast filter */ - if (f && is_broadcast_ether_addr(f->macaddr)) + if (next && is_broadcast_ether_addr(next->f->macaddr)) continue; break; } - return f; + return next; } /** @@ -1880,7 +1909,7 @@ static struct i40e_mac_filter *i40e_next_filter(struct i40e_mac_filter *f) static int i40e_update_filter_state(int count, struct i40e_aqc_add_macvlan_element_data *add_list, - struct i40e_mac_filter *add_head) + struct i40e_new_mac_filter *add_head) { int retval = 0; int i; @@ -1958,7 +1987,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, static void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_aqc_add_macvlan_element_data *list, - struct i40e_mac_filter *add_head, + struct i40e_new_mac_filter *add_head, int num_add, bool *promisc_changed) { struct i40e_hw *hw = &vsi->back->hw; @@ -1986,10 +2015,12 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, * This function sets or clears the promiscuous broadcast flags for VLAN * filters in order to properly receive broadcast frames. Assumes that only * broadcast filters are passed. 
+ * + * Returns status indicating success or failure; **/ -static -void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, - struct i40e_mac_filter *f) +static i40e_status +i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, + struct i40e_mac_filter *f) { bool enable = f->state == I40E_FILTER_NEW; struct i40e_hw *hw = &vsi->back->hw; @@ -2008,15 +2039,13 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, NULL); } - if (aq_ret) { + if (aq_ret) dev_warn(&vsi->back->pdev->dev, "Error %s setting broadcast promiscuous mode on %s\n", i40e_aq_str(hw, hw->aq.asq_last_status), vsi_name); - f->state = I40E_FILTER_FAILED; - } else if (enable) { - f->state = I40E_FILTER_ACTIVE; - } + + return aq_ret; } /** @@ -2030,7 +2059,8 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct hlist_head tmp_add_list, tmp_del_list; - struct i40e_mac_filter *f, *add_head = NULL; + struct i40e_mac_filter *f; + struct i40e_new_mac_filter *new, *add_head = NULL; struct i40e_hw *hw = &vsi->back->hw; unsigned int failed_filters = 0; unsigned int vlan_filters = 0; @@ -2084,8 +2114,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) continue; } if (f->state == I40E_FILTER_NEW) { - hash_del(&f->hlist); - hlist_add_head(&f->hlist, &tmp_add_list); + /* Create a temporary i40e_new_mac_filter */ + new = kzalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + goto err_no_memory_locked; + + /* Store pointer to the real filter */ + new->f = f; + new->state = f->state; + + /* Add it to the hash list */ + hlist_add_head(&new->hlist, &tmp_add_list); } /* Count the number of active (current and new) VLAN @@ -2178,32 +2217,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) goto err_no_memory; num_add = 0; - hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) { + hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { - f->state = I40E_FILTER_FAILED; + new->state = I40E_FILTER_FAILED; continue; } /* handle broadcast filters by updating the broadcast * promiscuous flag instead of adding a MAC filter. */ - if (is_broadcast_ether_addr(f->macaddr)) { - i40e_aqc_broadcast_filter(vsi, vsi_name, f); + if (is_broadcast_ether_addr(new->f->macaddr)) { + if (i40e_aqc_broadcast_filter(vsi, vsi_name, + new->f)) + new->state = I40E_FILTER_FAILED; + else + new->state = I40E_FILTER_ACTIVE; continue; } /* add to add array */ if (num_add == 0) - add_head = f; + add_head = new; cmd_flags = 0; - ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); - if (f->vlan == I40E_VLAN_ANY) { + ether_addr_copy(add_list[num_add].mac_addr, + new->f->macaddr); + if (new->f->vlan == I40E_VLAN_ANY) { add_list[num_add].vlan_tag = 0; cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { add_list[num_add].vlan_tag = - cpu_to_le16((u16)(f->vlan)); + cpu_to_le16((u16)(new->f->vlan)); } add_list[num_add].queue_number = 0; /* set invalid match method for later detection */ @@ -2229,11 +2273,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) * the VSI's list. 
*/ spin_lock_bh(&vsi->mac_filter_hash_lock); - hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) { - u64 key = i40e_addr_to_hkey(f->macaddr); - - hlist_del(&f->hlist); - hash_add(vsi->mac_filter_hash, &f->hlist, key); + hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { + /* Only update the state if we're still NEW */ + if (new->f->state == I40E_FILTER_NEW) + new->f->state = new->state; + hlist_del(&new->hlist); + kfree(new); } spin_unlock_bh(&vsi->mac_filter_hash_lock); kfree(add_list); @@ -2394,8 +2439,8 @@ err_no_memory: /* Restore elements on the temporary add and delete lists */ spin_lock_bh(&vsi->mac_filter_hash_lock); err_no_memory_locked: - i40e_undo_filter_entries(vsi, &tmp_del_list); - i40e_undo_filter_entries(vsi, &tmp_add_list); + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi, &tmp_add_list); spin_unlock_bh(&vsi->mac_filter_hash_lock); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; -- cgit v1.2.3 From b7eaf8f16e87c3896e9b8c0b2e54d71210d43b48 Mon Sep 17 00:00:00 2001 From: Henry Tieman Date: Fri, 2 Dec 2016 12:33:01 -0800 Subject: i40e: Save more link abilities when using ethtool Ethtool support needs to save more PHY information. The added information includes FEC capabilities and 25G link types. Without this change it is possible to lose 25G or FEC settings by using ethtool. Change-ID: Ie42255b1e901ffbf9583b8c46466a54894114280 Signed-off-by: Henry Tieman Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c4ab3c1ae02a..a22e26200bcc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -803,9 +803,12 @@ static int i40e_set_settings(struct net_device *netdev, if (change || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; + config.phy_type_ext = abilities.phy_type_ext; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; /* save the requested speeds */ hw->phy.link_info.requested_speeds = config.link_speed; -- cgit v1.2.3 From a96e66e702bc615f5927b15c739c91d5098dac59 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Thu, 9 Feb 2017 16:46:45 +0800 Subject: netfilter: nf_ct_sip: Use mod_timer_pending() timer_del() followed by timer_add() can be replaced by mod_timer_pending(). 
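For illustration, the two patterns side by side as a minimal sketch on a generic struct timer_list *t; the expires and found variables mirror the expectation-refresh code below:

	/* before: deactivate, update and re-arm by hand */
	if (!del_timer(t))
		return;				/* not pending, nothing to refresh */
	t->expires = jiffies + expires * HZ;
	add_timer(t);

	/* after: one call that only re-arms a timer which is still pending */
	if (mod_timer_pending(t, jiffies + expires * HZ))
		found = 1;
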
Signed-off-by: Gao Feng Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_sip.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index c3fc14e021ec..24174c520239 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -809,13 +809,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct, exp->tuple.dst.protonum != proto || exp->tuple.dst.u.udp.port != port) continue; - if (!del_timer(&exp->timeout)) - continue; - exp->flags &= ~NF_CT_EXPECT_INACTIVE; - exp->timeout.expires = jiffies + expires * HZ; - add_timer(&exp->timeout); - found = 1; - break; + if (mod_timer_pending(&exp->timeout, jiffies + expires * HZ)) { + exp->flags &= ~NF_CT_EXPECT_INACTIVE; + found = 1; + break; + } } spin_unlock_bh(&nf_conntrack_expect_lock); return found; -- cgit v1.2.3 From 4dee62b1b9b43c9bbf49c93cd114e1bf1334fb6a Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Thu, 9 Feb 2017 21:40:38 +0800 Subject: netfilter: nf_ct_expect: nf_ct_expect_insert() returns void Because nf_ct_expect_insert() always succeeds now, its return value can be just void instead of int. And remove code that checks for its return value. Signed-off-by: Gao Feng Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_expect.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index f8dbacf66795..e19a69787d99 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -353,7 +353,7 @@ void nf_ct_expect_put(struct nf_conntrack_expect *exp) } EXPORT_SYMBOL_GPL(nf_ct_expect_put); -static int nf_ct_expect_insert(struct nf_conntrack_expect *exp) +static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) { struct nf_conn_help *master_help = nfct_help(exp->master); struct nf_conntrack_helper *helper; @@ -380,7 +380,6 @@ static int nf_ct_expect_insert(struct nf_conntrack_expect *exp) add_timer(&exp->timeout); NF_CT_STAT_INC(net, expect_create); - return 0; } /* Race with expectations being used means we could have none to find; OK. */ @@ -464,9 +463,8 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, if (ret <= 0) goto out; - ret = nf_ct_expect_insert(expect); - if (ret < 0) - goto out; + nf_ct_expect_insert(expect); + spin_unlock_bh(&nf_conntrack_expect_lock); nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report); return ret; -- cgit v1.2.3 From b745d0358db44ebcef31478314436d6d4ff93bfa Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 10 Feb 2017 12:08:08 +0100 Subject: netfilter: nfnetlink: get rid of u_intX_t types Use uX types instead. 
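The replacements are the kernel's fixed-width spellings of the same types, so the mapping is one-to-one:

	u_int8_t  -> u8
	u_int16_t -> u16
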
Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index a09fa9fd8f3d..586212ebba9e 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -100,9 +100,9 @@ int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) } EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); -static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) +static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type) { - u_int8_t subsys_id = NFNL_SUBSYS_ID(type); + u8 subsys_id = NFNL_SUBSYS_ID(type); if (subsys_id >= NFNL_SUBSYS_COUNT) return NULL; @@ -111,9 +111,9 @@ static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t t } static inline const struct nfnl_callback * -nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) +nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss) { - u_int8_t cb_id = NFNL_MSG_TYPE(type); + u8 cb_id = NFNL_MSG_TYPE(type); if (cb_id >= ss->cb_count) return NULL; @@ -185,7 +185,7 @@ replay: { int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); - u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); + u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; struct nlattr *attr = (void *)nlh + min_len; int attrlen = nlh->nlmsg_len - min_len; @@ -273,7 +273,7 @@ enum { }; static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, - u_int16_t subsys_id) + u16 subsys_id) { struct sk_buff *oskb = skb; struct net *net = sock_net(skb->sk); @@ -365,7 +365,7 @@ replay: { int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); - u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); + u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; struct nlattr *attr = (void *)nlh + min_len; int attrlen = nlh->nlmsg_len - min_len; @@ -439,7 +439,7 @@ done: static void nfnetlink_rcv(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - u_int16_t res_id; + u16 res_id; int msglen; if (nlh->nlmsg_len < NLMSG_HDRLEN || -- cgit v1.2.3 From 48656835c0405aa2e6c0d6a4305c77b70758d168 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 10 Feb 2017 12:08:14 +0100 Subject: netfilter: nfnetlink: add nfnetlink_rcv_skb_batch() Add new nfnetlink_rcv_skb_batch() to wrap initial nfnetlink batch handling. 
Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink.c | 51 ++++++++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 586212ebba9e..ca645a3b1375 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -436,12 +436,35 @@ done: kfree_skb(skb); } -static void nfnetlink_rcv(struct sk_buff *skb) +static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh) { - struct nlmsghdr *nlh = nlmsg_hdr(skb); + struct nfgenmsg *nfgenmsg; u16 res_id; int msglen; + msglen = NLMSG_ALIGN(nlh->nlmsg_len); + if (msglen > skb->len) + msglen = skb->len; + + if (nlh->nlmsg_len < NLMSG_HDRLEN || + skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg)) + return; + + nfgenmsg = nlmsg_data(nlh); + skb_pull(skb, msglen); + /* Work around old nft using host byte order */ + if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES) + res_id = NFNL_SUBSYS_NFTABLES; + else + res_id = ntohs(nfgenmsg->res_id); + + nfnetlink_rcv_batch(skb, nlh, res_id); +} + +static void nfnetlink_rcv(struct sk_buff *skb) +{ + struct nlmsghdr *nlh = nlmsg_hdr(skb); + if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) return; @@ -451,28 +474,10 @@ static void nfnetlink_rcv(struct sk_buff *skb) return; } - if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) { - struct nfgenmsg *nfgenmsg; - - msglen = NLMSG_ALIGN(nlh->nlmsg_len); - if (msglen > skb->len) - msglen = skb->len; - - if (nlh->nlmsg_len < NLMSG_HDRLEN || - skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg)) - return; - - nfgenmsg = nlmsg_data(nlh); - skb_pull(skb, msglen); - /* Work around old nft using host byte order */ - if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES) - res_id = NFNL_SUBSYS_NFTABLES; - else - res_id = ntohs(nfgenmsg->res_id); - nfnetlink_rcv_batch(skb, nlh, res_id); - } else { + if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) + nfnetlink_rcv_skb_batch(skb, nlh); + else netlink_rcv_skb(skb, &nfnetlink_rcv_msg); - } } #ifdef CONFIG_MODULES -- cgit v1.2.3 From 8c4d4e8b5626fec965fd5034e5bd5e57790f243f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 10 Feb 2017 12:08:17 +0100 Subject: netfilter: nfnetlink: allow to check for generation ID This patch allows userspace to specify the generation ID that has been used to build an incremental batch update. If userspace specifies the generation ID in the batch message as attribute, then nfnetlink compares it to the current generation ID so you make sure that you work against the right baseline. Otherwise, bail out with ERESTART so userspace knows that its changeset is stale and needs to respin. Userspace can do this transparently at the cost of taking slightly more time to refresh caches and rework the changeset. This check is optional, if there is no NFNL_BATCH_GENID attribute in the batch begin message, then no check is performed. 
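For illustration, a minimal userspace sketch of attaching the generation ID to the batch-begin message. It uses libmnl; the build_batch_begin() helper, its buffer handling and the AF_UNSPEC family are illustrative assumptions, not part of this patch:

	#include <stdint.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <libmnl/libmnl.h>
	#include <linux/netfilter/nfnetlink.h>

	static struct nlmsghdr *build_batch_begin(char *buf, uint32_t seq,
						  uint32_t genid)
	{
		struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
		struct nfgenmsg *nfg;

		nlh->nlmsg_type	 = NFNL_MSG_BATCH_BEGIN;
		nlh->nlmsg_flags = NLM_F_REQUEST;
		nlh->nlmsg_seq	 = seq;

		nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
		nfg->nfgen_family = AF_UNSPEC;
		nfg->version	  = NFNETLINK_V0;
		nfg->res_id	  = htons(NFNL_SUBSYS_NFTABLES);

		/* parsed by the kernel via nla_get_be32(), hence htonl() */
		mnl_attr_put_u32(nlh, NFNL_BATCH_GENID, htonl(genid));

		return nlh;
	}

A sender that then gets -ERESTART back for the batch knows the genid it cached is stale, and can refresh its ruleset cache and resend the changeset.
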
Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nfnetlink.h | 1 + include/uapi/linux/netfilter/nfnetlink.h | 12 ++++++++++++ net/netfilter/nfnetlink.c | 31 +++++++++++++++++++++++++++---- 3 files changed, 40 insertions(+), 4 deletions(-) diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 1d82dd5e9a08..1b49209dd5c7 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -28,6 +28,7 @@ struct nfnetlink_subsystem { const struct nfnl_callback *cb; /* callback for individual types */ int (*commit)(struct net *net, struct sk_buff *skb); int (*abort)(struct net *net, struct sk_buff *skb); + bool (*valid_genid)(struct net *net, u32 genid); }; int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h index 4bb8cb7730e7..a09906a30d77 100644 --- a/include/uapi/linux/netfilter/nfnetlink.h +++ b/include/uapi/linux/netfilter/nfnetlink.h @@ -65,4 +65,16 @@ struct nfgenmsg { #define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE #define NFNL_MSG_BATCH_END NLMSG_MIN_TYPE+1 +/** + * enum nfnl_batch_attributes - nfnetlink batch netlink attributes + * + * @NFNL_BATCH_GENID: generation ID for this changeset (NLA_U32) + */ +enum nfnl_batch_attributes { + NFNL_BATCH_UNSPEC, + NFNL_BATCH_GENID, + __NFNL_BATCH_MAX +}; +#define NFNL_BATCH_MAX (__NFNL_BATCH_MAX - 1) + #endif /* _UAPI_NFNETLINK_H */ diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index ca645a3b1375..a2148d0bc50e 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -3,7 +3,7 @@ * * (C) 2001 by Jay Schulist , * (C) 2002-2005 by Harald Welte - * (C) 2005,2007 by Pablo Neira Ayuso + * (C) 2005-2017 by Pablo Neira Ayuso * * Initial netfilter messages via netlink development funded and * generally made possible by Network Robots, Inc. 
(www.networkrobots.com) @@ -273,7 +273,7 @@ enum { }; static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, - u16 subsys_id) + u16 subsys_id, u32 genid) { struct sk_buff *oskb = skb; struct net *net = sock_net(skb->sk); @@ -315,6 +315,12 @@ replay: return kfree_skb(skb); } + if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) { + nfnl_unlock(subsys_id); + netlink_ack(oskb, nlh, -ERESTART); + return kfree_skb(skb); + } + while (skb->len >= nlmsg_total_size(0)) { int msglen, type; @@ -436,11 +442,20 @@ done: kfree_skb(skb); } +static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = { + [NFNL_BATCH_GENID] = { .type = NLA_U32 }, +}; + static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh) { + int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); + struct nlattr *attr = (void *)nlh + min_len; + struct nlattr *cda[NFNL_BATCH_MAX + 1]; + int attrlen = nlh->nlmsg_len - min_len; struct nfgenmsg *nfgenmsg; + int msglen, err; + u32 gen_id = 0; u16 res_id; - int msglen; msglen = NLMSG_ALIGN(nlh->nlmsg_len); if (msglen > skb->len) @@ -450,6 +465,14 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh) skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg)) return; + err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy); + if (err < 0) { + netlink_ack(skb, nlh, err); + return; + } + if (cda[NFNL_BATCH_GENID]) + gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID])); + nfgenmsg = nlmsg_data(nlh); skb_pull(skb, msglen); /* Work around old nft using host byte order */ @@ -458,7 +481,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh) else res_id = ntohs(nfgenmsg->res_id); - nfnetlink_rcv_batch(skb, nlh, res_id); + nfnetlink_rcv_batch(skb, nlh, res_id, gen_id); } static void nfnetlink_rcv(struct sk_buff *skb) -- cgit v1.2.3 From 74e8bcd21c40dbbb3d74fa904536f8a3bddafed3 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 10 Feb 2017 12:08:20 +0100 Subject: netfilter: nf_tables: add check_genid to the nfnetlink subsystem This patch implements the check generation id as provided by nfnetlink. This allows us to reject ruleset updates against stale baseline, so userspace can retry update with a fresh ruleset cache. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index cb6ae46f6c48..71c60a04b66b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4972,6 +4972,11 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) return 0; } +static bool nf_tables_valid_genid(struct net *net, u32 genid) +{ + return net->nft.base_seq == genid; +} + static const struct nfnetlink_subsystem nf_tables_subsys = { .name = "nf_tables", .subsys_id = NFNL_SUBSYS_NFTABLES, @@ -4979,6 +4984,7 @@ static const struct nfnetlink_subsystem nf_tables_subsys = { .cb = nf_tables_cb, .commit = nf_tables_commit, .abort = nf_tables_abort, + .valid_genid = nf_tables_valid_genid, }; int nft_chain_validate_dependency(const struct nft_chain *chain, -- cgit v1.2.3 From 1a94e38d254b3622d5d53f74b3b716b0fcab0ba8 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 10 Feb 2017 12:08:23 +0100 Subject: netfilter: nf_tables: add NFTA_RULE_ID attribute This new attribute allows us to uniquely identify a rule in transaction. 
Robots may trigger an insertion followed by deletion in a batch, in that scenario we still don't have a public rule handle that we can use to delete the rule. This is similar to the NFTA_SET_ID attribute that allows us to refer to an anonymous set from a batch. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 3 +++ include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 26 ++++++++++++++++++++++++++ 3 files changed, 31 insertions(+) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 21ce50e6d0c5..ac84686aaafb 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1202,10 +1202,13 @@ struct nft_trans { struct nft_trans_rule { struct nft_rule *rule; + u32 rule_id; }; #define nft_trans_rule(trans) \ (((struct nft_trans_rule *)trans->data)->rule) +#define nft_trans_rule_id(trans) \ + (((struct nft_trans_rule *)trans->data)->rule_id) struct nft_trans_set { struct nft_set *set; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 207951516ede..05215d30fe5c 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -207,6 +207,7 @@ enum nft_chain_attributes { * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes) * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64) * @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN) + * @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32) */ enum nft_rule_attributes { NFTA_RULE_UNSPEC, @@ -218,6 +219,7 @@ enum nft_rule_attributes { NFTA_RULE_POSITION, NFTA_RULE_USERDATA, NFTA_RULE_PAD, + NFTA_RULE_ID, __NFTA_RULE_MAX }; #define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 71c60a04b66b..6c782532615f 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -240,6 +240,10 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, if (trans == NULL) return NULL; + if (msg_type == NFT_MSG_NEWRULE && ctx->nla[NFTA_RULE_ID] != NULL) { + nft_trans_rule_id(trans) = + ntohl(nla_get_be32(ctx->nla[NFTA_RULE_ID])); + } nft_trans_rule(trans) = rule; list_add_tail(&trans->list, &ctx->net->nft.commit_list); @@ -2293,6 +2297,22 @@ err1: return err; } +static struct nft_rule *nft_rule_lookup_byid(const struct net *net, + const struct nlattr *nla) +{ + u32 id = ntohl(nla_get_be32(nla)); + struct nft_trans *trans; + + list_for_each_entry(trans, &net->nft.commit_list, list) { + struct nft_rule *rule = nft_trans_rule(trans); + + if (trans->msg_type == NFT_MSG_NEWRULE && + id == nft_trans_rule_id(trans)) + return rule; + } + return ERR_PTR(-ENOENT); +} + static int nf_tables_delrule(struct net *net, struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -2330,6 +2350,12 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk, if (IS_ERR(rule)) return PTR_ERR(rule); + err = nft_delrule(&ctx, rule); + } else if (nla[NFTA_RULE_ID]) { + rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]); + if (IS_ERR(rule)) + return PTR_ERR(rule); + err = nft_delrule(&ctx, rule); } else { err = nft_delrule_by_chain(&ctx); -- cgit v1.2.3 From fc52497eb9dd031de9d17af54ac8f4078663a9f5 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 10 Feb 2017 12:08:29 +0100 Subject: netfilter: update MAINTAINERS It's been a 
while since Patrick has been suspended as coreteam member [1]. Update this file to remove him. While at this, remove references to all foo-tables variants, given the project hosts more than just that, eg. ipset, conntrack, ... [1] https://marc.info/?l=netfilter-devel&m=146887464512702 Signed-off-by: Pablo Neira Ayuso --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index a9368bba9b37..5864bbd99f8f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8579,9 +8579,8 @@ F: Documentation/networking/s2io.txt F: Documentation/networking/vxge.txt F: drivers/net/ethernet/neterion/ -NETFILTER ({IP,IP6,ARP,EB,NF}TABLES) +NETFILTER M: Pablo Neira Ayuso -M: Patrick McHardy M: Jozsef Kadlecsik L: netfilter-devel@vger.kernel.org L: coreteam@netfilter.org -- cgit v1.2.3 From 7286ff7fde9f963736c7e575572899d8e16b06b7 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 10 Feb 2017 19:59:36 +0100 Subject: netfilter: nf_tables: honor NFT_SET_OBJECT in set backend selection Check for NFT_SET_OBJECT feature flag, otherwise we may end up selecting the wrong set backend. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 3 ++- net/netfilter/nft_set_hash.c | 2 +- net/netfilter/nft_set_rbtree.c | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 6c782532615f..ff7304ae58ac 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2424,7 +2424,8 @@ nft_select_set_ops(const struct nlattr * const nla[], features = 0; if (nla[NFTA_SET_FLAGS] != NULL) { features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS])); - features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT; + features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT | + NFT_SET_OBJECT; } bops = NULL; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 6938bc890f31..5f652720fc78 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -404,7 +404,7 @@ static struct nft_set_ops nft_hash_ops __read_mostly = { .lookup = nft_hash_lookup, .update = nft_hash_update, .walk = nft_hash_walk, - .features = NFT_SET_MAP | NFT_SET_TIMEOUT, + .features = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT, .owner = THIS_MODULE, }; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 3387ed7dd231..71e8fb886a73 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -310,7 +310,7 @@ static struct nft_set_ops nft_rbtree_ops __read_mostly = { .activate = nft_rbtree_activate, .lookup = nft_rbtree_lookup, .walk = nft_rbtree_walk, - .features = NFT_SET_INTERVAL | NFT_SET_MAP, + .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT, .owner = THIS_MODULE, }; -- cgit v1.2.3 From bac9a7e0f5d6da82478d5e0a2a236158f42d5757 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 12 Feb 2017 19:18:10 -0500 Subject: bnxt_en: Update to firmware interface spec 1.7.0. The new spec has NVRAM defragmentation support which will be used in the next patch to improve ethtool flash operation. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 +- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 437 +++++++++++++++++++++----- 3 files changed, 363 insertions(+), 88 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index cda1c787e8e1..8ac5987d7fe9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,6 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -3974,7 +3975,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL; + req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; req.length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; @@ -3993,7 +3994,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, if (rc || err) { switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_CMPL: + case RING_FREE_REQ_RING_TYPE_L2_CMPL: netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", rc, err); return -1; @@ -4137,7 +4138,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, if (rc || error_code) { switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_CMPL: + case RING_FREE_REQ_RING_TYPE_L2_CMPL: netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", rc); return rc; @@ -4226,7 +4227,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) if (ring->fw_ring_id != INVALID_HW_RING_ID) { hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_CMPL, + RING_FREE_REQ_RING_TYPE_L2_CMPL, INVALID_HW_RING_ID); ring->fw_ring_id = INVALID_HW_RING_ID; bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9f07b9cf8965..eaed70061dfc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1,6 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,10 +12,10 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.6.0" +#define DRV_MODULE_VERSION "1.7.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 6 +#define DRV_VER_MIN 7 #define DRV_VER_UPD 0 struct tx_bd { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 5df32ab64f0c..6e275c23d68b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -11,12 +11,12 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.6.1 */ +/* HSI and HWRM Specification 1.7.0 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 6 -#define HWRM_VERSION_UPDATE 1 +#define HWRM_VERSION_MINOR 7 +#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_STR "1.6.1" +#define HWRM_VERSION_STR "1.7.0" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). 
Need to cast it the size of the field if needed. @@ -834,20 +834,32 @@ struct hwrm_func_qcfg_output { __le32 min_bw; #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 - #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID __le32 max_bw; #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0 - #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID @@ -923,20 +935,32 @@ struct hwrm_func_cfg_input { __le32 min_bw; #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0 - #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID __le32 max_bw; #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0 - #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL + #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL 
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID @@ -1531,6 +1555,20 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -2431,20 +2469,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id0_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id0_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 
0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2459,20 +2509,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id1_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id1_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2487,20 +2549,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id2_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id2_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2515,20 +2589,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id3_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id3_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2543,20 +2629,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id4_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) 
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id4_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2571,20 +2669,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id5_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id5_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2599,20 +2709,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id6_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id6_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2627,20 +2749,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id7_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id7_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 
29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2682,20 +2816,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id0_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id0_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2710,20 +2856,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id1_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 
0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id1_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2738,20 +2896,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id2_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id2_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2766,20 +2936,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id3_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id3_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2794,20 +2976,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id4_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id4_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2822,20 +3016,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id5_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id5_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2850,20 +3056,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id6_min_bw; #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id6_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2878,20 +3096,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id7_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id7_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID @@ -3034,6 +3264,7 @@ struct hwrm_vnic_qcaps_output { u8 unused_0; u8 unused_1; __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL @@ -3241,9 +3472,10 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL u8 ring_type; - #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL + #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL + #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL u8 unused_0; __le16 unused_1; __le64 page_tbl_addr; @@ -3277,10 +3509,16 @@ struct hwrm_ring_alloc_input { __le32 max_bw; #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 - #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID @@ -3315,9 +3553,10 @@ struct hwrm_ring_free_input { __le16 target_id; __le64 resp_addr; u8 ring_type; - #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL + #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_FREE_REQ_RING_TYPE_TX 0x1UL #define RING_FREE_REQ_RING_TYPE_RX 0x2UL + #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -3415,9 +3654,10 @@ struct hwrm_ring_reset_input { __le16 target_id; __le64 resp_addr; u8 ring_type; - #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_RESET_REQ_RING_TYPE_TX 0x1UL #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -3846,6 +4086,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __le32 flags; #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -3946,7 +4187,7 @@ struct hwrm_cfa_ntuple_filter_free_output { }; /* hwrm_cfa_ntuple_filter_cfg */ -/* Input (40 bytes) */ +/* Input (48 bytes) */ struct hwrm_cfa_ntuple_filter_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -3956,10 +4197,14 @@ struct hwrm_cfa_ntuple_filter_cfg_input { __le32 enables; #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL __le32 unused_0; __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + __le16 unused_1[3]; }; /* Output (16 bytes) */ @@ -4342,6 +4587,7 @@ struct hwrm_fw_get_structured_data_input { #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL u8 count; u8 unused_0; }; @@ -4828,6 +5074,9 @@ struct hwrm_nvm_install_update_input { #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL __le16 flags; + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL __le16 unused_0; }; @@ -4854,6 +5103,15 @@ struct hwrm_nvm_install_update_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_nvm_install_update_cmd_err { + u8 code; + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + u8 unused_0[7]; +}; + /* Hardware Resource Manager Specification */ /* Input (16 bytes) */ struct input { @@ -4981,11 +5239,26 @@ 
struct cmd_nums {
 #define HWRM_WOL_FILTER_FREE (0xf1UL)
 #define HWRM_WOL_FILTER_QCFG (0xf2UL)
 #define HWRM_WOL_REASON_QCFG (0xf3UL)
+ #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL)
+ #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL)
+ #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
+ #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
+ #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
+ #define HWRM_CFA_VF_PAIR_FREE (0x101UL)
+ #define HWRM_CFA_VF_PAIR_INFO (0x102UL)
+ #define HWRM_CFA_FLOW_ALLOC (0x103UL)
+ #define HWRM_CFA_FLOW_FREE (0x104UL)
+ #define HWRM_CFA_FLOW_FLUSH (0x105UL)
+ #define HWRM_CFA_FLOW_STATS (0x106UL)
+ #define HWRM_CFA_FLOW_INFO (0x107UL)
 #define HWRM_DBG_READ_DIRECT (0xff10UL)
 #define HWRM_DBG_READ_INDIRECT (0xff11UL)
 #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
 #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
 #define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
+ #define HWRM_NVM_FLUSH (0xfff0UL)
 #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
 #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
 #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
-- cgit v1.2.3

From cb4d1d6261453677feb54e7a09c23fc7648dd6bc Mon Sep 17 00:00:00 2001
From: Kshitij Soni
Date: Sun, 12 Feb 2017 19:18:11 -0500
Subject: bnxt_en: Retry failed NVM_INSTALL_UPDATE with defragmentation flag.

If the HWRM_NVM_INSTALL_UPDATE command fails with the error code
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR, retry the command with a new
flag to allow defragmentation. Since we are checking the response for
error code, we also need to take the mutex until we finish reading the
response.

Signed-off-by: Kshitij Soni
Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 32 ++++++++++++++++++-----
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7aa248db10c6..4b45b885b1e3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1578,17 +1578,37 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
 	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
 	install.install_type = cpu_to_le32(install_type);
 
-	rc = hwrm_send_message(bp, &install, sizeof(install),
-			       INSTALL_PACKAGE_TIMEOUT);
-	if (rc)
-		return -EOPNOTSUPP;
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &install, sizeof(install),
+				INSTALL_PACKAGE_TIMEOUT);
+	if (rc) {
+		rc = -EOPNOTSUPP;
+		goto flash_pkg_exit;
+	}
+
+	if (resp->error_code) {
+		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+
+		if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+			install.flags |= cpu_to_le16(
+			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+			rc = _hwrm_send_message(bp, &install, sizeof(install),
+						INSTALL_PACKAGE_TIMEOUT);
+			if (rc) {
+				rc = -EOPNOTSUPP;
+				goto flash_pkg_exit;
+			}
+		}
+	}
 
 	if (resp->result) {
 		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
 			   (s8)resp->result, (int)resp->problem_item);
-		return -ENOPKG;
+		rc = -ENOPKG;
 	}
-	return 0;
+flash_pkg_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
 }
 
 static int bnxt_flash_device(struct net_device *dev,
-- cgit v1.2.3
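For reference only, a minimal sketch (not part of any patch in this series) of how a
caller might interpret the command-specific error codes added above in
struct hwrm_nvm_install_update_cmd_err; the helper name bnxt_nvm_defrag_retry_needed()
is hypothetical:

	/* Hypothetical helper: decide whether HWRM_NVM_INSTALL_UPDATE is worth
	 * retrying with NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG set,
	 * based on the cmd_err byte of the HWRM error output used in the
	 * patch above.
	 */
	static bool bnxt_nvm_defrag_retry_needed(struct hwrm_err_output *resp)
	{
		switch (resp->cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			return true;	/* the patch retries this case with defrag allowed */
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE:
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN:
		default:
			return false;	/* other codes are not retried in this sketch */
		}
	}

The patch above open-codes the same decision directly in bnxt_flash_package_from_file()
before resending the command with the ALLOWED_TO_DEFRAG flag.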
From a79a5276aa2f844bd368c1d3d5a625e1fbefd989 Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Sun, 12 Feb 2017 19:18:12 -0500
Subject: bnxt_en: Fix ethtool -l pre-set max combined channel.

With commit d1e7925e6d80 ("bnxt_en: Centralize logic to reserve rings."),
ring allocation for combined rings has become stricter. A combined ring
must now have an rx-tx ring pair. The pre-set max. for combined rings
should now be min(rx, tx).

Fixes: d1e7925e6d80 ("bnxt_en: Centralize logic to reserve rings.")
Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 4b45b885b1e3..6903a873f072 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -357,7 +357,7 @@ static void bnxt_get_channels(struct net_device *dev,
 	int max_rx_rings, max_tx_rings, tcs;
 
 	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
-	channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
+	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
 
 	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
 		max_rx_rings = 0;
-- cgit v1.2.3

From 964fd4801d40ead69a447482c0dd0cd4be495e47 Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Sun, 12 Feb 2017 19:18:13 -0500
Subject: bnxt_en: Allow NETIF_F_NTUPLE to be enabled on VFs.

Commit ae10ae740ad2 ("bnxt_en: Add new hardware RFS mode.") has added
code to allow NTUPLE to be enabled on VFs. So we now remove the
BNXT_VF() check in rfs_capable() to allow NTUPLE on VFs.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8ac5987d7fe9..516c5d7b75f7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6291,7 +6291,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
 #ifdef CONFIG_RFS_ACCEL
 	int vnics, max_vnics, max_rss_ctxs;
 
-	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
+	if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
 		return false;
 
 	vnics = 1 + bp->rx_nr_rings;
-- cgit v1.2.3

From 61aad724ec0a685bc83b02b059a3ca0ad3bde6b0 Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Sun, 12 Feb 2017 19:18:14 -0500
Subject: bnxt_en: Add hardware NTUPLE filter for encapsulated packets.

If skb_flow_dissect_flow_keys() returns with the encapsulation flag
set, pass the information to the firmware to setup the NTUPLE filter
accordingly.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 516c5d7b75f7..f3d829f71f77 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3456,6 +3456,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
 
+#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
+	CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
+
 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
 {
@@ -3496,6 +3499,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	}
+	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
+		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+		req.tunnel_type =
+			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
+	}
 
	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
@@ -6869,6 +6877,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
+	    keys1->control.flags == keys2->control.flags &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;
@@ -6886,9 +6895,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;
 
-	if (skb->encapsulation)
-		return -EPROTONOSUPPORT;
-
	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;
@@ -6927,6 +6933,11 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
+	if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
+	    bp->hwrm_spec_code < 0x10601) {
+		rc = -EPROTONOSUPPORT;
+		goto err_free;
+	}
 
	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
-- cgit v1.2.3

From 33dac24abbd5a77eefca18fb7ebbd01a3cf1b343 Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Sun, 12 Feb 2017 19:18:15 -0500
Subject: bnxt_en: Do not setup PHY unless driving a single PF.

If it is a VF or an NPAR function, the firmware call to setup the PHY
will fail. Adding this check will prevent unnecessary firmware calls to
setup the PHY unless calling from the PF. This will also eliminate many
unnecessary warning messages when the call from a VF or NPAR fails.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f3d829f71f77..afd119097344 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5853,6 +5853,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp) rc); return rc; } + if (!BNXT_SINGLE_PF(bp)) + return 0; + if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != link_info->req_flow_ctrl) -- cgit v1.2.3 From e70c752f88ed23e6a0f081fa408282c2450c8ce9 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 12 Feb 2017 19:18:16 -0500 Subject: bnxt_en: Print FEC settings as part of the linkup dmesg. Print FEC (Forward Error Correction) autoneg and encoding settings during link up. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13 ++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 ++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index afd119097344..9f1dfbe5aae9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5437,7 +5437,7 @@ static void bnxt_report_link(struct bnxt *bp) if (bp->link_info.link_up) { const char *duplex; const char *flow_ctrl; - u16 speed; + u16 speed, fec; netif_carrier_on(bp->dev); if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) @@ -5459,6 +5459,12 @@ static void bnxt_report_link(struct bnxt *bp) netdev_info(bp->dev, "EEE is %s\n", bp->eee.eee_active ? "active" : "not active"); + fec = bp->link_info.fec_cfg; + if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) + netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", + (fec & BNXT_FEC_AUTONEG) ? "on" : "off", + (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : + (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); } else { netif_carrier_off(bp->dev); netdev_err(bp->dev, "NIC Link is Down\n"); @@ -5583,6 +5589,11 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) } } } + + link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; + if (bp->hwrm_spec_code >= 0x10504) + link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); + /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == BNXT_LINK_LINK) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index eaed70061dfc..faf26a2f726b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -867,6 +867,10 @@ struct bnxt_link_info { u16 force_link_speed; u32 preemphasis; u8 module_status; + u16 fec_cfg; +#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED +#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED +#define BNXT_FEC_ENC_RS PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED /* copy of requested setting from ethtool cmd */ u8 autoneg; -- cgit v1.2.3 From b451c8b69e70de299aa6061e1fa6afbb4d7c1f9e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 12 Feb 2017 19:18:17 -0500 Subject: bnxt_en: Fix bnxt_setup_tc() error message. Add proper punctuation to make the message more clear. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9f1dfbe5aae9..c899d61bd177 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6833,7 +6833,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) int rc; if (tc > bp->max_tc) { - netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", + netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", tc, bp->max_tc); return -EINVAL; } -- cgit v1.2.3 From 32b40798c1b40343641f04cdfd09652af70ea0e9 Mon Sep 17 00:00:00 2001 From: Deepak Khungar Date: Sun, 12 Feb 2017 19:18:18 -0500 Subject: bnxt_en: Added PCI IDs for BCM57452 and BCM57454 ASICs Signed-off-by: Deepak Khungar Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c899d61bd177..71f9a1894db9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -99,6 +99,8 @@ enum board_idx { BCM57407_NPAR, BCM57414_NPAR, BCM57416_NPAR, + BCM57452, + BCM57454, NETXTREME_E_VF, NETXTREME_C_VF, }; @@ -133,6 +135,8 @@ static const struct { { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, { "Broadcom NetXtreme-E Ethernet Virtual Function" }, { "Broadcom NetXtreme-C Ethernet Virtual Function" }, }; @@ -168,6 +172,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, + { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, -- cgit v1.2.3 From 2fb93a1a9f7e746a9977e04ec4dfae31624c767a Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 9 Feb 2017 20:25:06 +0100 Subject: net: micrel: ksz884x: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/micrel/ksz884x.c | 62 +++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 97f6ef1fa7d0..ee38c18c2d2d 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -5944,7 +5944,7 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 }; /* These functions use the MII functions in mii.c. */ /** - * netdev_get_settings - get network device settings + * netdev_get_link_ksettings - get network device settings * @dev: Network device. * @cmd: Ethtool command. 
* @@ -5952,23 +5952,26 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 }; * * Return 0 if successful; otherwise an error code. */ -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; mutex_lock(&hw_priv->lock); - mii_ethtool_gset(&priv->mii_if, cmd); - cmd->advertising |= SUPPORTED_TP; + mii_ethtool_get_link_ksettings(&priv->mii_if, cmd); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); mutex_unlock(&hw_priv->lock); /* Save advertised settings for workaround in next function. */ - priv->advertising = cmd->advertising; + ethtool_convert_link_mode_to_legacy_u32(&priv->advertising, + cmd->link_modes.advertising); + return 0; } /** - * netdev_set_settings - set network device settings + * netdev_set_link_ksettings - set network device settings * @dev: Network device. * @cmd: Ethtool command. * @@ -5976,54 +5979,65 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) * * Return 0 if successful; otherwise an error code. */ -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; struct ksz_port *port = &priv->port; - u32 speed = ethtool_cmd_speed(cmd); + struct ethtool_link_ksettings copy_cmd; + u32 speed = cmd->base.speed; + u32 advertising; int rc; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* * ethtool utility does not change advertised setting if auto * negotiation is not specified explicitly. 
*/ - if (cmd->autoneg && priv->advertising == cmd->advertising) { - cmd->advertising |= ADVERTISED_ALL; + if (cmd->base.autoneg && priv->advertising == advertising) { + advertising |= ADVERTISED_ALL; if (10 == speed) - cmd->advertising &= + advertising &= ~(ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half); else if (100 == speed) - cmd->advertising &= + advertising &= ~(ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half); - if (0 == cmd->duplex) - cmd->advertising &= + if (0 == cmd->base.duplex) + advertising &= ~(ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Full); - else if (1 == cmd->duplex) - cmd->advertising &= + else if (1 == cmd->base.duplex) + advertising &= ~(ADVERTISED_100baseT_Half | ADVERTISED_10baseT_Half); } mutex_lock(&hw_priv->lock); - if (cmd->autoneg && - (cmd->advertising & ADVERTISED_ALL) == - ADVERTISED_ALL) { + if (cmd->base.autoneg && + (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) { port->duplex = 0; port->speed = 0; port->force_link = 0; } else { - port->duplex = cmd->duplex + 1; + port->duplex = cmd->base.duplex + 1; if (1000 != speed) port->speed = speed; - if (cmd->autoneg) + if (cmd->base.autoneg) port->force_link = 0; else port->force_link = 1; } - rc = mii_ethtool_sset(&priv->mii_if, cmd); + + memcpy(©_cmd, cmd, sizeof(copy_cmd)); + ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising, + advertising); + rc = mii_ethtool_set_link_ksettings( + &priv->mii_if, + (const struct ethtool_link_ksettings *)©_cmd); mutex_unlock(&hw_priv->lock); return rc; } @@ -6597,8 +6611,6 @@ static int netdev_set_features(struct net_device *dev, } static const struct ethtool_ops netdev_ethtool_ops = { - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_drvinfo = netdev_get_drvinfo, @@ -6617,6 +6629,8 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_strings = netdev_get_strings, .get_sset_count = netdev_get_sset_count, .get_ethtool_stats = netdev_get_ethtool_stats, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; /* -- cgit v1.2.3 From 8ff638e4bba71acf9e53f8508c21ea16b45b8871 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 9 Feb 2017 22:02:47 +0100 Subject: net: microchip: enc28j60: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/enc28j60.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 045b9106c0ff..f6ecfa778660 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -1487,27 +1487,30 @@ enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) } static int -enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +enc28j60_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct enc28j60_net *priv = netdev_priv(dev); - cmd->transceiver = XCVR_INTERNAL; - cmd->supported = SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_TP; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = priv->full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; - cmd->port = PORT_TP; - cmd->autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + + cmd->base.speed = SPEED_10; + cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_TP; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } static int -enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +enc28j60_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - return enc28j60_setlink(dev, cmd->autoneg, - ethtool_cmd_speed(cmd), cmd->duplex); + return enc28j60_setlink(dev, cmd->base.autoneg, + cmd->base.speed, cmd->base.duplex); } static u32 enc28j60_get_msglevel(struct net_device *dev) @@ -1523,11 +1526,11 @@ static void enc28j60_set_msglevel(struct net_device *dev, u32 val) } static const struct ethtool_ops enc28j60_ethtool_ops = { - .get_settings = enc28j60_get_settings, - .set_settings = enc28j60_set_settings, .get_drvinfo = enc28j60_get_drvinfo, .get_msglevel = enc28j60_get_msglevel, .set_msglevel = enc28j60_set_msglevel, + .get_link_ksettings = enc28j60_get_link_ksettings, + .set_link_ksettings = enc28j60_set_link_ksettings, }; static int enc28j60_chipset_init(struct net_device *dev) -- cgit v1.2.3 From 60fdcfa5ca271a6ee20b7d49312a3408a4e05d0f Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 9 Feb 2017 22:42:18 +0100 Subject: net: microchip: encx24j600: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/encx24j600.c | 32 ++++++++++++++++------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index fbce6166504e..f831238d9793 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -940,29 +940,33 @@ static void encx24j600_get_drvinfo(struct net_device *dev, sizeof(info->bus_info)); } -static int encx24j600_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int encx24j600_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct encx24j600_priv *priv = netdev_priv(dev); + u32 supported; - cmd->transceiver = XCVR_INTERNAL; - cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP; - ethtool_cmd_speed_set(cmd, priv->speed); - cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; - cmd->port = PORT_TP; - cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + + cmd->base.speed = priv->speed; + cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_TP; + cmd->base.autoneg = priv->autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } -static int encx24j600_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +encx24j600_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - return encx24j600_setlink(dev, cmd->autoneg, - ethtool_cmd_speed(cmd), cmd->duplex); + return encx24j600_setlink(dev, cmd->base.autoneg, + cmd->base.speed, cmd->base.duplex); } static u32 encx24j600_get_msglevel(struct net_device *dev) @@ -980,13 +984,13 @@ static void encx24j600_set_msglevel(struct net_device *dev, u32 val) } static const struct ethtool_ops encx24j600_ethtool_ops = { - .get_settings = encx24j600_get_settings, - .set_settings = encx24j600_set_settings, .get_drvinfo = encx24j600_get_drvinfo, .get_msglevel = encx24j600_get_msglevel, .set_msglevel = encx24j600_set_msglevel, .get_regs_len = encx24j600_get_regs_len, .get_regs = encx24j600_get_regs, + .get_link_ksettings = encx24j600_get_link_ksettings, + .set_link_ksettings = encx24j600_set_link_ksettings, }; static const struct net_device_ops encx24j600_netdev_ops = { -- cgit v1.2.3 From cf901656e773d842b4e725652d9d95e3f674103e Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 9 Feb 2017 23:17:23 +0100 Subject: net: myricom: myri10ge: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Acked-by: Hyong-Youb Kim Signed-off-by: David S. Miller --- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1139d1803e7e..b171ed2015fe 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1610,15 +1610,16 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) } static int -myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +myri10ge_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct myri10ge_priv *mgp = netdev_priv(netdev); char *ptr; int i; - cmd->autoneg = AUTONEG_DISABLE; - ethtool_cmd_speed_set(cmd, SPEED_10000); - cmd->duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; /* * parse the product code to deterimine the interface type @@ -1643,16 +1644,12 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) ptr++; if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') { /* We've found either an XFP, quad ribbon fiber, or SFP+ */ - cmd->port = PORT_FIBRE; - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); } else { - cmd->port = PORT_OTHER; + cmd->base.port = PORT_OTHER; } - if (*ptr == 'R' || *ptr == 'S') - cmd->transceiver = XCVR_EXTERNAL; - else - cmd->transceiver = XCVR_INTERNAL; return 0; } @@ -1925,7 +1922,6 @@ myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) } static const struct ethtool_ops myri10ge_ethtool_ops = { - .get_settings = myri10ge_get_settings, .get_drvinfo = myri10ge_get_drvinfo, .get_coalesce = myri10ge_get_coalesce, .set_coalesce = 
myri10ge_set_coalesce, @@ -1939,6 +1935,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { .set_msglevel = myri10ge_set_msglevel, .get_msglevel = myri10ge_get_msglevel, .set_phys_id = myri10ge_phys_id, + .get_link_ksettings = myri10ge_get_link_ksettings, }; static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) -- cgit v1.2.3 From 586b6e274a41dc5756dd03f1f6a2a6199909d51a Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 9 Feb 2017 23:58:25 +0100 Subject: net: natsemi: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/natsemi/natsemi.c | 119 +++++++++++++++++++-------------- 1 file changed, 67 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 8e72679c015f..18af2a23a933 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -640,8 +640,10 @@ static int netdev_set_wol(struct net_device *dev, u32 newval); static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur); static int netdev_set_sopass(struct net_device *dev, u8 *newval); static int netdev_get_sopass(struct net_device *dev, u8 *data); -static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd); -static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd); +static int netdev_get_ecmd(struct net_device *dev, + struct ethtool_link_ksettings *ecmd); +static int netdev_set_ecmd(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd); static void enable_wol_mode(struct net_device *dev, int enable_intr); static int netdev_close(struct net_device *dev); static int netdev_get_regs(struct net_device *dev, u8 *buf); @@ -2584,7 +2586,8 @@ static int get_eeprom_len(struct net_device *dev) return np->eeprom_size; } -static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); @@ -2593,7 +2596,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return 0; } -static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); int res; @@ -2689,8 +2693,6 @@ static const struct ethtool_ops ethtool_ops = { .get_drvinfo = get_drvinfo, .get_regs_len = get_regs_len, .get_eeprom_len = get_eeprom_len, - .get_settings = get_settings, - .set_settings = set_settings, .get_wol = get_wol, .set_wol = set_wol, .get_regs = get_regs, @@ -2699,6 +2701,8 @@ static const struct ethtool_ops ethtool_ops = { .nway_reset = nway_reset, .get_link = get_link, .get_eeprom = get_eeprom, + .get_link_ksettings = get_link_ksettings, + .set_link_ksettings = set_link_ksettings, }; static int netdev_set_wol(struct net_device *dev, u32 newval) @@ -2828,29 +2832,32 @@ static int netdev_get_sopass(struct net_device *dev, u8 *data) return 0; } -static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +static int netdev_get_ecmd(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = 
netdev_priv(dev); + u32 supported, advertising; u32 tmp; - ecmd->port = dev->if_port; - ethtool_cmd_speed_set(ecmd, np->speed); - ecmd->duplex = np->duplex; - ecmd->autoneg = np->autoneg; - ecmd->advertising = 0; + ecmd->base.port = dev->if_port; + ecmd->base.speed = np->speed; + ecmd->base.duplex = np->duplex; + ecmd->base.autoneg = np->autoneg; + advertising = 0; + if (np->advertising & ADVERTISE_10HALF) - ecmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if (np->advertising & ADVERTISE_10FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (np->advertising & ADVERTISE_100HALF) - ecmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if (np->advertising & ADVERTISE_100FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - ecmd->supported = (SUPPORTED_Autoneg | + advertising |= ADVERTISED_100baseT_Full; + supported = (SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE); - ecmd->phy_address = np->phy_addr_external; + ecmd->base.phy_address = np->phy_addr_external; /* * We intentionally report the phy address of the external * phy, even if the internal phy is used. This is necessary @@ -2870,62 +2877,70 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) */ /* set information based on active port type */ - switch (ecmd->port) { + switch (ecmd->base.port) { default: case PORT_TP: - ecmd->advertising |= ADVERTISED_TP; - ecmd->transceiver = XCVR_INTERNAL; + advertising |= ADVERTISED_TP; break; case PORT_MII: - ecmd->advertising |= ADVERTISED_MII; - ecmd->transceiver = XCVR_EXTERNAL; + advertising |= ADVERTISED_MII; break; case PORT_FIBRE: - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + advertising |= ADVERTISED_FIBRE; break; } /* if autonegotiation is on, try to return the active speed/duplex */ - if (ecmd->autoneg == AUTONEG_ENABLE) { - ecmd->advertising |= ADVERTISED_Autoneg; + if (ecmd->base.autoneg == AUTONEG_ENABLE) { + advertising |= ADVERTISED_Autoneg; tmp = mii_nway_result( np->advertising & mdio_read(dev, MII_LPA)); if (tmp == LPA_100FULL || tmp == LPA_100HALF) - ethtool_cmd_speed_set(ecmd, SPEED_100); + ecmd->base.speed = SPEED_100; else - ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->base.speed = SPEED_10; if (tmp == LPA_100FULL || tmp == LPA_10FULL) - ecmd->duplex = DUPLEX_FULL; + ecmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + ecmd->base.duplex = DUPLEX_HALF; } /* ignore maxtxpkt, maxrxpkt for now */ + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); + return 0; } -static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +static int netdev_set_ecmd(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); + u32 advertising; - if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE) - return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + ecmd->link_modes.advertising); + + if (ecmd->base.port != PORT_TP && + ecmd->base.port != PORT_MII && + ecmd->base.port != PORT_FIBRE) return -EINVAL; - if (ecmd->autoneg == AUTONEG_ENABLE) { - if 
((ecmd->advertising & (ADVERTISED_10baseT_Half | + if (ecmd->base.autoneg == AUTONEG_ENABLE) { + if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full)) == 0) { return -EINVAL; } - } else if (ecmd->autoneg == AUTONEG_DISABLE) { - u32 speed = ethtool_cmd_speed(ecmd); + } else if (ecmd->base.autoneg == AUTONEG_DISABLE) { + u32 speed = ecmd->base.speed; if (speed != SPEED_10 && speed != SPEED_100) return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + if (ecmd->base.duplex != DUPLEX_HALF && + ecmd->base.duplex != DUPLEX_FULL) return -EINVAL; } else { return -EINVAL; @@ -2936,8 +2951,8 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) * transceiver are really not going to work so don't let the * user select them. */ - if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE || - ecmd->port == PORT_TP)) + if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE || + ecmd->base.port == PORT_TP)) return -EINVAL; /* @@ -2956,30 +2971,30 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) /* WHEW! now lets bang some bits */ /* save the parms */ - dev->if_port = ecmd->port; - np->autoneg = ecmd->autoneg; - np->phy_addr_external = ecmd->phy_address & PhyAddrMask; + dev->if_port = ecmd->base.port; + np->autoneg = ecmd->base.autoneg; + np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask; if (np->autoneg == AUTONEG_ENABLE) { /* advertise only what has been requested */ np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); - if (ecmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) np->advertising |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) np->advertising |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) np->advertising |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) np->advertising |= ADVERTISE_100FULL; } else { - np->speed = ethtool_cmd_speed(ecmd); - np->duplex = ecmd->duplex; + np->speed = ecmd->base.speed; + np->duplex = ecmd->base.duplex; /* user overriding the initial full duplex parm? */ if (np->duplex == DUPLEX_HALF) np->full_duplex = 0; } /* get the right phy enabled */ - if (ecmd->port == PORT_TP) + if (ecmd->base.port == PORT_TP) switch_port_internal(dev); else switch_port_external(dev); -- cgit v1.2.3 From 28213375f95fac780f45845ec44f9dbece546bad Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Wed, 8 Feb 2017 23:54:45 +0100 Subject: net: micrel: ks8695net: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/micrel/ks8695net.c | 91 ++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index d2106159473f..bd51e057e915 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -854,85 +854,94 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value) } /** - * ks8695_wan_get_settings - Get device-specific settings. 
+ * ks8695_wan_get_link_ksettings - Get device-specific settings. * @ndev: The network device to read settings from * @cmd: The ethtool structure to read into */ static int -ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; + u32 supported, advertising; /* All ports on the KS8695 support these... */ - cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP | SUPPORTED_MII); - cmd->transceiver = XCVR_INTERNAL; - cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; - cmd->port = PORT_MII; - cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); - cmd->phy_address = 0; + advertising = ADVERTISED_TP | ADVERTISED_MII; + cmd->base.port = PORT_MII; + supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); + cmd->base.phy_address = 0; ctrl = readl(ksp->phyiface_regs + KS8695_WMC); if ((ctrl & WMC_WAND) == 0) { /* auto-negotiation is enabled */ - cmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; if (ctrl & WMC_WANA100F) - cmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; if (ctrl & WMC_WANA100H) - cmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if (ctrl & WMC_WANA10F) - cmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (ctrl & WMC_WANA10H) - cmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if (ctrl & WMC_WANAP) - cmd->advertising |= ADVERTISED_Pause; - cmd->autoneg = AUTONEG_ENABLE; + advertising |= ADVERTISED_Pause; + cmd->base.autoneg = AUTONEG_ENABLE; - ethtool_cmd_speed_set(cmd, - (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10); - cmd->duplex = (ctrl & WMC_WDS) ? + cmd->base.speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; + cmd->base.duplex = (ctrl & WMC_WDS) ? DUPLEX_FULL : DUPLEX_HALF; } else { /* auto-negotiation is disabled */ - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; - ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ? - SPEED_100 : SPEED_10)); - cmd->duplex = (ctrl & WMC_WANFF) ? + cmd->base.speed = (ctrl & WMC_WANF100) ? + SPEED_100 : SPEED_10; + cmd->base.duplex = (ctrl & WMC_WANFF) ? DUPLEX_FULL : DUPLEX_HALF; } + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } /** - * ks8695_wan_set_settings - Set device-specific settings. + * ks8695_wan_set_link_ksettings - Set device-specific settings. 
* @ndev: The network device to configure * @cmd: The settings to configure */ static int -ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; + u32 advertising; - if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100)) - return -EINVAL; - if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL)) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if ((cmd->base.speed != SPEED_10) && (cmd->base.speed != SPEED_100)) return -EINVAL; - if (cmd->port != PORT_MII) + if ((cmd->base.duplex != DUPLEX_HALF) && + (cmd->base.duplex != DUPLEX_FULL)) return -EINVAL; - if (cmd->transceiver != XCVR_INTERNAL) + if (cmd->base.port != PORT_MII) return -EINVAL; - if ((cmd->autoneg != AUTONEG_DISABLE) && - (cmd->autoneg != AUTONEG_ENABLE)) + if ((cmd->base.autoneg != AUTONEG_DISABLE) && + (cmd->base.autoneg != AUTONEG_ENABLE)) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE) { - if ((cmd->advertising & (ADVERTISED_10baseT_Half | + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full)) == 0) @@ -942,13 +951,13 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | WMC_WANA10F | WMC_WANA10H); - if (cmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) ctrl |= WMC_WANA100F; - if (cmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) ctrl |= WMC_WANA100H; - if (cmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) ctrl |= WMC_WANA10F; - if (cmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) ctrl |= WMC_WANA10H; /* force a re-negotiation */ @@ -961,9 +970,9 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ctrl |= WMC_WAND; ctrl &= ~(WMC_WANF100 | WMC_WANFF); - if (cmd->speed == SPEED_100) + if (cmd->base.speed == SPEED_100) ctrl |= WMC_WANF100; - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) ctrl |= WMC_WANFF; writel(ctrl, ksp->phyiface_regs + KS8695_WMC); @@ -1042,12 +1051,12 @@ static const struct ethtool_ops ks8695_ethtool_ops = { static const struct ethtool_ops ks8695_wan_ethtool_ops = { .get_msglevel = ks8695_get_msglevel, .set_msglevel = ks8695_set_msglevel, - .get_settings = ks8695_wan_get_settings, - .set_settings = ks8695_wan_set_settings, .nway_reset = ks8695_wan_nwayreset, .get_link = ethtool_op_get_link, .get_pauseparam = ks8695_wan_get_pause, .get_drvinfo = ks8695_get_drvinfo, + .get_link_ksettings = ks8695_wan_get_link_ksettings, + .set_link_ksettings = ks8695_wan_set_link_ksettings, }; /* Network device interface functions */ -- cgit v1.2.3 From 98f2b09226758bcd416b023c9216597cc9727e60 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 9 Feb 2017 09:57:47 +0100 Subject: net: micrel: ks8851: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/micrel/ks8851.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 955d69a8e8d3..279ee4612981 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -1086,16 +1086,18 @@ static void ks8851_set_msglevel(struct net_device *dev, u32 to) ks->msg_enable = to; } -static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ks8851_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct ks8851_net *ks = netdev_priv(dev); - return mii_ethtool_gset(&ks->mii, cmd); + return mii_ethtool_get_link_ksettings(&ks->mii, cmd); } -static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ks8851_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct ks8851_net *ks = netdev_priv(dev); - return mii_ethtool_sset(&ks->mii, cmd); + return mii_ethtool_set_link_ksettings(&ks->mii, cmd); } static u32 ks8851_get_link(struct net_device *dev) @@ -1251,13 +1253,13 @@ static const struct ethtool_ops ks8851_ethtool_ops = { .get_drvinfo = ks8851_get_drvinfo, .get_msglevel = ks8851_get_msglevel, .set_msglevel = ks8851_set_msglevel, - .get_settings = ks8851_get_settings, - .set_settings = ks8851_set_settings, .get_link = ks8851_get_link, .nway_reset = ks8851_nway_reset, .get_eeprom_len = ks8851_get_eeprom_len, .get_eeprom = ks8851_get_eeprom, .set_eeprom = ks8851_set_eeprom, + .get_link_ksettings = ks8851_get_link_ksettings, + .set_link_ksettings = ks8851_set_link_ksettings, }; /* MII interface controls */ -- cgit v1.2.3 From b6878eaf75176e9acb6463ea1e50edddd309931f Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 9 Feb 2017 11:28:25 +0100 Subject: net: micrel: ks8851_mll: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/micrel/ks8851_mll.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index db628078a4e6..7647f7bdbcb8 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1311,16 +1311,18 @@ static void ks_set_msglevel(struct net_device *netdev, u32 to) ks->msg_enable = to; } -static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ks_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ks_net *ks = netdev_priv(netdev); - return mii_ethtool_gset(&ks->mii, cmd); + return mii_ethtool_get_link_ksettings(&ks->mii, cmd); } -static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ks_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ks_net *ks = netdev_priv(netdev); - return mii_ethtool_sset(&ks->mii, cmd); + return mii_ethtool_set_link_ksettings(&ks->mii, cmd); } static u32 ks_get_link(struct net_device *netdev) @@ -1339,10 +1341,10 @@ static const struct ethtool_ops ks_ethtool_ops = { .get_drvinfo = ks_get_drvinfo, .get_msglevel = ks_get_msglevel, .set_msglevel = ks_set_msglevel, - .get_settings = ks_get_settings, - .set_settings = ks_set_settings, .get_link = ks_get_link, .nway_reset = ks_nway_reset, + .get_link_ksettings = ks_get_link_ksettings, + .set_link_ksettings = ks_set_link_ksettings, }; /* MII interface controls */ -- cgit v1.2.3 From 51f21442a210e35e4e726f7d49120b0ed971fe14 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 12 Feb 2017 11:44:36 +0100 Subject: net: neterion: s2io: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/s2io.c | 51 ++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 203abcb0c252..c5c1d0e0c16f 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5300,10 +5300,10 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) } /** - * s2io_ethtool_sset - Sets different link parameters. + * s2io_ethtool_set_link_ksettings - Sets different link parameters. * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. - * @info: pointer to the structure with parameters given by ethtool to set + * @cmd: pointer to the structure with parameters given by ethtool to set * link information. * Description: * The function sets different link parameters provided by the user onto @@ -5312,13 +5312,14 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) * 0 on success. 
*/ -static int s2io_ethtool_sset(struct net_device *dev, - struct ethtool_cmd *info) +static int +s2io_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct s2io_nic *sp = netdev_priv(dev); - if ((info->autoneg == AUTONEG_ENABLE) || - (ethtool_cmd_speed(info) != SPEED_10000) || - (info->duplex != DUPLEX_FULL)) + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (cmd->base.speed != SPEED_10000) || + (cmd->base.duplex != DUPLEX_FULL)) return -EINVAL; else { s2io_close(sp->dev); @@ -5329,10 +5330,10 @@ static int s2io_ethtool_sset(struct net_device *dev, } /** - * s2io_ethtol_gset - Return link specific information. + * s2io_ethtol_get_link_ksettings - Return link specific information. * @sp : private member of the device structure, pointer to the * s2io_nic structure. - * @info : pointer to the structure with parameters given by ethtool + * @cmd : pointer to the structure with parameters given by ethtool * to return link information. * Description: * Returns link specific information like speed, duplex etc.. to ethtool. @@ -5340,25 +5341,31 @@ static int s2io_ethtool_sset(struct net_device *dev, * return 0 on success. */ -static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) +static int +s2io_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct s2io_nic *sp = netdev_priv(dev); - info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - info->port = PORT_FIBRE; - /* info->transceiver */ - info->transceiver = XCVR_EXTERNAL; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + cmd->base.port = PORT_FIBRE; if (netif_carrier_ok(sp->dev)) { - ethtool_cmd_speed_set(info, SPEED_10000); - info->duplex = DUPLEX_FULL; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(info, SPEED_UNKNOWN); - info->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - info->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -6626,8 +6633,6 @@ static int s2io_set_features(struct net_device *dev, netdev_features_t features) } static const struct ethtool_ops netdev_ethtool_ops = { - .get_settings = s2io_ethtool_gset, - .set_settings = s2io_ethtool_sset, .get_drvinfo = s2io_ethtool_gdrvinfo, .get_regs_len = s2io_ethtool_get_regs_len, .get_regs = s2io_ethtool_gregs, @@ -6643,6 +6648,8 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_phys_id = s2io_ethtool_set_led, .get_ethtool_stats = s2io_get_ethtool_stats, .get_sset_count = s2io_get_sset_count, + .get_link_ksettings = s2io_ethtool_get_link_ksettings, + .set_link_ksettings = s2io_ethtool_set_link_ksettings, }; /** -- cgit v1.2.3 From 08041ff24a5bda3c17aae8ca0370337aa3570fd5 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 12 Feb 2017 17:33:13 +0100 Subject: net: neterion: vxge: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. 
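Across this series the conversion has the same shape: speed, duplex, port and autoneg move under cmd->base, and the old u32 supported/advertising masks become link-mode bitmaps built with the ethtool_link_ksettings_* helpers (or converted from legacy masks via ethtool_convert_legacy_u32_to_link_mode()). As a minimal sketch only — the foo_* names are hypothetical and not taken from any of these patches:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	/* scalar link parameters now live under cmd->base */
	cmd->base.speed = SPEED_100;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_MII;
	cmd->base.autoneg = AUTONEG_DISABLE;

	/* link modes are bitmaps manipulated through the ksettings helpers */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* .get_settings/.set_settings entries are dropped in favour of */
	.get_link_ksettings = foo_get_link_ksettings,
};
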
As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 47 +++++++++++++---------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index 9a2967016c18..db55e6d89cf4 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -38,9 +38,9 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { }; /** - * vxge_ethtool_sset - Sets different link parameters. + * vxge_ethtool_set_link_ksettings - Sets different link parameters. * @dev: device pointer. - * @info: pointer to the structure with parameters given by ethtool to set + * @cmd: pointer to the structure with parameters given by ethtool to set * link information. * * The function sets different link parameters provided by the user onto @@ -48,44 +48,51 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { * Return value: * 0 on success. */ -static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) +static int +vxge_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { /* We currently only support 10Gb/FULL */ - if ((info->autoneg == AUTONEG_ENABLE) || - (ethtool_cmd_speed(info) != SPEED_10000) || - (info->duplex != DUPLEX_FULL)) + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (cmd->base.speed != SPEED_10000) || + (cmd->base.duplex != DUPLEX_FULL)) return -EINVAL; return 0; } /** - * vxge_ethtool_gset - Return link specific information. + * vxge_ethtool_get_link_ksettings - Return link specific information. * @dev: device pointer. - * @info: pointer to the structure with parameters given by ethtool + * @cmd: pointer to the structure with parameters given by ethtool * to return link information. * * Returns link specific information like speed, duplex etc.. to ethtool. * Return value : * return 0 on success. 
*/ -static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) +static int vxge_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - info->port = PORT_FIBRE; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - info->transceiver = XCVR_EXTERNAL; + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + cmd->base.port = PORT_FIBRE; if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(info, SPEED_10000); - info->duplex = DUPLEX_FULL; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(info, SPEED_UNKNOWN); - info->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - info->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -1126,8 +1133,6 @@ static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms) } static const struct ethtool_ops vxge_ethtool_ops = { - .get_settings = vxge_ethtool_gset, - .set_settings = vxge_ethtool_sset, .get_drvinfo = vxge_ethtool_gdrvinfo, .get_regs_len = vxge_ethtool_get_regs_len, .get_regs = vxge_ethtool_gregs, @@ -1139,6 +1144,8 @@ static const struct ethtool_ops vxge_ethtool_ops = { .get_sset_count = vxge_ethtool_get_sset_count, .get_ethtool_stats = vxge_get_ethtool_stats, .flash_device = vxge_fw_flash, + .get_link_ksettings = vxge_ethtool_get_link_ksettings, + .set_link_ksettings = vxge_ethtool_set_link_ksettings, }; void vxge_initialize_ethtool_ops(struct net_device *ndev) -- cgit v1.2.3 From 2efbe14303e51e89dbd5e411d3c0c3e1e8d4211e Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 12 Feb 2017 21:38:29 +0100 Subject: net: nuvoton: w90p910: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/nuvoton/w90p910_ether.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 119f6dca71f0..9709c8ca0774 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -874,16 +874,18 @@ static void w90p910_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } -static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int w90p910_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct w90p910_ether *ether = netdev_priv(dev); - return mii_ethtool_gset(ðer->mii, cmd); + return mii_ethtool_get_link_ksettings(ðer->mii, cmd); } -static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int w90p910_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct w90p910_ether *ether = netdev_priv(dev); - return mii_ethtool_sset(ðer->mii, cmd); + return mii_ethtool_set_link_ksettings(ðer->mii, cmd); } static int w90p910_nway_reset(struct net_device *dev) @@ -899,11 +901,11 @@ static u32 w90p910_get_link(struct net_device *dev) } static const struct ethtool_ops w90p910_ether_ethtool_ops = { - .get_settings = w90p910_get_settings, - .set_settings = w90p910_set_settings, .get_drvinfo = w90p910_get_drvinfo, .nway_reset = w90p910_nway_reset, .get_link = w90p910_get_link, + .get_link_ksettings = w90p910_get_link_ksettings, + .set_link_ksettings = w90p910_set_link_ksettings, }; static const struct net_device_ops w90p910_ether_netdev_ops = { -- cgit v1.2.3 From 3b03cc0783b03ddd668ff3f86419bc67d0664e89 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Fri, 10 Feb 2017 23:57:48 +0100 Subject: net: natsemi: ns83820: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/natsemi/ns83820.c | 46 ++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index f9d2eb9a920a..729095db3e08 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1217,12 +1217,13 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) } /* Let ethtool retrieve info */ -static int ns83820_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int ns83820_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ns83820 *dev = PRIV(ndev); u32 cfg, tanar, tbicr; int fullduplex = 0; + u32 supported; /* * Here's the list of available ethtool commands from other drivers: @@ -1244,44 +1245,47 @@ static int ns83820_get_settings(struct net_device *ndev, fullduplex = (cfg & CFG_DUPSTS) ? 
1 : 0; - cmd->supported = SUPPORTED_Autoneg; + supported = SUPPORTED_Autoneg; if (dev->CFG_cache & CFG_TBI_EN) { /* we have optical interface */ - cmd->supported |= SUPPORTED_1000baseT_Half | + supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; - cmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; } else { /* we have copper */ - cmd->supported |= SUPPORTED_10baseT_Half | + supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_MII; - cmd->port = PORT_MII; + cmd->base.port = PORT_MII; } - cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + + cmd->base.duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; switch (cfg / CFG_SPDSTS0 & 3) { case 2: - ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->base.speed = SPEED_1000; break; case 1: - ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->base.speed = SPEED_100; break; default: - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; break; } - cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) + cmd->base.autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } /* Let ethool change settings*/ -static int ns83820_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int ns83820_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct ns83820 *dev = PRIV(ndev); u32 cfg, tanar; @@ -1306,10 +1310,10 @@ static int ns83820_set_settings(struct net_device *ndev, spin_lock(&dev->tx_lock); /* Set duplex */ - if (cmd->duplex != fullduplex) { + if (cmd->base.duplex != fullduplex) { if (have_optical) { /*set full duplex*/ - if (cmd->duplex == DUPLEX_FULL) { + if (cmd->base.duplex == DUPLEX_FULL) { /* force full duplex */ writel(readl(dev->base + TXCFG) | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, @@ -1333,7 +1337,7 @@ static int ns83820_set_settings(struct net_device *ndev, /* Set autonegotiation */ if (1) { - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { /* restart auto negotiation */ writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, dev->base + TBICR); @@ -1348,7 +1352,7 @@ static int ns83820_set_settings(struct net_device *ndev, } printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name, - cmd->autoneg ? "ENABLED" : "DISABLED"); + cmd->base.autoneg ? "ENABLED" : "DISABLED"); } phy_intr(ndev); @@ -1375,10 +1379,10 @@ static u32 ns83820_get_link(struct net_device *ndev) } static const struct ethtool_ops ops = { - .get_settings = ns83820_get_settings, - .set_settings = ns83820_set_settings, .get_drvinfo = ns83820_get_drvinfo, - .get_link = ns83820_get_link + .get_link = ns83820_get_link, + .get_link_ksettings = ns83820_get_link_ksettings, + .set_link_ksettings = ns83820_set_link_ksettings, }; static inline void ns83820_disable_interrupts(struct ns83820 *dev) -- cgit v1.2.3 From fcd25166d950bc0fae4179b9d16995a1a2270810 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Feb 2017 00:03:42 +0100 Subject: net: dsa: mv88e6xxx: Add watchdog interrupt handler The switch contains a watchdog looking for issues with the internal gubbins of the switch. Hook the interrupt the watchdog triggers and log the value of the control register indicating why the watchdog fired. The watchdog can only be cleared with a switch reset, which will destroy the current configuration. 
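For illustration only, the heavy-handed alternative would look roughly like the sketch below (hypothetical code reusing the register helpers added in this patch; it is not what the patch implements):

static int mv88e6xxx_watchdog_reset_sketch(struct mv88e6xxx_chip *chip, int irq)
{
	u16 reg;

	/* read and log why the watchdog fired */
	mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
	dev_info(chip->dev, "Watchdog event: 0x%04x", reg);

	/* Clearing the event would need a full switch reset (the chip's
	 * ->reset op), which throws away the running configuration.
	 */
	chip->info->ops->reset(chip);

	return IRQ_HANDLED;
}
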
Rather than doing this, just disable the interrupt. The mv88e6390 family has different watchdog registers. So use an ops structure, so support for the mv88e6390 family can be added later. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 14 ++++++ drivers/net/dsa/mv88e6xxx/global2.c | 89 ++++++++++++++++++++++++++++++++++- drivers/net/dsa/mv88e6xxx/global2.h | 4 ++ drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 21 +++++++++ 4 files changed, 127 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7b4e40b286e4..489a59f5dea3 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3111,6 +3111,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, @@ -3179,6 +3180,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3205,6 +3207,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, @@ -3232,6 +3235,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3250,6 +3254,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3276,6 +3281,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3304,6 +3310,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3330,6 +3337,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3358,6 +3366,7 @@ static const 
struct mv88e6xxx_ops mv88e6176_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3380,6 +3389,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, @@ -3491,6 +3501,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3598,6 +3609,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3624,6 +3636,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3652,6 +3665,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 50e4e0be4227..1e2d65826d12 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -649,6 +649,91 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_smi_phy_write_c22(chip, addr, reg, val, external); } +static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq) +{ + u16 reg; + + mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, ®); + + dev_info(chip->dev, "Watchdog event: 0x%04x", reg); + + return IRQ_HANDLED; +} + +static void mv88e6097_watchdog_free(struct mv88e6xxx_chip *chip) +{ + u16 reg; + + mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, ®); + + reg &= ~(GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE | + GLOBAL2_WDOG_CONTROL_QC_ENABLE); + + mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, reg); +} + +static int mv88e6097_watchdog_setup(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, + GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE | + GLOBAL2_WDOG_CONTROL_QC_ENABLE | + GLOBAL2_WDOG_CONTROL_SWRESET); +} + +const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = { + .irq_action = mv88e6097_watchdog_action, + .irq_setup = mv88e6097_watchdog_setup, + .irq_free = mv88e6097_watchdog_free, +}; + +static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_chip *chip = dev_id; + irqreturn_t ret = IRQ_NONE; + + mutex_lock(&chip->reg_lock); + if 
(chip->info->ops->watchdog_ops->irq_action) + ret = chip->info->ops->watchdog_ops->irq_action(chip, irq); + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static void mv88e6xxx_g2_watchdog_free(struct mv88e6xxx_chip *chip) +{ + mutex_lock(&chip->reg_lock); + if (chip->info->ops->watchdog_ops->irq_free) + chip->info->ops->watchdog_ops->irq_free(chip); + mutex_unlock(&chip->reg_lock); + + free_irq(chip->watchdog_irq, chip); + irq_dispose_mapping(chip->watchdog_irq); +} + +static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip) +{ + int err; + + chip->watchdog_irq = irq_find_mapping(chip->g2_irq.domain, + GLOBAL2_INT_SOURCE_WATCHDOG); + if (chip->watchdog_irq < 0) + return chip->watchdog_irq; + + err = request_threaded_irq(chip->watchdog_irq, NULL, + mv88e6xxx_g2_watchdog_thread_fn, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + "mv88e6xxx-watchdog", chip); + if (err) + return err; + + mutex_lock(&chip->reg_lock); + if (chip->info->ops->watchdog_ops->irq_setup) + err = chip->info->ops->watchdog_ops->irq_setup(chip); + mutex_unlock(&chip->reg_lock); + + return err; +} + static void mv88e6xxx_g2_irq_mask(struct irq_data *d) { struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d); @@ -737,6 +822,8 @@ void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip) { int irq, virq; + mv88e6xxx_g2_watchdog_free(chip); + free_irq(chip->device_irq, chip); irq_dispose_mapping(chip->device_irq); @@ -779,7 +866,7 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) if (err) goto out; - return 0; + return mv88e6xxx_g2_watchdog_setup(chip); out: for (irq = 0; irq < 16; irq++) { diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 00e635279ba1..8305f6941fd1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -46,6 +46,8 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; + #else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) @@ -125,6 +127,8 @@ static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) return -EOPNOTSUPP; } +static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; + #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ #endif /* _MV88E6XXX_GLOBAL2_H */ diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index ac54f40813f7..e35a56c89d5c 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -339,6 +339,7 @@ #define GLOBAL_STATS_COUNTER_01 0x1f #define GLOBAL2_INT_SOURCE 0x00 +#define GLOBAL2_INT_SOURCE_WATCHDOG 15 #define GLOBAL2_INT_MASK 0x01 #define GLOBAL2_MGMT_EN_2X 0x02 #define GLOBAL2_MGMT_EN_0X 0x03 @@ -415,6 +416,14 @@ #define GLOBAL2_SCRATCH_REGISTER_SHIFT 8 #define GLOBAL2_SCRATCH_VALUE_MASK 0xff #define GLOBAL2_WDOG_CONTROL 0x1b +#define GLOBAL2_WDOG_CONTROL_EGRESS_EVENT BIT(7) +#define GLOBAL2_WDOG_CONTROL_RMU_TIMEOUT BIT(6) +#define GLOBAL2_WDOG_CONTROL_QC_ENABLE BIT(5) +#define GLOBAL2_WDOG_CONTROL_EGRESS_HISTORY BIT(4) +#define GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE BIT(3) +#define GLOBAL2_WDOG_CONTROL_FORCE_IRQ BIT(2) +#define GLOBAL2_WDOG_CONTROL_HISTORY BIT(1) +#define GLOBAL2_WDOG_CONTROL_SWRESET BIT(0) #define GLOBAL2_QOS_WEIGHT 0x1c #define GLOBAL2_MISC 0x1d @@ -706,6 +715,7 @@ struct mv88e6xxx_vtu_entry { }; struct mv88e6xxx_bus_ops; 
+struct mv88e6xxx_irq_ops; struct mv88e6xxx_irq { u16 masked; @@ -766,6 +776,7 @@ struct mv88e6xxx_chip { struct mv88e6xxx_irq g2_irq; int irq; int device_irq; + int watchdog_irq; }; struct mv88e6xxx_bus_ops { @@ -879,11 +890,21 @@ struct mv88e6xxx_ops { uint64_t *data); int (*g1_set_cpu_port)(struct mv88e6xxx_chip *chip, int port); int (*g1_set_egress_port)(struct mv88e6xxx_chip *chip, int port); + const struct mv88e6xxx_irq_ops *watchdog_ops; /* Can be either in g1 or g2, so don't use a prefix */ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); }; +struct mv88e6xxx_irq_ops { + /* Action to be performed when the interrupt happens */ + int (*irq_action)(struct mv88e6xxx_chip *chip, int irq); + /* Setup the hardware to generate the interrupt */ + int (*irq_setup)(struct mv88e6xxx_chip *chip); + /* Reset the hardware to stop generating the interrupt */ + void (*irq_free)(struct mv88e6xxx_chip *chip); +}; + #define STATS_TYPE_PORT BIT(0) #define STATS_TYPE_BANK0 BIT(1) #define STATS_TYPE_BANK1 BIT(2) -- cgit v1.2.3 From 61303736638aa923b40c9e107b64e6d2b7af499d Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Feb 2017 00:03:43 +0100 Subject: net: dsa: mv88e6xxx: Add mv88e6390 watchdog interrupt support Implement the ops needed to support the watchdog for the MV88E6390 family. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 9 +++++++ drivers/net/dsa/mv88e6xxx/global2.c | 48 +++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/global2.h | 2 ++ drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 12 +++++++++ 4 files changed, 71 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 489a59f5dea3..7658284beaf9 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3419,6 +3419,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3446,6 +3447,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3473,6 +3475,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3530,6 +3533,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3694,6 +3698,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3722,6 +3727,7 @@ static const struct 
mv88e6xxx_ops mv88e6341_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3752,6 +3758,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3781,6 +3788,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; @@ -3808,6 +3816,7 @@ static const struct mv88e6xxx_ops mv88e6391_ops = { .stats_get_stats = mv88e6390_stats_get_stats, .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 1e2d65826d12..8f15bc7b1f5f 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -686,6 +686,54 @@ const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = { .irq_free = mv88e6097_watchdog_free, }; +static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL, + GLOBAL2_WDOG_INT_ENABLE | + GLOBAL2_WDOG_CUT_THROUGH | + GLOBAL2_WDOG_QUEUE_CONTROLLER | + GLOBAL2_WDOG_EGRESS | + GLOBAL2_WDOG_FORCE_IRQ); +} + +static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq) +{ + int err; + u16 reg; + + mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_EVENT); + err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, ®); + + dev_info(chip->dev, "Watchdog event: 0x%04x", + reg & GLOBAL2_WDOG_DATA_MASK); + + mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_HISTORY); + err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, ®); + + dev_info(chip->dev, "Watchdog history: 0x%04x", + reg & GLOBAL2_WDOG_DATA_MASK); + + /* Trigger a software reset to try to recover the switch */ + if (chip->info->ops->reset) + chip->info->ops->reset(chip); + + mv88e6390_watchdog_setup(chip); + + return IRQ_HANDLED; +} + +static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip) +{ + mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL, + GLOBAL2_WDOG_INT_ENABLE); +} + +const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = { + .irq_action = mv88e6390_watchdog_action, + .irq_setup = mv88e6390_watchdog_setup, + .irq_free = mv88e6390_watchdog_free, +}; + static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id) { struct mv88e6xxx_chip *chip = dev_id; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 8305f6941fd1..a8b2f9486a4a 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -47,6 +47,7 @@ void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; +extern const struct mv88e6xxx_irq_ops 
mv88e6390_watchdog_ops; #else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ @@ -128,6 +129,7 @@ static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) } static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; +static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {}; #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index e35a56c89d5c..6033f2f6260a 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -424,6 +424,17 @@ #define GLOBAL2_WDOG_CONTROL_FORCE_IRQ BIT(2) #define GLOBAL2_WDOG_CONTROL_HISTORY BIT(1) #define GLOBAL2_WDOG_CONTROL_SWRESET BIT(0) +#define GLOBAL2_WDOG_UPDATE BIT(15) +#define GLOBAL2_WDOG_INT_SOURCE (0x00 << 8) +#define GLOBAL2_WDOG_INT_STATUS (0x10 << 8) +#define GLOBAL2_WDOG_INT_ENABLE (0x11 << 8) +#define GLOBAL2_WDOG_EVENT (0x12 << 8) +#define GLOBAL2_WDOG_HISTORY (0x13 << 8) +#define GLOBAL2_WDOG_DATA_MASK 0xff +#define GLOBAL2_WDOG_CUT_THROUGH BIT(3) +#define GLOBAL2_WDOG_QUEUE_CONTROLLER BIT(2) +#define GLOBAL2_WDOG_EGRESS BIT(1) +#define GLOBAL2_WDOG_FORCE_IRQ BIT(0) #define GLOBAL2_QOS_WEIGHT 0x1c #define GLOBAL2_MISC 0x1d @@ -675,6 +686,7 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6390 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ -- cgit v1.2.3 From 8f9000a565d01cb1f1688dc5dc32ac8026a7e993 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Thu, 9 Feb 2017 23:53:10 +0300 Subject: net:ethernet:aquantia: Add 2500/5000 mbit link modes support. Using new link mode indices instead deprecated SUPPORTED_/ADVERTISED_ macro. Added indication for 2500 and 5000mbit link modes (AQtion adapter already supports these speeds). Signed-off-by: Pavel Belous Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 96 +++++++++++++++---------- 1 file changed, 59 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index bed25abd2889..aa22a7ce710b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -737,48 +737,70 @@ err_exit:; void aq_nic_get_link_ksettings(struct aq_nic_s *self, struct ethtool_link_ksettings *cmd) { - u32 supported, advertising; - cmd->base.port = PORT_TP; /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ cmd->base.duplex = DUPLEX_FULL; cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - - supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ? - ADVERTISED_10000baseT_Full : 0U; - supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ? - ADVERTISED_1000baseT_Full : 0U; - supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ? - ADVERTISED_100baseT_Full : 0U; - supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0; - supported |= SUPPORTED_Autoneg; - supported |= SUPPORTED_TP; - - advertising = (self->aq_nic_cfg.is_autoneg) ? - ADVERTISED_Autoneg : 0U; - advertising |= - (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ? - ADVERTISED_10000baseT_Full : 0U; - advertising |= - (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ? 
- ADVERTISED_1000baseT_Full : 0U; - - advertising |= - (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ? - ADVERTISED_100baseT_Full : 0U; - advertising |= (self->aq_nic_cfg.flow_control) ? - ADVERTISED_Pause : 0U; - advertising |= ADVERTISED_TP; - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + ethtool_link_ksettings_zero_link_mode(cmd, supported); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 5000baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 2500baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + + if (self->aq_hw_caps.flow_control) + ethtool_link_ksettings_add_link_mode(cmd, supported, + Pause); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + if (self->aq_nic_cfg.is_autoneg) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 5000baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 2500baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + + if (self->aq_nic_cfg.flow_control) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Pause); + + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); } int aq_nic_set_link_ksettings(struct aq_nic_s *self, -- cgit v1.2.3 From 37fabbf4d489cc2e1cbf7cde816d9453a65ddfb7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 10 Feb 2017 05:46:46 -0800 Subject: net: busy-poll: remove LL_FLUSH_FAILED and LL_FLUSH_BUSY Commit 79e7fff47b7b ("net: remove support for per driver ndo_busy_poll()") made them obsolete. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/busy_poll.h | 4 ---- net/core/dev.c | 3 --- 2 files changed, 7 deletions(-) diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index d73b849e29a6..b8d637225a07 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -33,10 +33,6 @@ struct napi_struct; extern unsigned int sysctl_net_busy_read __read_mostly; extern unsigned int sysctl_net_busy_poll __read_mostly; -/* return values from ndo_ll_poll */ -#define LL_FLUSH_FAILED -1 -#define LL_FLUSH_BUSY -2 - static inline bool net_busy_loop_on(void) { return sysctl_net_busy_poll; diff --git a/net/core/dev.c b/net/core/dev.c index 363c44b9be63..2f1bbe1bf67c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5008,9 +5008,6 @@ count: LINUX_MIB_BUSYPOLLRXPACKETS, rc); local_bh_enable(); - if (rc == LL_FLUSH_FAILED) - break; /* permanent failure */ - if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) || busy_loop_timeout(end_time)) break; -- cgit v1.2.3 From fb585b44383c4cff85f92e67377ee1c5f07d6dc1 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 10 Feb 2017 16:43:50 +0100 Subject: net: make net_device members garp_port and mrp_port conditional garp_port is only used in net/802/garp.c which is only compiled with CONFIG_GARP enabled. Same goes for mrp_port which is only used in net/802/mrp.c with CONFIG_MRP enabled. Only include the two members in struct net_device if their respective CONFIG_* is enabled. This saves a few bytes in struct net_device in case CONFIG_GARP or CONFIG_MRP are not enabled. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- include/linux/netdevice.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 54c82b5df0ba..98f65ed8f8b0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1865,8 +1865,12 @@ struct net_device { struct pcpu_vstats __percpu *vstats; }; +#if IS_ENABLED(CONFIG_GARP) struct garp_port __rcu *garp_port; +#endif +#if IS_ENABLED(CONFIG_MRP) struct mrp_port __rcu *mrp_port; +#endif struct device dev; const struct attribute_group *sysfs_groups[4]; -- cgit v1.2.3 From 1f8f1e89e0183e9504dfa45e9b87c44cff2e66c6 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Fri, 10 Feb 2017 21:17:06 +0100 Subject: net: fs_enet: Fix an error handling path 'of_node_put(fpi->phy_node)' should also be called if we branch to 'out_deregister_fixed_link' error handling path. Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 54e3ce9bd94c..5c6426756d11 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1045,10 +1045,10 @@ out_cleanup_data: out_free_dev: free_netdev(ndev); out_put: - of_node_put(fpi->phy_node); if (fpi->clk_per) clk_disable_unprepare(fpi->clk_per); out_deregister_fixed_link: + of_node_put(fpi->phy_node); if (of_phy_is_fixed_link(ofdev->dev.of_node)) of_phy_deregister_fixed_link(ofdev->dev.of_node); out_free_fpi: -- cgit v1.2.3 From e9ea828f62891ca91e3f80bf4a438f337d59977d Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Fri, 10 Feb 2017 21:17:19 +0100 Subject: net: fs_enet: Simplify code There is no need to use an intermediate variable to handle an error code in this case. 
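As a rough illustration only (a sketch, not part of the original patch; the function and label names are taken from the diff that follows), the simplification collapses the intermediate error variable like so:

	/* before: error code copied through an intermediate variable */
	err = clk_prepare_enable(clk);
	if (err) {
		ret = err;
		goto out_deregister_fixed_link;
	}

	/* after: assign the return value to ret directly */
	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_deregister_fixed_link;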
Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 5c6426756d11..753259091b22 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -964,11 +964,10 @@ static int fs_enet_probe(struct platform_device *ofdev) */ clk = devm_clk_get(&ofdev->dev, "per"); if (!IS_ERR(clk)) { - err = clk_prepare_enable(clk); - if (err) { - ret = err; + ret = clk_prepare_enable(clk); + if (ret) goto out_deregister_fixed_link; - } + fpi->clk_per = clk; } -- cgit v1.2.3 From 2194bd1080210c6e85ea262cda9ad0135b3f3c87 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 13 Feb 2017 14:00:22 +0300 Subject: net: qcom/emac: fix a sizeof() typo We had intended to say "sizeof(u32)" but the "u" is missing. Fortunately, sizeof(32) is also 4, so the original code still works. Fixes: c4e7beea2192 ("net: qcom/emac: add ethtool support for reading hardware registers") Signed-off-by: Dan Carpenter Acked-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index 0d9945fb79be..bbe24639aa5a 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -227,7 +227,7 @@ static void emac_get_regs(struct net_device *netdev, static int emac_get_regs_len(struct net_device *netdev) { - return EMAC_MAX_REG_SIZE * sizeof(32); + return EMAC_MAX_REG_SIZE * sizeof(u32); } static const struct ethtool_ops emac_ethtool_ops = { -- cgit v1.2.3 From d7cf52c249d7aa797ea27808e1526604309ec142 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 14 Feb 2017 16:27:13 +0100 Subject: sched: Fix accidental removal of errout goto Bring back the goto that was removed by accident. Reported-by: Colin Ian King Fixes: 40c81b25b16c ("sched: check negative err value to safe one level of indent") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_api.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index abe1fe13ae54..732f7cae459d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -389,6 +389,7 @@ replay: RTM_DELTFILTER, false); if (tcf_proto_destroy(tp, false)) RCU_INIT_POINTER(*back, next); + goto errout; case RTM_GETTFILTER: err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, true); -- cgit v1.2.3 From 91eaa475e287e7cf87959d68fb273e4f092897f4 Mon Sep 17 00:00:00 2001 From: Volodymyr Bendiuga Date: Tue, 14 Feb 2017 11:29:30 +0100 Subject: net:dsa:mv88e6xxx: use watchdog ops for 6097 chip mv88e6097 chip requires watchdog_ops to be set. Signed-off-by: Volodymyr Bendiuga Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7658284beaf9..03dc886ed3d6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3160,6 +3160,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .stats_get_stats = mv88e6095_stats_get_stats, .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, }; -- cgit v1.2.3 From 56ac13bfc703d5189e8b91c5c2f512830d1cb013 Mon Sep 17 00:00:00 2001 From: Amadeusz Sławiński Date: Mon, 13 Feb 2017 12:38:37 +0200 Subject: ath10k: remove ath10k_vif_to_arvif() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit it adds unnecessary level of indirection, while we just access structure field Signed-off-by: Amadeusz Sławiński Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 68 +++++++++++++++++------------------ drivers/net/wireless/ath/ath10k/mac.h | 7 +--- drivers/net/wireless/ath/ath10k/p2p.c | 2 +- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 4 files changed, 37 insertions(+), 42 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 9977829a6ec4..3029f257a19a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1993,7 +1993,7 @@ static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, { struct sk_buff *skb = data; struct ieee80211_mgmt *mgmt = (void *)skb->data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (vif->type != NL80211_IFTYPE_STATION) return; @@ -2016,7 +2016,7 @@ static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { u32 *vdev_id = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k *ar = arvif->ar; struct ieee80211_hw *hw = ar->hw; @@ -2083,7 +2083,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; u32 aid; lockdep_assert_held(&ar->conf_mutex); @@ -2159,7 +2159,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; @@ -2222,7 +2222,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, struct wmi_peer_assoc_complete_arg *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2446,7 +2446,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, struct wmi_peer_assoc_complete_arg *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum 
nl80211_band band; const u16 *vht_mcs_mask; @@ -2507,7 +2507,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: @@ -2574,7 +2574,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2689,7 +2689,7 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_sta_vht_cap vht_cap) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret; u32 param; u32 value; @@ -2756,7 +2756,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct wmi_peer_assoc_complete_arg peer_arg; @@ -2849,7 +2849,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ieee80211_sta_vht_cap vht_cap = {}; int ret; @@ -2882,7 +2882,7 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ieee80211_sta *sta, bool reassoc) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_peer_assoc_complete_arg peer_arg; int ret = 0; @@ -2949,7 +2949,7 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret = 0; lockdep_assert_held(&ar->conf_mutex); @@ -3175,7 +3175,7 @@ static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath10k *ar = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (arvif->tx_paused) return; @@ -3262,7 +3262,7 @@ struct ath10k_mac_tx_pause { static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_mac_tx_pause *arg = data; if (arvif->vdev_id != arg->vdev_id) @@ -3358,7 +3358,7 @@ static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, return false; if (vif) - return !ath10k_vif_to_arvif(vif)->nohwcrypt; + return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; return true; } @@ -3423,7 +3423,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; /* This is case only for P2P_GO */ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) @@ -4849,7 +4849,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif 
*arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_peer *peer; enum wmi_sta_powersave_param param; int ret = 0; @@ -5185,7 +5185,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_peer *peer; int ret; int i; @@ -5320,7 +5320,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, u32 changed) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret = 0; u32 vdev_param, pdev_param, slottime, preamble; @@ -5512,7 +5512,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_scan_request *hw_req) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_scan_request *req = &hw_req->req; struct wmi_start_scan_arg arg; int ret = 0; @@ -5644,7 +5644,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_peer *peer; const u8 *peer_addr; bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || @@ -5783,7 +5783,7 @@ static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, int keyidx) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret; mutex_lock(&arvif->ar->conf_mutex); @@ -5964,7 +5964,7 @@ static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int *num_tdls_vifs = data; if (vif->type != NL80211_IFTYPE_STATION) @@ -5992,7 +5992,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state new_state) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; struct ath10k_peer *peer; int ret = 0; @@ -6227,7 +6227,7 @@ exit: static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, u16 ac, bool enable) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_sta_uapsd_auto_trig_arg arg = {}; u32 prio = 0, acc = 0; u32 value = 0; @@ -6335,7 +6335,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_wmm_params_arg *p = NULL; int ret; @@ -6409,7 +6409,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_start_scan_arg arg; int ret = 0; u32 scan_time_msec; @@ -6909,7 +6909,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask 
*mask) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; struct ath10k *ar = arvif->ar; enum nl80211_band band; @@ -7060,7 +7060,7 @@ static void ath10k_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, s64 tsf_offset) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; u32 offset, vdev_param; int ret; @@ -7085,7 +7085,7 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_ampdu_params *params) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; @@ -7183,7 +7183,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar, ath10k_monitor_stop(ar); for (i = 0; i < n_vifs; i++) { - arvif = ath10k_vif_to_arvif(vifs[i].vif); + arvif = (void *)vifs[i].vif->drv_priv; ath10k_dbg(ar, ATH10K_DBG_MAC, "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", @@ -7216,7 +7216,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar, spin_unlock_bh(&ar->data_lock); for (i = 0; i < n_vifs; i++) { - arvif = ath10k_vif_to_arvif(vifs[i].vif); + arvif = (void *)vifs[i].vif->drv_priv; if (WARN_ON(!arvif->is_started)) continue; @@ -7873,7 +7873,7 @@ static void ath10k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath10k_vif_iter *arvif_iter = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 1bd29ecfcdcc..553747bc19ed 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -83,17 +83,12 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, u8 tid); int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val); -static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) -{ - return (struct ath10k_vif *)vif->drv_priv; -} - static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { if (arvif->tx_seq_no == 0) diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c index c0b6ffaf3ec1..7e621ee194e3 100644 --- a/drivers/net/wireless/ath/ath10k/p2p.c +++ b/drivers/net/wireless/ath/ath10k/p2p.c @@ -132,7 +132,7 @@ struct ath10k_p2p_noa_arg { static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_p2p_noa_arg *arg = data; if (arvif->vdev_id != arg->vdev_id) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 414ad3e1eed4..2f1743e60fa1 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1779,7 +1779,7 @@ unlock: static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct 
ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; ath10k_wmi_tx_beacon_nowait(arvif); } -- cgit v1.2.3 From 182f1e5a626e6deb1e09a3d96a65cc64b31437f7 Mon Sep 17 00:00:00 2001 From: Amadeusz Sławiński Date: Mon, 13 Feb 2017 12:38:38 +0200 Subject: ath10k: use size_t for len variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cleanup to consolidate type used for len variables Signed-off-by: Amadeusz Sławiński Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 2 +- drivers/net/wireless/ath/ath10k/debug.c | 49 +++++++++++++++--------------- drivers/net/wireless/ath/ath10k/spectral.c | 7 +++-- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c27e7ea38a65..5ebe64a6e991 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -351,7 +351,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar, char *buf, size_t buf_len) { - unsigned int len = 0; + size_t len = 0; int i; for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index d5ff0f4ef5ce..fb0ade3adb07 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -237,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file, { struct ath10k *ar = file->private_data; char *buf; - unsigned int len = 0, buf_len = 4096; + size_t len = 0, buf_len = 4096; const char *name; ssize_t ret_cnt; bool enabled; @@ -529,7 +529,7 @@ static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; - unsigned int len = strlen(buf); + size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -547,17 +547,16 @@ static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - int ret, len, buf_len; + int ret; + size_t len = 0, buf_len = 500; char *buf; - buf_len = 500; buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; spin_lock_bh(&ar->data_lock); - len = 0; len += scnprintf(buf + len, buf_len - len, "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); len += scnprintf(buf + len, buf_len - len, @@ -696,7 +695,7 @@ static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - unsigned int len; + size_t len; char buf[50]; len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id); @@ -733,8 +732,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, struct ath10k_ce_crash_hdr *ce_hdr; struct ath10k_dump_file_data *dump_data; struct ath10k_tlv_dump_data *dump_tlv; - int hdr_len = sizeof(*dump_data); - unsigned int len, sofar = 0; + size_t hdr_len = sizeof(*dump_data); + size_t len, sofar = 0; unsigned char *buf; len = hdr_len; @@ -900,7 +899,7 @@ static ssize_t ath10k_reg_addr_read(struct file *file, { struct ath10k *ar = file->private_data; u8 buf[32]; - unsigned int len = 0; + size_t len = 0; u32 reg_addr; mutex_lock(&ar->conf_mutex); @@ -948,7 +947,7 @@ static ssize_t ath10k_reg_value_read(struct file *file, { struct ath10k *ar = file->private_data; u8 buf[48]; - unsigned int len; + size_t len; u32 reg_addr, reg_val; int ret; @@ -1171,7 +1170,7 @@ 
static ssize_t ath10k_read_htt_stats_mask(struct file *file, { struct ath10k *ar = file->private_data; char buf[32]; - unsigned int len; + size_t len; len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask); @@ -1225,7 +1224,7 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, struct ath10k *ar = file->private_data; char buf[64]; u8 amsdu, ampdu; - unsigned int len; + size_t len; mutex_lock(&ar->conf_mutex); @@ -1285,7 +1284,7 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - unsigned int len; + size_t len; char buf[96]; len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n", @@ -1611,11 +1610,10 @@ static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - int len = 0; + size_t len; char buf[32]; - len = scnprintf(buf, sizeof(buf) - len, "%d\n", - ar->ani_enabled); + len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1640,11 +1638,10 @@ static ssize_t ath10k_read_nf_cal_period(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - unsigned int len; + size_t len; char buf[32]; - len = scnprintf(buf, sizeof(buf), "%d\n", - ar->debug.nf_cal_period); + len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1740,9 +1737,10 @@ void ath10k_debug_tpc_stats_process(struct ath10k *ar, } static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, - unsigned int j, char *buf, unsigned int *len) + unsigned int j, char *buf, size_t *len) { - unsigned int i, buf_len; + int i; + size_t buf_len; static const char table_str[][5] = { "CDD", "STBC", "TXBF" }; @@ -1782,7 +1780,8 @@ static void ath10k_tpc_stats_fill(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats, char *buf) { - unsigned int len, j, buf_len; + int j; + size_t len, buf_len; len = 0; buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; @@ -1916,7 +1915,7 @@ static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; - unsigned int len = strlen(buf); + size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -2340,7 +2339,7 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - unsigned int len = 0, buf_len = 4096; + size_t len = 0, buf_len = 4096; ssize_t ret_cnt; char *buf; @@ -2556,7 +2555,7 @@ void ath10k_dbg_dump(struct ath10k *ar, const void *buf, size_t len) { char linebuf[256]; - unsigned int linebuflen; + size_t linebuflen; const void *ptr; if (ath10k_debug_mask & mask) { diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 2ffc1fe4923b..c061d6958bd1 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -278,7 +278,7 @@ static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, { struct ath10k *ar = file->private_data; char *mode = ""; - unsigned int len; + size_t len; enum ath10k_spectral_mode spectral_mode; mutex_lock(&ar->conf_mutex); @@ -370,7 +370,7 @@ static ssize_t read_file_spectral_count(struct file *file, { struct ath10k *ar = file->private_data; char buf[32]; - unsigned int len; + size_t len; u8 
spectral_count; mutex_lock(&ar->conf_mutex); @@ -422,7 +422,8 @@ static ssize_t read_file_spectral_bins(struct file *file, { struct ath10k *ar = file->private_data; char buf[32]; - unsigned int len, bins, fft_size, bin_scale; + unsigned int bins, fft_size, bin_scale; + size_t len; mutex_lock(&ar->conf_mutex); -- cgit v1.2.3 From 6d21911356377d23b74b0336d228a83a2dfac5cd Mon Sep 17 00:00:00 2001 From: Amadeusz Sławiński Date: Mon, 13 Feb 2017 12:38:39 +0200 Subject: ath10k: fix comment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I wanted to take a look and it's apparently in another header Signed-off-by: Amadeusz Sławiński Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index cec713c99931..386aa51435f1 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -75,7 +75,7 @@ struct wmi_cmd_hdr { /* * There is no signed version of __le32, so for a temporary solution come - * up with our own version. The idea is from fs/ntfs/types.h. + * up with our own version. The idea is from fs/ntfs/endian.h. * * Use a_ prefix so that it doesn't conflict if we get proper support to * linux/types.h. -- cgit v1.2.3 From 1427228d5869f5804b03d47acfa4a88122572a78 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Mon, 13 Feb 2017 12:38:39 +0200 Subject: ath10k: fix napi crash during rmmod when probe firmware fails This fixes the crash below: when ath10k firmware probing fails, NAPI polling tries to access an rx ring resource which was never allocated. An easy way to reproduce this is to remove all the firmware files and load the ath10k modules; ath10k will then crash when calling 'rmmod ath10k_pci'. The fix is to call napi_enable() from ath10k_pci_hif_start() so that it matches napi_disable() being called from ath10k_pci_hif_stop(). Big thanks to Mohammed Shafi Shajakhan who debugged this and provided the first version of the fix. In this patch I just fix the actual problem in pci.c instead of having a workaround in core.c. BUG: unable to handle kernel NULL pointer dereference at (null) IP: __ath10k_htt_rx_ring_fill_n+0x19/0x230 [ath10k_core] __ath10k_htt_rx_ring_fill_n+0x19/0x230 [ath10k_core] Call Trace: [] ath10k_htt_rx_msdu_buff_replenish+0x42/0x90 [ath10k_core] [] ath10k_htt_txrx_compl_task+0x433/0x17d0 [ath10k_core] [] ? __wake_up_common+0x4d/0x80 [] ? cpu_load_update+0xdc/0x150 [] ?
ath10k_pci_read32+0xd/0x10 [ath10k_pci] [] ath10k_pci_napi_poll+0x47/0x110 [ath10k_pci] [] net_rx_action+0x20f/0x370 Reported-by: Ben Greear Fixes: 3c97f5de1f28 ("ath10k: implement NAPI support") Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 5d2f9b9922d3..6094372307aa 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1651,6 +1651,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); + napi_enable(&ar->napi); + ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); @@ -2535,7 +2537,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_err(ar, "could not wake up target CPU: %d\n", ret); goto err_ce; } - napi_enable(&ar->napi); return 0; -- cgit v1.2.3 From bfd0aeac52f74bfb44c0974131e44abb33a13e78 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Mon, 13 Feb 2017 14:59:09 +0100 Subject: bridge: fdb: converge fdb searching functions into one Before this patch we had 3 different fdb searching functions which was confusing. This patch reduces all of them to one - fdb_find_rcu(), and two flavors: br_fdb_find() which requires hash_lock and br_fdb_find_rcu which requires RCU. This makes it clear what needs to be used, we also remove two abusers of __br_fdb_get which called it under hash_lock and replace them with br_fdb_find(). Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_device.c | 2 +- net/bridge/br_fdb.c | 113 ++++++++++++++++++++---------------------------- net/bridge/br_input.c | 4 +- net/bridge/br_private.h | 5 ++- 4 files changed, 54 insertions(+), 70 deletions(-) diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d208ee9ab60a..ea71513fca21 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -79,7 +79,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) br_multicast_flood(mdst, skb, false, true); else br_flood(br, skb, BR_PKT_MULTICAST, false, true); - } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL) { + } else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) { br_forward(dst->dst, skb, false, true); } else { br_flood(br, skb, BR_PKT_UNICAST, false, true); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 5693168e88b6..04a53a0b733d 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -28,9 +28,6 @@ #include "br_private.h" static struct kmem_cache *br_fdb_cache __read_mostly; -static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, - const unsigned char *addr, - __u16 vid); static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid); static void fdb_notify(struct net_bridge *br, @@ -86,6 +83,43 @@ static void fdb_rcu_free(struct rcu_head *head) kmem_cache_free(br_fdb_cache, ent); } +static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, + const unsigned char *addr, + __u16 vid) +{ + struct net_bridge_fdb_entry *f; + + hlist_for_each_entry_rcu(f, head, hlist) + if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid) + break; + + return f; +} + +/* requires bridge hash_lock */ +static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br, + const unsigned char *addr, + __u16 vid) +{ + struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; + struct net_bridge_fdb_entry *fdb; + + 
rcu_read_lock(); + fdb = fdb_find_rcu(head, addr, vid); + rcu_read_unlock(); + + return fdb; +} + +struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br, + const unsigned char *addr, + __u16 vid) +{ + struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; + + return fdb_find_rcu(head, addr, vid); +} + /* When a static FDB entry is added, the mac address from the entry is * added to the bridge private HW address list and all required ports * are then updated with the new information. @@ -198,11 +232,10 @@ void br_fdb_find_delete_local(struct net_bridge *br, const struct net_bridge_port *p, const unsigned char *addr, u16 vid) { - struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *f; spin_lock_bh(&br->hash_lock); - f = fdb_find(head, addr, vid); + f = br_fdb_find(br, addr, vid); if (f && f->is_local && !f->added_by_user && f->dst == p) fdb_delete_local(br, p, f); spin_unlock_bh(&br->hash_lock); @@ -266,7 +299,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) spin_lock_bh(&br->hash_lock); /* If old entry was unassociated with any port, then delete it. */ - f = __br_fdb_get(br, br->dev->dev_addr, 0); + f = br_fdb_find(br, br->dev->dev_addr, 0); if (f && f->is_local && !f->dst && !f->added_by_user) fdb_delete_local(br, NULL, f); @@ -281,7 +314,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - f = __br_fdb_get(br, br->dev->dev_addr, v->vid); + f = br_fdb_find(br, br->dev->dev_addr, v->vid); if (f && f->is_local && !f->dst && !f->added_by_user) fdb_delete_local(br, NULL, f); fdb_insert(br, NULL, newaddr, v->vid); @@ -380,24 +413,6 @@ void br_fdb_delete_by_port(struct net_bridge *br, spin_unlock_bh(&br->hash_lock); } -/* No locking or refcounting, assumes caller has rcu_read_lock */ -struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, - const unsigned char *addr, - __u16 vid) -{ - struct net_bridge_fdb_entry *fdb; - - hlist_for_each_entry_rcu(fdb, - &br->hash[br_mac_hash(addr, vid)], hlist) { - if (ether_addr_equal(fdb->addr.addr, addr) && - fdb->vlan_id == vid) { - return fdb; - } - } - - return NULL; -} - #if IS_ENABLED(CONFIG_ATM_LANE) /* Interface used by ATM LANE hook to test * if an addr is on some other bridge port */ @@ -412,7 +427,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) if (!port) ret = 0; else { - fdb = __br_fdb_get(port->br, addr, 0); + fdb = br_fdb_find_rcu(port->br, addr, 0); ret = fdb && fdb->dst && fdb->dst->dev != dev && fdb->dst->state == BR_STATE_FORWARDING; } @@ -474,34 +489,6 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, return num; } -static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, - const unsigned char *addr, - __u16 vid) -{ - struct net_bridge_fdb_entry *fdb; - - hlist_for_each_entry(fdb, head, hlist) { - if (ether_addr_equal(fdb->addr.addr, addr) && - fdb->vlan_id == vid) - return fdb; - } - return NULL; -} - -static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, - const unsigned char *addr, - __u16 vid) -{ - struct net_bridge_fdb_entry *fdb; - - hlist_for_each_entry_rcu(fdb, head, hlist) { - if (ether_addr_equal(fdb->addr.addr, addr) && - fdb->vlan_id == vid) - return fdb; - } - return NULL; -} - static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, struct net_bridge_port *source, const unsigned char *addr, @@ -535,7 +522,7 @@ static int fdb_insert(struct 
net_bridge *br, struct net_bridge_port *source, if (!is_valid_ether_addr(addr)) return -EINVAL; - fdb = fdb_find(head, addr, vid); + fdb = br_fdb_find(br, addr, vid); if (fdb) { /* it is okay to have multiple ports with same * address, just use the first one. @@ -608,7 +595,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, } } else { spin_lock(&br->hash_lock); - if (likely(!fdb_find(head, addr, vid))) { + if (likely(!fdb_find_rcu(head, addr, vid))) { fdb = fdb_create(head, source, addr, vid, 0, 0); if (fdb) { if (unlikely(added_by_user)) @@ -792,7 +779,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, return -EINVAL; } - fdb = fdb_find(head, addr, vid); + fdb = br_fdb_find(br, addr, vid); if (fdb == NULL) { if (!(flags & NLM_F_CREATE)) return -ENOENT; @@ -942,10 +929,9 @@ out: static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid) { - struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *fdb; - fdb = fdb_find(head, addr, vid); + fdb = br_fdb_find(br, addr, vid); if (!fdb) return -ENOENT; @@ -969,10 +955,9 @@ static int fdb_delete_by_addr_and_port(struct net_bridge_port *p, const u8 *addr, u16 vlan) { struct net_bridge *br = p->br; - struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)]; struct net_bridge_fdb_entry *fdb; - fdb = fdb_find(head, addr, vlan); + fdb = br_fdb_find(br, addr, vlan); if (!fdb || fdb->dst != p) return -ENOENT; @@ -1117,7 +1102,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, spin_lock_bh(&br->hash_lock); head = &br->hash[br_mac_hash(addr, vid)]; - fdb = fdb_find(head, addr, vid); + fdb = br_fdb_find(br, addr, vid); if (!fdb) { fdb = fdb_create(head, p, addr, vid, 0, 0); if (!fdb) { @@ -1145,15 +1130,13 @@ err_unlock: int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid) { - struct hlist_head *head; struct net_bridge_fdb_entry *fdb; int err = 0; ASSERT_RTNL(); spin_lock_bh(&br->hash_lock); - head = &br->hash[br_mac_hash(addr, vid)]; - fdb = fdb_find(head, addr, vid); + fdb = br_fdb_find(br, addr, vid); if (fdb && fdb->added_by_external_learn) fdb_delete(br, fdb); else diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 4615a9b3e26c..236f34244dbe 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -114,7 +114,7 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, return; } - f = __br_fdb_get(br, n->ha, vid); + f = br_fdb_find_rcu(br, n->ha, vid); if (f && ((p->flags & BR_PROXYARP) || (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) { arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip, @@ -189,7 +189,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb } break; case BR_PKT_UNICAST: - dst = __br_fdb_get(br, dest, vid); + dst = br_fdb_find_rcu(br, dest, vid); default: break; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 1cbbf63a5ef7..61368186edea 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -507,8 +507,9 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr); void br_fdb_cleanup(struct work_struct *work); void br_fdb_delete_by_port(struct net_bridge *br, const struct net_bridge_port *p, u16 vid, int do_all); -struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, - const unsigned char *addr, __u16 vid); +struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br, + const 
unsigned char *addr, + __u16 vid); int br_fdb_test_addr(struct net_device *dev, unsigned char *addr); int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count, unsigned long off); -- cgit v1.2.3 From 410b3d48f5111a28bb8d4c3d3dc5984c1baf7fc9 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Mon, 13 Feb 2017 14:59:10 +0100 Subject: bridge: fdb: add proper lock checks in searching functions In order to avoid new errors add checks to br_fdb_find and fdb_find_rcu functions. The first requires hash_lock, the second obviously RCU. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_fdb.c | 4 ++++ net/bridge/br_private.h | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 04a53a0b733d..da5ed7d0a3c8 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -89,6 +89,8 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, { struct net_bridge_fdb_entry *f; + WARN_ON_ONCE(!rcu_read_lock_held()); + hlist_for_each_entry_rcu(f, head, hlist) if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid) break; @@ -104,6 +106,8 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br, struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *fdb; + WARN_ON_ONCE(!br_hash_lock_held(br)); + rcu_read_lock(); fdb = fdb_find_rcu(head, addr, vid); rcu_read_unlock(); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 61368186edea..2288fca7756c 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -531,6 +531,15 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid); +static inline bool br_hash_lock_held(struct net_bridge *br) +{ +#ifdef CONFIG_LOCKDEP + return lockdep_is_held(&br->hash_lock); +#else + return true; +#endif +} + /* br_forward.c */ enum br_pkt_type { BR_PKT_UNICAST, -- cgit v1.2.3 From 5019ab50f26c247abb510a80635b25b64b6c1f4b Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Mon, 13 Feb 2017 14:59:11 +0100 Subject: bridge: fdb: converge fdb_delete_by functions into one We can simplify the logic of entries pointing to the bridge by converging the fdb_delete_by functions, this would allow us to use the same function for both cases since the fdb's dst is set to NULL if it is pointing to the bridge thus we can always check for a port match. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 62 +++++++++++++---------------------------------------- 1 file changed, 15 insertions(+), 47 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index da5ed7d0a3c8..4ac11574117c 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -930,35 +930,10 @@ out: return err; } -static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, - u16 vid) -{ - struct net_bridge_fdb_entry *fdb; - - fdb = br_fdb_find(br, addr, vid); - if (!fdb) - return -ENOENT; - - fdb_delete(br, fdb); - return 0; -} - -static int __br_fdb_delete_by_addr(struct net_bridge *br, - const unsigned char *addr, u16 vid) -{ - int err; - - spin_lock_bh(&br->hash_lock); - err = fdb_delete_by_addr(br, addr, vid); - spin_unlock_bh(&br->hash_lock); - - return err; -} - -static int fdb_delete_by_addr_and_port(struct net_bridge_port *p, +static int fdb_delete_by_addr_and_port(struct net_bridge *br, + const struct net_bridge_port *p, const u8 *addr, u16 vlan) { - struct net_bridge *br = p->br; struct net_bridge_fdb_entry *fdb; fdb = br_fdb_find(br, addr, vlan); @@ -966,17 +941,19 @@ static int fdb_delete_by_addr_and_port(struct net_bridge_port *p, return -ENOENT; fdb_delete(br, fdb); + return 0; } -static int __br_fdb_delete(struct net_bridge_port *p, +static int __br_fdb_delete(struct net_bridge *br, + const struct net_bridge_port *p, const unsigned char *addr, u16 vid) { int err; - spin_lock_bh(&p->br->hash_lock); - err = fdb_delete_by_addr_and_port(p, addr, vid); - spin_unlock_bh(&p->br->hash_lock); + spin_lock_bh(&br->hash_lock); + err = fdb_delete_by_addr_and_port(br, p, addr, vid); + spin_unlock_bh(&br->hash_lock); return err; } @@ -989,7 +966,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; struct net_bridge_vlan *v; - struct net_bridge *br = NULL; + struct net_bridge *br; int err; if (dev->priv_flags & IFF_EBRIDGE) { @@ -1003,6 +980,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], return -EINVAL; } vg = nbp_vlan_group(p); + br = p->br; } if (vid) { @@ -1012,30 +990,20 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], return -EINVAL; } - if (dev->priv_flags & IFF_EBRIDGE) - err = __br_fdb_delete_by_addr(br, addr, vid); - else - err = __br_fdb_delete(p, addr, vid); + err = __br_fdb_delete(br, p, addr, vid); } else { err = -ENOENT; - if (dev->priv_flags & IFF_EBRIDGE) - err = __br_fdb_delete_by_addr(br, addr, 0); - else - err &= __br_fdb_delete(p, addr, 0); - + err &= __br_fdb_delete(br, p, addr, 0); if (!vg || !vg->num_vlans) - goto out; + return err; list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - if (dev->priv_flags & IFF_EBRIDGE) - err = __br_fdb_delete_by_addr(br, addr, v->vid); - else - err &= __br_fdb_delete(p, addr, v->vid); + err &= __br_fdb_delete(br, p, addr, v->vid); } } -out: + return err; } -- cgit v1.2.3 From 6f9f6ec2e0acea398caf83bf750439c69068de43 Mon Sep 17 00:00:00 2001 From: Andrew Rybchenko Date: Mon, 13 Feb 2017 14:57:39 +0000 Subject: sfc: MSI-X is the only interrupt mode for EF10 VFs Add min_interrupt_mode specification per NIC type. It is a bit confusing because of "highest interrupt mode is less capable". Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 2 ++ drivers/net/ethernet/sfc/efx.c | 11 +++++++++-- drivers/net/ethernet/sfc/net_driver.h | 5 ++++- drivers/net/ethernet/sfc/siena.c | 1 + 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 761ccc64eee9..b29a819f721d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6456,6 +6456,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, .can_rx_scatter = true, .always_rx_scatter = true, + .min_interrupt_mode = EFX_INT_MODE_MSIX, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, @@ -6588,6 +6589,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .can_rx_scatter = true, .always_rx_scatter = true, .option_descriptors = true, + .min_interrupt_mode = EFX_INT_MODE_LEGACY, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index cb8e2c3f806a..88a9f97c0c4f 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2935,7 +2935,7 @@ static const struct efx_phy_operations efx_dummy_phy_operations = { static int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, struct net_device *net_dev) { - int i; + int rc = -ENOMEM, i; /* Initialise common structures */ INIT_LIST_HEAD(&efx->node); @@ -2976,8 +2976,15 @@ static int efx_init_struct(struct efx_nic *efx, } /* Higher numbered interrupt modes are less capable! */ + if (WARN_ON_ONCE(efx->type->max_interrupt_mode > + efx->type->min_interrupt_mode)) { + rc = -EIO; + goto fail; + } efx->interrupt_mode = max(efx->type->max_interrupt_mode, interrupt_mode); + efx->interrupt_mode = min(efx->type->min_interrupt_mode, + interrupt_mode); /* Would be good to use the net_dev name, but we're too early */ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", @@ -2990,7 +2997,7 @@ static int efx_init_struct(struct efx_nic *efx, fail: efx_fini_struct(efx); - return -ENOMEM; + return rc; } static void efx_fini_struct(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5cb8112b1cf3..c0537ea06c9a 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1142,8 +1142,10 @@ struct efx_udp_tunnel { * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers * @option_descriptors: NIC supports TX option descriptors + * @min_interrupt_mode: Lowest capability interrupt mode supported + * from &enum efx_int_mode. * @max_interrupt_mode: Highest capability interrupt mode supported - * from &enum efx_init_mode. + * from &enum efx_int_mode. 
* @timer_period_max: Maximum period of interrupt timer (in ticks) * @offload_features: net_device feature flags for protocol offload * features implemented in hardware @@ -1306,6 +1308,7 @@ struct efx_nic_type { bool can_rx_scatter; bool always_rx_scatter; bool option_descriptors; + unsigned int min_interrupt_mode; unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index b1d36df71ecb..a617f657eae3 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -1069,6 +1069,7 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_buffer_padding = 0, .can_rx_scatter = true, .option_descriptors = false, + .min_interrupt_mode = EFX_INT_MODE_LEGACY, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -- cgit v1.2.3 From 62980cb6dd2fd2dca34060c4fd77ebafa878272a Mon Sep 17 00:00:00 2001 From: Andrew Rybchenko Date: Mon, 13 Feb 2017 14:59:04 +0000 Subject: sfc: only fall back to a lower interrupt mode if it is supported If we fail to probe interrupts with our minimum mode, return that error. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 88a9f97c0c4f..8c4c273643dc 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1420,9 +1420,12 @@ static int efx_probe_interrupts(struct efx_nic *efx) xentries, 1, n_channels); if (rc < 0) { /* Fall back to single channel MSI */ - efx->interrupt_mode = EFX_INT_MODE_MSI; netif_err(efx, drv, efx->net_dev, "could not enable MSI-X\n"); + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI) + efx->interrupt_mode = EFX_INT_MODE_MSI; + else + return rc; } else if (rc < n_channels) { netif_err(efx, drv, efx->net_dev, "WARNING: Insufficient MSI-X vectors" @@ -1465,7 +1468,10 @@ static int efx_probe_interrupts(struct efx_nic *efx) } else { netif_err(efx, drv, efx->net_dev, "could not enable MSI\n"); - efx->interrupt_mode = EFX_INT_MODE_LEGACY; + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY) + efx->interrupt_mode = EFX_INT_MODE_LEGACY; + else + return rc; } } -- cgit v1.2.3 From 1c61bedc0a725f0b019a70bba07cb2cf05c4ae2a Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Mon, 13 Feb 2017 12:38:40 +0200 Subject: ath10k: fetch firmware images in a loop To make it easier to handle minimum and maximum firmware API numbers convert the firmware fetch functionality to a loop. If no firmware image is found print an error with minimum and maximum API numbers and the name of firmware directory. This is needed when we switch to using request_firmware_direct() which doesn't print any errors anymore. Also add a new function for creating the fw file name dynamically which makes it easier to add new bus support, for example SDIO and USB, later. 
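As a rough sketch of how that helper could later fold a bus type into the name (say "firmware-sdio-5.bin"): the ATH10K_BUS_* values, the ar->hif.bus field and the ath10k_bus_str() helper below are assumptions for illustration only and are not part of this patch.

static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
				    size_t fw_name_len, int fw_api)
{
	switch (ar->hif.bus) {		/* hypothetical bus identifier */
	case ATH10K_BUS_SDIO:
	case ATH10K_BUS_USB:
		scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin",
			  ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus),
			  fw_api);
		break;
	default:			/* PCI keeps the current "firmware-N.bin" */
		scnprintf(fw_name, fw_name_len, "%s-%d.bin",
			  ATH10K_FW_FILE_BASE, fw_api);
		break;
	}
}

The fetch loop itself would not change: it still walks fw_api down from ATH10K_FW_API_MAX to ATH10K_FW_API_MIN and stops at the first name the firmware loader can satisfy.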
Signed-off-by: Erik Stromdahl [kvalo@qca.qualcomm.com: remove sdio/usb part, new error message, clarify commit log] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 51 +++++++++++++++------------------- drivers/net/wireless/ath/ath10k/hw.h | 4 +++ 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 5ebe64a6e991..bac1a7f2ecf1 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1358,44 +1358,39 @@ err: return ret; } +static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name, + size_t fw_name_len, int fw_api) +{ + scnprintf(fw_name, fw_name_len, "%s-%d.bin", ATH10K_FW_FILE_BASE, fw_api); +} + static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { - int ret; + int ret, i; + char fw_name[100]; /* calibration file is optional, don't check for any errors */ ath10k_fetch_cal_file(ar); - ar->fw_api = 5; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE, - &ar->normal_mode_fw.fw_file); - if (ret == 0) - goto success; - - ar->fw_api = 4; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE, - &ar->normal_mode_fw.fw_file); - if (ret == 0) - goto success; + for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) { + ar->fw_api = i; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", + ar->fw_api); - ar->fw_api = 3; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api); + ret = ath10k_core_fetch_firmware_api_n(ar, fw_name, + &ar->normal_mode_fw.fw_file); + if (!ret) + goto success; + } - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE, - &ar->normal_mode_fw.fw_file); - if (ret == 0) - goto success; + /* we end up here if we couldn't fetch any firmware */ - ar->fw_api = 2; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d", + ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir, + ret); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE, - &ar->normal_mode_fw.fw_file); - if (ret) - return ret; + return ret; success: ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 38aa7c95732e..f0fda0f2b3b4 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -128,6 +128,10 @@ enum qca9377_chip_id_rev { #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 +#define ATH10K_FW_FILE_BASE "firmware" +#define ATH10K_FW_API_MAX 5 +#define ATH10K_FW_API_MIN 2 + #define ATH10K_FW_API2_FILE "firmware-2.bin" #define ATH10K_FW_API3_FILE "firmware-3.bin" -- cgit v1.2.3 From 310c01afae0109c37c871887a7c64bc3e4a9e636 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Mon, 13 Feb 2017 12:38:41 +0200 Subject: ath10k: add directory to board data error message This way user has a better idea what file exactly is missing. This is needed when we switch to using request_firmware_direct() which doesn't print any errors anymore. 
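A minimal sketch of the pattern this message supports, with an illustrative QCA988X path rather than a real lookup: once request_firmware_direct() is in use, the firmware core stays silent about a missing file, so the caller has to name both candidate board files and the directory itself.

/* Needs <linux/firmware.h> and <linux/device.h>; the path is illustrative. */
static int example_fetch_board(struct device *dev, const struct firmware **fw)
{
	int ret;

	ret = request_firmware_direct(fw, "ath10k/QCA988X/hw2.0/board.bin", dev);
	if (ret) {
		/* the core printed nothing, so report the failure here */
		dev_err(dev, "failed to fetch board-2.bin or board.bin from %s: %d\n",
			"ath10k/QCA988X/hw2.0", ret);
		return ret;
	}

	/* board data is now available in (*fw)->data / (*fw)->size;
	 * the caller releases it with release_firmware(*fw) when done.
	 */
	return 0;
}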
Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index bac1a7f2ecf1..51d91837a980 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1168,7 +1168,8 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar) ar->bd_api = 1; ret = ath10k_core_fetch_board_data_api_1(ar); if (ret) { - ath10k_err(ar, "failed to fetch board data\n"); + ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n", + ar->hw_params.fw.dir); return ret; } -- cgit v1.2.3 From 9f5bcfe93315d75da4cc46bd30b536966559359a Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 13 Feb 2017 12:38:42 +0200 Subject: ath10k: silence firmware file probing warnings Firmware files are versioned to prevent older driver instances to load unsupported firmware blobs. This is reflected with a fallback logic which attempts to load several firmware files. This however produced a lot of unnecessary warnings sometimes confusing users and leading them to rename firmware files making things even more confusing. Hence use request_firmware_direct() which does not produce extra warnings. This shouldn't really break anything because most modern systems don't rely on udev/hotplug helpers to load firmware files anymore. For example it was confirmed that LEDE does not user helper. This also fixes a 60 second delay per _each_ unexistent firmware/calibration file with distros which have CONFIG_FW_LOADER_USER_HELPER_FALLBACK enabled, RHEL being a notable example. Using ath10k with firmware-2.bin this might end up into a five minute delay in boot. Signed-off-by: Michal Kazior [kvalo@qca.qualcomm.com: add more info to the commit log] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 11 +++++------ drivers/net/wireless/ath/ath10k/testmode.c | 5 ++++- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 51d91837a980..873b134ee722 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -456,7 +456,10 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, dir = "."; snprintf(filename, sizeof(filename), "%s/%s", dir, file); - ret = request_firmware(&fw, filename, ar->dev); + ret = request_firmware_direct(&fw, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", + filename, ret); + if (ret) return ERR_PTR(ret); @@ -1190,12 +1193,8 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, /* first fetch the firmware file (firmware-*.bin) */ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); - if (IS_ERR(fw_file->firmware)) { - ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n", - ar->hw_params.fw.dir, name, - PTR_ERR(fw_file->firmware)); + if (IS_ERR(fw_file->firmware)) return PTR_ERR(fw_file->firmware); - } data = fw_file->firmware->data; len = fw_file->firmware->size; diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index ed85f938e3c0..8bb36c18a749 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -150,7 +150,10 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE); /* load utf firmware image */ - ret = 
request_firmware(&fw_file->firmware, filename, ar->dev); + ret = request_firmware_direct(&fw_file->firmware, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode fw request '%s': %d\n", + filename, ret); + if (ret) { ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", filename, ret); -- cgit v1.2.3 From 7be52c03bbf7c8f53211ed13810d64dcb2bc7168 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Mon, 13 Feb 2017 12:38:43 +0200 Subject: ath10k: convert warning about non-existent OTP board id to debug message Currently ath10k unncessarily warns about board id not available from OTP: ath10k_pci 0000:02:00.0: pci irq msi oper_irq_mode 2 irq_mode 0 reset_mode 0 ath10k_pci 0000:02:00.0: qca988x hw2.0 target 0x4100016c chip_id 0x043202ff sub 0000:0000 ath10k_pci 0000:02:00.0: kconfig debug 1 debugfs 1 tracing 1 dfs 1 testmode 1 ath10k_pci 0000:02:00.0: firmware ver 10.2.4.70.9-2 api 5 features no-p2p,raw-mode crc32 b8d50af5 ath10k_pci 0000:02:00.0: board id is not exist in otp, ignore it ath10k_pci 0000:02:00.0: board_file api 1 bmi_id N/A crc32 bebc7c08 ath10k_pci 0000:02:00.0: htt-ver 2.1 wmi-op 5 htt-op 2 cal otp max-sta 128 raw 0 hwcrypto 1 But not all boards have the board id in OTP so this is not a problem and no need to confuse the user with that info. So this can be safely changed to a debug message. Also fix grammar in the debug message. Fixes: d2e202c06ca4 ("ath10k: ignore configuring the incorrect board_id") Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 873b134ee722..499ee7b41771 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -701,7 +701,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || (board_id == 0)) { - ath10k_warn(ar, "board id is not exist in otp, ignore it\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "board id does not exist in otp, ignore it\n"); return -EOPNOTSUPP; } -- cgit v1.2.3 From a532293fcba4b6b041c1a91674bc284d51280eb7 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Fri, 10 Feb 2017 12:27:09 -0800 Subject: ath10k: fix the garage chars in board file name creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The variant[] string will be valid only if the bdf_ext is set. The string memory needs to be null-terminated to avoid the undefined garbage appended by the subsequent board file name creation. 
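The bug class is easy to reproduce outside the driver; a stand-alone sketch (not the driver code, identifiers invented for illustration):

/* An automatic char array holds indeterminate bytes until it is written.
 * If the branch that fills it is skipped, appending it leaks stack garbage;
 * initializing it with "= { 0 }" makes the empty case a valid "" string.
 */
static void build_name(char *name, size_t len, const char *bdf_ext)
{
	char variant[32] = { 0 };	/* drop "= { 0 }" and this may hold junk */

	if (bdf_ext[0] != '\0')
		snprintf(variant, sizeof(variant), ",variant=%s", bdf_ext);

	/* safe either way now: variant is "" when no extension is set */
	snprintf(name, len, "bus=pci,vendor=168c,device=003e%s", variant);
}

The driver log quoted below shows exactly that kind of trailing junk when the buffer is left uninitialized: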
ath10k_pci 0000:04:00.0: failed to fetch board data for "bus=pci,vendor=168c,device=003e,subsystem-vendor=168c,subsystem-device=3363��P�����" from ath10k/QCA6174/hw3.0/board-2.bin Fixes: f2593cb1b291 ("ath10k: Search SMBIOS for OEM board file extension") Reported-by: Kalle Valo Signed-off-by: Ryan Hsu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 499ee7b41771..59729aa8cd82 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1126,7 +1126,7 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len) { /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ - char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, -- cgit v1.2.3 From 1235a3b66cbfe4262b52137115056829c1760120 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 9 Feb 2017 12:21:17 +0100 Subject: ath6kl: Use net_device_stats from struct net_device Instead of using a private copy of struct net_device_stats in struct ath6kl_vif, use stats from struct net_device. Also remove the now unnecessary .ndo_get_stats function. Signed-off-by: Tobias Klauser Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/core.h | 1 - drivers/net/wireless/ath/ath6kl/main.c | 8 -------- drivers/net/wireless/ath/ath6kl/txrx.c | 22 +++++++++++----------- 3 files changed, 11 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index ac25f1781b42..87e99c12d4ba 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -641,7 +641,6 @@ struct ath6kl_vif { u32 txe_intvl; u16 bg_scan_period; u8 assoc_bss_dtim_period; - struct net_device_stats net_stats; struct target_stats target_stats; struct wmi_connect_cmd profile; u16 rsn_capab; diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 1af3fed5a72c..91ee542de3d7 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -1113,13 +1113,6 @@ static int ath6kl_close(struct net_device *dev) return 0; } -static struct net_device_stats *ath6kl_get_stats(struct net_device *dev) -{ - struct ath6kl_vif *vif = netdev_priv(dev); - - return &vif->net_stats; -} - static int ath6kl_set_features(struct net_device *dev, netdev_features_t features) { @@ -1285,7 +1278,6 @@ static const struct net_device_ops ath6kl_netdev_ops = { .ndo_open = ath6kl_open, .ndo_stop = ath6kl_close, .ndo_start_xmit = ath6kl_data_tx, - .ndo_get_stats = ath6kl_get_stats, .ndo_set_features = ath6kl_set_features, .ndo_set_rx_mode = ath6kl_set_multicast_list, }; diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 9df41d5e3249..a531e0c5c1e2 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -405,7 +405,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) skb = skb_realloc_headroom(skb, dev->needed_headroom); kfree_skb(tmp_skb); if (skb == NULL) { - vif->net_stats.tx_dropped++; + dev->stats.tx_dropped++; return 0; } } @@ -520,8 +520,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) fail_tx: dev_kfree_skb(skb); - vif->net_stats.tx_dropped++; - 
vif->net_stats.tx_aborted_errors++; + dev->stats.tx_dropped++; + dev->stats.tx_aborted_errors++; return 0; } @@ -767,7 +767,7 @@ void ath6kl_tx_complete(struct htc_target *target, /* a packet was flushed */ flushing[if_idx] = true; - vif->net_stats.tx_errors++; + vif->ndev->stats.tx_errors++; if (status != -ENOSPC && status != -ECANCELED) ath6kl_warn("tx complete error: %d\n", status); @@ -783,8 +783,8 @@ void ath6kl_tx_complete(struct htc_target *target, eid, "OK"); flushing[if_idx] = false; - vif->net_stats.tx_packets++; - vif->net_stats.tx_bytes += skb->len; + vif->ndev->stats.tx_packets++; + vif->ndev->stats.tx_bytes += skb->len; } ath6kl_tx_clear_node_map(vif, eid, map_no); @@ -1365,8 +1365,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) */ spin_lock_bh(&vif->if_lock); - vif->net_stats.rx_packets++; - vif->net_stats.rx_bytes += packet->act_len; + vif->ndev->stats.rx_packets++; + vif->ndev->stats.rx_bytes += packet->act_len; spin_unlock_bh(&vif->if_lock); @@ -1395,8 +1395,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) ((packet->act_len < min_hdr_len) || (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) { ath6kl_info("frame len is too short or too long\n"); - vif->net_stats.rx_errors++; - vif->net_stats.rx_length_errors++; + vif->ndev->stats.rx_errors++; + vif->ndev->stats.rx_length_errors++; dev_kfree_skb(skb); return; } @@ -1619,7 +1619,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) return; } } else if (!is_broadcast_ether_addr(datap->h_dest)) { - vif->net_stats.multicast++; + vif->ndev->stats.multicast++; } ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); -- cgit v1.2.3 From a6e56d749f1b4c89f24d8cb008385d648fa52582 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 12 Feb 2017 14:29:31 +0100 Subject: ath9k: clean up and fix ath_tx_count_airtime MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ath_tx_count_airtime is doing a lot of unnecessary work: - Redundant station lookup - Redundant rcu_read_lock/unlock - Useless memcpy of bf->rates - Useless NULL check of bf->bf_mpdu - Redundant lookup of the skb tid Additionally, it tries to look up the mac80211 queue index from the txq, which fails if the frame was delivered via the power save queue. 
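For reference, the quantity being accounted does not change; per completed frame the airtime charged against the station's deficit is still computed as below (the numbers are made up for illustration):

/*
 *   airtime = ts->duration * (ts->ts_longretry + 1)
 *           + sum over i < ts->ts_rateindex of
 *             ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i) * bf->rates[i].count
 *
 *   e.g.  228 us * (1 + 1) =  456 us at the final rate
 *       + 380 us * 2       =  760 us for two attempts at an earlier rate
 *                    total = 1216 us deducted from an->airtime_deficit[q]
 */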
This patch fixes all of these issues by passing down the right set of pointers instead of doing extra work Cc: stable@vger.kernel.org Fixes: 63fefa050477 ("ath9k: Introduce airtime fairness scheduling between stations") Signed-off-by: Felix Fietkau Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/xmit.c | 52 +++++++++++------------------------ 1 file changed, 16 insertions(+), 36 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 11073cf87909..396bf05c6bf6 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -699,51 +699,31 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); } -static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_txq *txq, - struct ath_buf *bf, struct ath_tx_status *ts) +static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an, + struct ath_atx_tid *tid, struct ath_buf *bf, + struct ath_tx_status *ts) { - struct ath_node *an; - struct ath_acq *acq = &sc->cur_chan->acq[txq->mac80211_qnum]; - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - struct ieee80211_hw *hw = sc->hw; - struct ieee80211_tx_rate rates[4]; - struct ieee80211_sta *sta; - int i; + struct ath_txq *txq = tid->txq; u32 airtime = 0; - - skb = bf->bf_mpdu; - if(!skb) - return; - - hdr = (struct ieee80211_hdr *)skb->data; - memcpy(rates, bf->rates, sizeof(rates)); - - rcu_read_lock(); - - sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); - if(!sta) - goto exit; - - - an = (struct ath_node *) sta->drv_priv; + int i; airtime += ts->duration * (ts->ts_longretry + 1); + for(i = 0; i < ts->ts_rateindex; i++) { + int rate_dur = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i); + airtime += rate_dur * bf->rates[i].count; + } - for(i=0; i < ts->ts_rateindex; i++) - airtime += ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i) * rates[i].count; + if (sc->airtime_flags & AIRTIME_USE_TX) { + int q = txq->mac80211_qnum; + struct ath_acq *acq = &sc->cur_chan->acq[q]; - if (!!(sc->airtime_flags & AIRTIME_USE_TX)) { spin_lock_bh(&acq->lock); - an->airtime_deficit[txq->mac80211_qnum] -= airtime; - if (an->airtime_deficit[txq->mac80211_qnum] <= 0) - __ath_tx_queue_tid(sc, ath_get_skb_tid(sc, an, skb)); + an->airtime_deficit[q] -= airtime; + if (an->airtime_deficit[q] <= 0) + __ath_tx_queue_tid(sc, tid); spin_unlock_bh(&acq->lock); } ath_debug_airtime(sc, an, 0, airtime); - -exit: - rcu_read_unlock(); } static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, @@ -767,13 +747,13 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, ts->ts_rateindex); - ath_tx_count_airtime(sc, txq, bf, ts); hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); if (sta) { struct ath_node *an = (struct ath_node *)sta->drv_priv; tid = ath_get_skb_tid(sc, an, bf->bf_mpdu); + ath_tx_count_airtime(sc, an, tid, bf, ts); if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) tid->clear_ps_filter = true; } -- cgit v1.2.3 From 07460b92db7c2d0808cfaae02b325a52c2f7c7a8 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 13 Feb 2017 16:56:48 +0100 Subject: ath9k: Access rchan::buf only with per_cpu helper The relayfs was changed to use per CPU constructs to handle the rchan buffers. 
But the users of the rchan buffers in other parts of the kernel were not modified. This caused crashes like BUG: unable to handle kernel paging request at 00003a5198a0b910 IP: [] ath_cmn_process_fft+0xea/0x610 PGD 0 [ 179.522449] Oops: 0000 [#1] SMP Modules linked in: CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.9.0-rc5 #1 [...] Call Trace: [ 179.656426] [] ? ath_rx_tasklet+0x2f3/0xd10 [] ? ath9k_tasklet+0x1b6/0x230 [] ? tasklet_action+0xf1/0x100 [] ? __do_softirq+0xef/0x284 [] ? irq_exit+0xae/0xb0 [] ? do_IRQ+0x4f/0xd0 [] ? common_interrupt+0x82/0x82 [ 179.703152] [] ? poll_idle+0x2d/0x57 [] ? sched_clock+0x5/0x10 [] ? cpuidle_enter_state+0xf6/0x2d0 [] ? cpu_startup_entry+0x14e/0x230 [] ? start_kernel+0x461/0x481 [] ? early_idt_handler_array+0x120/0x120 [] ? x86_64_start_kernel+0x14c/0x170 Code: 31 db 41 be ff ff ff ff 4c 8b 26 48 8b 6e 08 49 8b 84 24 60 05 00 00 48 8b 00 0f b7 40 04 66 89 44 24 48 eb 11 48 8b 55 40 48 98 <48> 8b 3c c2 e8 ad a0 a4 ff 01 c3 41 8d 56 01 be 00 02 00 00 48 RIP [] ath_cmn_process_fft+0xea/0x610 RSP CR2: 00003a5198a0b910 Fixes: 017c59c042d0 ("relay: Use per CPU constructs for the relay channel buffer pointers") Cc: Akash Goel Cc: Nick Kossifidis Reported-by: Mathias Kretschmer Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/common-spectral.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 789a3dbe8341..0ffa23a61568 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -482,7 +482,7 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv) struct rchan *rc = spec_priv->rfs_chan_spec_scan; for_each_online_cpu(i) - ret += relay_buf_full(rc->buf[i]); + ret += relay_buf_full(*per_cpu_ptr(rc->buf, i)); i = num_online_cpus(); -- cgit v1.2.3 From 2493b842f258e14938f278e44ecc26970dfabbf0 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 13 Feb 2017 10:56:57 -0800 Subject: sunvnet: make sunvnet common code dynamically loadable When the sunvnet_common code was split out for use by both sunvnet and the newer ldmvsw, it was made into a static kernel library, which limits the usefulness of sunvnet and ldmvsw as loadables, since most of the real work is being done in the shared code. Also, this is simply dead code in kernels that aren't running the LDoms. This patch makes the sunvnet_common into a dynamically loadable module and makes sunvnet and ldmvsw dependent on sunvnet_common. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/Kconfig | 8 ++++++-- drivers/net/ethernet/sun/sunvnet_common.c | 5 +++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig index a4b40e3015e5..b2caf5132bd2 100644 --- a/drivers/net/ethernet/sun/Kconfig +++ b/drivers/net/ethernet/sun/Kconfig @@ -70,19 +70,23 @@ config CASSINI . config SUNVNET_COMMON - bool + tristate "Common routines to support Sun Virtual Networking" depends on SUN_LDOMS - default y if SUN_LDOMS + default m config SUNVNET tristate "Sun Virtual Network support" + default m depends on SUN_LDOMS + depends on SUNVNET_COMMON ---help--- Support for virtual network devices under Sun Logical Domains. 
config LDMVSW tristate "Sun4v LDoms Virtual Switch support" + default m depends on SUN_LDOMS + depends on SUNVNET_COMMON ---help--- Support for virtual switch devices under Sun4v Logical Domains. This driver adds a network interface for every vsw-port node diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 191c8ade6155..c71f0007ad5b 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -37,6 +37,11 @@ */ #define VNET_MAX_RETRIES 10 +MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Sun LDOM virtual network support library"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.1"); + static int __vnet_tx_trigger(struct vnet_port *port, u32 start); static void vnet_port_reset(struct vnet_port *port); -- cgit v1.2.3 From d4aa89cc2bbe021722c946eb11b21ebb0f13c825 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Mon, 13 Feb 2017 10:56:58 -0800 Subject: sunvnet: remove unused variable in maybe_tx_wakeup The vio_dring_state *dr variable is unused in maybe_tx_wakeup(). As the comments indicate, we call maybe_tx_wakeup() whenever we get a STOPPED LDC message on the port. If the queue is stopped, we want to wake it up so that we will send another START message at the next TX and trigger the consumer to drain the dring. Signed-off-by: Sowmini Varadhan Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunvnet_common.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index c71f0007ad5b..0f940f0e488d 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -719,12 +719,8 @@ static void maybe_tx_wakeup(struct vnet_port *port) txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), port->q_index); __netif_tx_lock(txq, smp_processor_id()); - if (likely(netif_tx_queue_stopped(txq))) { - struct vio_dring_state *dr; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + if (likely(netif_tx_queue_stopped(txq))) netif_tx_wake_queue(txq); - } __netif_tx_unlock(txq); } -- cgit v1.2.3 From f2f3e210bffe5c8f8b30d0b0c7b0f733ff5db334 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 13 Feb 2017 10:56:59 -0800 Subject: sunvnet: update version and version printing There have been several changes since the first version of this code, so we bump the version number. While we're at it, we can simplify the version printing a bit and drop a couple lines of code. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunvnet.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 5356a7074796..4cc2571f71c6 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -38,11 +38,11 @@ #define VNET_TX_TIMEOUT (5 * HZ) #define DRV_MODULE_NAME "sunvnet" -#define DRV_MODULE_VERSION "1.0" -#define DRV_MODULE_RELDATE "June 25, 2007" +#define DRV_MODULE_VERSION "2.0" +#define DRV_MODULE_RELDATE "February 3, 2017" static char version[] = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); @@ -303,11 +303,6 @@ static struct vio_driver_ops vnet_vio_ops = { .handshake_complete = sunvnet_handshake_complete_common, }; -static void print_version(void) -{ - printk_once(KERN_INFO "%s", version); -} - const char *remote_macaddr_prop = "remote-mac-address"; static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) @@ -319,8 +314,6 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) const u64 *rmac; int len, i, err, switch_port; - print_version(); - hp = mdesc_grab(); vp = vnet_find_parent(hp, vdev->mp, vdev); @@ -446,6 +439,7 @@ static struct vio_driver vnet_port_driver = { static int __init vnet_init(void) { + pr_info("%s\n", version); return vio_register_driver(&vnet_port_driver); } -- cgit v1.2.3 From fd263fb6e718c5bdf35cbc1de4f781c71794d2a4 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 13 Feb 2017 10:57:00 -0800 Subject: sunvnet: add memory barrier before check for tx enable In order to allow the underlying LDC and outstanding memory operations to potentially catch up with the driver's Tx requests, add a memory barrier before checking again for available tx descriptors. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunvnet_common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 0f940f0e488d..623363bf08df 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1427,6 +1427,7 @@ ldc_start_done: dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { netif_tx_stop_queue(txq); + smp_rmb(); if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) netif_tx_wake_queue(txq); } -- cgit v1.2.3 From bf091f3f362b3c562a18bbf7a2d3e2f3a36eba1d Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 13 Feb 2017 10:57:01 -0800 Subject: sunvnet: straighten up message event handling logic The use of gotos for handling the incoming events made this code harder to read and support than it should be. This patch straightens out and clears up the logic. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunvnet_common.c | 94 +++++++++++++++---------------- 1 file changed, 45 insertions(+), 49 deletions(-) diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 623363bf08df..d124bdebf636 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -738,41 +738,37 @@ static int vnet_event_napi(struct vnet_port *port, int budget) struct vio_driver_state *vio = &port->vio; int tx_wakeup, err; int npkts = 0; - int event = (port->rx_event & LDC_EVENT_RESET); - -ldc_ctrl: - if (unlikely(event == LDC_EVENT_RESET || - event == LDC_EVENT_UP)) { - vio_link_state_change(vio, event); - - if (event == LDC_EVENT_RESET) { - vnet_port_reset(port); - vio_port_up(vio); - - /* If the device is running but its tx queue was - * stopped (due to flow control), restart it. - * This is necessary since vnet_port_reset() - * clears the tx drings and thus we may never get - * back a VIO_TYPE_DATA ACK packet - which is - * the normal mechanism to restart the tx queue. 
- */ - if (netif_running(dev)) - maybe_tx_wakeup(port); - } + + /* we don't expect any other bits */ + BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY | + LDC_EVENT_RESET | + LDC_EVENT_UP)); + + /* RESET takes precedent over any other event */ + if (port->rx_event & LDC_EVENT_RESET) { + vio_link_state_change(vio, LDC_EVENT_RESET); + vnet_port_reset(port); + vio_port_up(vio); + + /* If the device is running but its tx queue was + * stopped (due to flow control), restart it. + * This is necessary since vnet_port_reset() + * clears the tx drings and thus we may never get + * back a VIO_TYPE_DATA ACK packet - which is + * the normal mechanism to restart the tx queue. + */ + if (netif_running(dev)) + maybe_tx_wakeup(port); + port->rx_event = 0; return 0; } - /* We may have multiple LDC events in rx_event. Unroll send_events() */ - event = (port->rx_event & LDC_EVENT_UP); - port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP); - if (event == LDC_EVENT_UP) - goto ldc_ctrl; - event = port->rx_event; - if (!(event & LDC_EVENT_DATA_READY)) - return 0; - /* we dont expect any other bits than RESET, UP, DATA_READY */ - BUG_ON(event != LDC_EVENT_DATA_READY); + if (port->rx_event & LDC_EVENT_UP) { + vio_link_state_change(vio, LDC_EVENT_UP); + port->rx_event = 0; + return 0; + } err = 0; tx_wakeup = 0; @@ -795,25 +791,25 @@ ldc_ctrl: pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); pkt->end_idx = -1; - goto napi_resume; - } - err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); - if (unlikely(err < 0)) { - if (err == -ECONNRESET) - vio_conn_reset(vio); - break; + } else { + err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + vio_conn_reset(vio); + break; + } + if (err == 0) + break; + viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", + msgbuf.tag.type, + msgbuf.tag.stype, + msgbuf.tag.stype_env, + msgbuf.tag.sid); + err = vio_validate_sid(vio, &msgbuf.tag); + if (err < 0) + break; } - if (err == 0) - break; - viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", - msgbuf.tag.type, - msgbuf.tag.stype, - msgbuf.tag.stype_env, - msgbuf.tag.sid); - err = vio_validate_sid(vio, &msgbuf.tag); - if (err < 0) - break; -napi_resume: + if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { if (!sunvnet_port_is_up_common(port)) { -- cgit v1.2.3 From daa86e50f649fccadafc53994ddc4254d75a008b Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 13 Feb 2017 10:57:02 -0800 Subject: sunvnet: remove extra rcu_read_unlocks The RCU read lock is grabbed first thing in sunvnet_start_xmit_common() so it always needs to be released. This removes the conditional release in the dropped packet error path and removes a couple of superfluous calls in the middle of the code. Reported-by: Bijan Mottahedeh Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/sunvnet_common.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index d124bdebf636..65f7038e82fe 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1253,10 +1253,8 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, rcu_read_lock(); port = vnet_tx_port(skb, dev); - if (unlikely(!port)) { - rcu_read_unlock(); + if (unlikely(!port)) goto out_dropped; - } if (skb_is_gso(skb) && skb->len > port->tsolen) { err = vnet_handle_offloads(port, skb, vnet_tx_port); @@ -1281,7 +1279,6 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, fl4.saddr = ip_hdr(skb)->saddr; rt = ip_route_output_key(dev_net(dev), &fl4); - rcu_read_unlock(); if (!IS_ERR(rt)) { skb_dst_set(skb, &rt->dst); icmp_send(skb, ICMP_DEST_UNREACH, @@ -1441,8 +1438,7 @@ out_dropped: jiffies + VNET_CLEAN_TIMEOUT); else if (port) del_timer(&port->clean_timer); - if (port) - rcu_read_unlock(); + rcu_read_unlock(); if (skb) dev_kfree_skb(skb); vnet_free_skbs(freeskbs); -- cgit v1.2.3 From 7602011f59cc32ebc3a5f9058d6ba11b096c8c50 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 13 Feb 2017 10:57:03 -0800 Subject: ldmvsw: update and simplify version string New version and simplify the print code. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/ldmvsw.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 335b87660638..f0fe6cf57cda 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -41,11 +41,11 @@ static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; #define DRV_MODULE_NAME "ldmvsw" -#define DRV_MODULE_VERSION "1.0" -#define DRV_MODULE_RELDATE "Jan 15, 2016" +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "February 3, 2017" static char version[] = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver"); MODULE_LICENSE("GPL"); @@ -259,11 +259,6 @@ static struct vio_driver_ops vsw_vio_ops = { .handshake_complete = sunvnet_handshake_complete_common, }; -static void print_version(void) -{ - printk_once(KERN_INFO "%s", version); -} - static const char *remote_macaddr_prop = "remote-mac-address"; static const char *id_prop = "id"; @@ -279,8 +274,6 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) const u64 *port_id; u64 handle; - print_version(); - hp = mdesc_grab(); rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); @@ -457,6 +450,7 @@ static struct vio_driver vsw_port_driver = { static int __init vsw_init(void) { + pr_info("%s\n", version); return vio_register_driver(&vsw_port_driver); } -- cgit v1.2.3 From bc221a34ac473b444a7cfdd0c152b4c71f79326b Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 13 Feb 2017 10:57:04 -0800 Subject: ldmvsw: disable tso and gso for bridge operations The ldmvsw driver is specifically for supporting the ldom virtual networking by running in the primary ldom and using the LDC to connect the remaining ldoms to the outside world via a bridge. 
With TSO and GSO supported while connected the bridge, things tend to misbehave as seen in our case by delayed packets, enough to begin triggering retransmits and affecting overall throughput. By turning off advertised support for TSO and GSO we restore stable traffic flow through the bridge. Orabug: 23293104 Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/ldmvsw.c | 5 ++--- drivers/net/ethernet/sun/sunvnet_common.c | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index f0fe6cf57cda..89952deae47f 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -234,8 +234,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], dev->ethtool_ops = &vsw_ethtool_ops; dev->watchdog_timeo = VSW_TX_TIMEOUT; - dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | - NETIF_F_HW_CSUM | NETIF_F_SG; + dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ @@ -320,7 +319,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) port->vp = vp; port->dev = dev; port->switch_port = 1; - port->tso = true; + port->tso = false; /* no tso in vsw, misbehaves in bridge */ port->tsolen = 0; /* Mark the port as belonging to ldmvsw which directs the diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 65f7038e82fe..fa2d11ca9b81 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -186,6 +186,7 @@ static int handle_attr_info(struct vio_driver_state *vio, } else { pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; pkt->ipv4_lso_maxlen = 0; + port->tsolen = 0; } /* for version >= 1.6, ACK packet mode we support */ @@ -1635,7 +1636,7 @@ static void vnet_port_reset(struct vnet_port *port) del_timer(&port->clean_timer); sunvnet_port_free_tx_bufs_common(port); port->rmtu = 0; - port->tso = true; + port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */ port->tsolen = 0; } -- cgit v1.2.3 From 1ca6270b8d932475636364b989c6d2c2318fbabe Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 13 Feb 2017 21:03:02 +0100 Subject: mlxsw: spectrum: Change ipv6 unregistered mc table Point back the unregister IPv6 mc table to the bc table. It is done since IPv6 mcast snooping is not supported for Spectrum yet. Reported-by: Jiri Pirko Fixes: 71c365bdc439 ("mlxsw: spectrum: Separate bc and mc floods") Signed-off-by: Nogah Frankel Signed-off-by: Yotam Gigi Tested-by: Jiri Pirko Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 37c018bea5e2..16484f24b7db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3078,7 +3078,6 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, flood_table = MLXSW_SP_FLOOD_TABLE_UC; break; case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4: - case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6: flood_table = MLXSW_SP_FLOOD_TABLE_MC; break; default: -- cgit v1.2.3 From cce5fbadb65568015706d3ae169b419cc1c7a25f Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Mon, 13 Feb 2017 23:45:46 +0100 Subject: pcnet32: factor out pcnet32_clr_suspend() Move the code to clear SUSPEND flag to a separate function to simplify code. Signed-off-by: Ondrej Zary Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/pcnet32.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index a8a22c0d688a..10b70ac3db6c 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1108,6 +1108,13 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, return 1; } +static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr) +{ + int csr5 = lp->a->read_csr(ioaddr, CSR5); + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND); +} + /* * process one receive descriptor entry */ @@ -1425,13 +1432,8 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, } } - if (!(csr0 & CSR0_STOP)) { /* If not stopped */ - int csr5; - - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = a->read_csr(ioaddr, CSR5); - a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); - } + if (!(csr0 & CSR0_STOP)) /* If not stopped */ + pcnet32_clr_suspend(lp, ioaddr); spin_unlock_irqrestore(&lp->lock, flags); } @@ -2675,10 +2677,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev) } if (suspended) { - int csr5; - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = lp->a->read_csr(ioaddr, CSR5); - lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + pcnet32_clr_suspend(lp, ioaddr); } else { lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); pcnet32_restart(dev, CSR0_NORMAL); -- cgit v1.2.3 From 2be4cb973f0cfaf051224fa5bc969c71c9fc3d5b Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Mon, 13 Feb 2017 23:45:47 +0100 Subject: pcnet32: fix BNC/AUI port on AM79C970A Even though the port autoselection is enabled by default on AM79C970A, BNC/AUI port does not work because the link is always reported to be down. The link state reported by the chip belongs only to the TP port but the driver uses it regardless of the port used. The chip can't detect BNC/AUI link state. Disable port autoselection and use TP port by default to keep current behavior (link detection works on TP port, BNC/AUI port does not work). Implement ethtool autoneg, port and duplex configuration to allow using the BNC/AUI port. Report the TP link state only if the TP port is selected. When the port autoselection is enabled or AUI port is selected, report the link as always up. Move pcnet32_suspend() and pcnet32_clr_suspend() functions to avoid forward declarations. Signed-off-by: Ondrej Zary Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/pcnet32.c | 177 ++++++++++++++++++++++++++----------- 1 file changed, 125 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 10b70ac3db6c..86369d7c9a0f 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -291,7 +291,10 @@ struct pcnet32_private { int options; unsigned int shared_irq:1, /* shared irq possible */ dxsuflo:1, /* disable transmit stop on uflo */ - mii:1; /* mii port available */ + mii:1, /* mii port available */ + autoneg:1, /* autoneg enabled */ + port_tp:1, /* port set to TP */ + fdx:1; /* full duplex enabled */ struct net_device *next; struct mii_if_info mii_if; struct timer_list watchdog_timer; @@ -677,6 +680,52 @@ static void pcnet32_poll_controller(struct net_device *dev) } #endif +/* + * lp->lock must be held. + */ +static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, + int can_sleep) +{ + int csr5; + struct pcnet32_private *lp = netdev_priv(dev); + const struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + + /* really old chips have to be stopped. */ + if (lp->chip_version < PCNET32_79C970A) + return 0; + + /* set SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); + + /* poll waiting for bit to be set */ + ticks = 0; + while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { + spin_unlock_irqrestore(&lp->lock, *flags); + if (can_sleep) + msleep(1); + else + mdelay(1); + spin_lock_irqsave(&lp->lock, *flags); + ticks++; + if (ticks > 200) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Error getting into suspend!\n"); + return 0; + } + } + return 1; +} + +static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr) +{ + int csr5 = lp->a->read_csr(ioaddr, CSR5); + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND); +} + static int pcnet32_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { @@ -684,12 +733,29 @@ static int pcnet32_get_link_ksettings(struct net_device *dev, unsigned long flags; int r = -EOPNOTSUPP; + spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); + r = 0; + } else if (lp->chip_version == PCNET32_79C970A) { + if (lp->autoneg) { + cmd->base.autoneg = AUTONEG_ENABLE; + if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0) + cmd->base.port = PORT_AUI; + else + cmd->base.port = PORT_TP; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI; + } + cmd->base.duplex = lp->fdx ? 
DUPLEX_FULL : DUPLEX_HALF; + cmd->base.speed = SPEED_10; + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.supported, + SUPPORTED_TP | SUPPORTED_AUI); r = 0; } + spin_unlock_irqrestore(&lp->lock, flags); return r; } @@ -697,14 +763,46 @@ static int pcnet32_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct pcnet32_private *lp = netdev_priv(dev); + ulong ioaddr = dev->base_addr; unsigned long flags; int r = -EOPNOTSUPP; + int suspended, bcr2, bcr9, csr15; + spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); + } else if (lp->chip_version == PCNET32_79C970A) { + suspended = pcnet32_suspend(dev, &flags, 0); + if (!suspended) + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + + lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE; + bcr2 = lp->a->read_bcr(ioaddr, 2); + if (cmd->base.autoneg == AUTONEG_ENABLE) { + lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002); + } else { + lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002); + + lp->port_tp = cmd->base.port == PORT_TP; + csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180; + if (cmd->base.port == PORT_TP) + csr15 |= 0x0080; + lp->a->write_csr(ioaddr, CSR15, csr15); + lp->init_block->mode = cpu_to_le16(csr15); + + lp->fdx = cmd->base.duplex == DUPLEX_FULL; + bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003; + if (cmd->base.duplex == DUPLEX_FULL) + bcr9 |= 0x0003; + lp->a->write_bcr(ioaddr, 9, bcr9); + } + if (suspended) + pcnet32_clr_suspend(lp, ioaddr); + else if (netif_running(dev)) + pcnet32_restart(dev, CSR0_NORMAL); + r = 0; } + spin_unlock_irqrestore(&lp->lock, flags); return r; } @@ -732,7 +830,14 @@ static u32 pcnet32_get_link(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { r = mii_link_ok(&lp->mii_if); - } else if (lp->chip_version >= PCNET32_79C970A) { + } else if (lp->chip_version == PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + /* only read link if port is set to TP */ + if (!lp->autoneg && lp->port_tp) + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + else /* link always up for AUI port or port auto select */ + r = 1; + } else if (lp->chip_version > PCNET32_79C970A) { ulong ioaddr = dev->base_addr; /* card base I/O address */ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); } else { /* can not detect link on really old chips */ @@ -1069,52 +1174,6 @@ static int pcnet32_set_phys_id(struct net_device *dev, return 0; } -/* - * lp->lock must be held. - */ -static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, - int can_sleep) -{ - int csr5; - struct pcnet32_private *lp = netdev_priv(dev); - const struct pcnet32_access *a = lp->a; - ulong ioaddr = dev->base_addr; - int ticks; - - /* really old chips have to be stopped. 
*/ - if (lp->chip_version < PCNET32_79C970A) - return 0; - - /* set SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = a->read_csr(ioaddr, CSR5); - a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); - - /* poll waiting for bit to be set */ - ticks = 0; - while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { - spin_unlock_irqrestore(&lp->lock, *flags); - if (can_sleep) - msleep(1); - else - mdelay(1); - spin_lock_irqsave(&lp->lock, *flags); - ticks++; - if (ticks > 200) { - netif_printk(lp, hw, KERN_DEBUG, dev, - "Error getting into suspend!\n"); - return 0; - } - } - return 1; -} - -static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr) -{ - int csr5 = lp->a->read_csr(ioaddr, CSR5); - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND); -} - /* * process one receive descriptor entry */ @@ -1814,6 +1873,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->options = PCNET32_PORT_ASEL; else lp->options = options_mapping[options[cards_found]]; + /* force default port to TP on 79C970A so link detection can work */ + if (lp->chip_version == PCNET32_79C970A) + lp->options = PCNET32_PORT_10BT; lp->mii_if.dev = dev; lp->mii_if.mdio_read = mdio_read; lp->mii_if.mdio_write = mdio_write; @@ -2065,6 +2127,10 @@ static int pcnet32_open(struct net_device *dev) (u32) (lp->rx_ring_dma_addr), (u32) (lp->init_dma_addr)); + lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL); + lp->port_tp = !!(lp->options & PCNET32_PORT_10BT); + lp->fdx = !!(lp->options & PCNET32_PORT_FD); + /* set/reset autoselect bit */ val = lp->a->read_bcr(ioaddr, 2) & ~2; if (lp->options & PCNET32_PORT_ASEL) @@ -2788,6 +2854,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) if (lp->mii) { curr_link = mii_link_ok(&lp->mii_if); + } else if (lp->chip_version == PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + /* only read link if port is set to TP */ + if (!lp->autoneg && lp->port_tp) + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + else /* link always up for AUI port or port auto select */ + curr_link = 1; } else { ulong ioaddr = dev->base_addr; /* card base I/O address */ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); -- cgit v1.2.3 From 7e57fbb2a341b5d44d30e71a6d782c5e6dbc429c Mon Sep 17 00:00:00 2001 From: Alexander Alemayhu Date: Tue, 14 Feb 2017 00:02:35 +0100 Subject: bpf: reduce compiler warnings by adding fallthrough comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the following warnings: kernel/bpf/verifier.c: In function ‘may_access_direct_pkt_data’: kernel/bpf/verifier.c:702:6: warning: this statement may fall through [-Wimplicit-fallthrough=] if (t == BPF_WRITE) ^ kernel/bpf/verifier.c:704:2: note: here case BPF_PROG_TYPE_SCHED_CLS: ^~~~ kernel/bpf/verifier.c: In function ‘reg_set_min_max_inv’: kernel/bpf/verifier.c:2057:23: warning: this statement may fall through [-Wimplicit-fallthrough=] true_reg->min_value = 0; ~~~~~~~~~~~~~~~~~~~~^~~ kernel/bpf/verifier.c:2058:2: note: here case BPF_JSGT: ^~~~ kernel/bpf/verifier.c:2068:23: warning: this statement may fall through [-Wimplicit-fallthrough=] true_reg->min_value = 0; ~~~~~~~~~~~~~~~~~~~~^~~ kernel/bpf/verifier.c:2069:2: note: here case BPF_JSGE: ^~~~ kernel/bpf/verifier.c: In function ‘reg_set_min_max’: kernel/bpf/verifier.c:2009:24: warning: this statement may fall through [-Wimplicit-fallthrough=] false_reg->min_value = 0; ~~~~~~~~~~~~~~~~~~~~~^~~ kernel/bpf/verifier.c:2010:2: 
note: here case BPF_JSGT: ^~~~ kernel/bpf/verifier.c:2019:24: warning: this statement may fall through [-Wimplicit-fallthrough=] false_reg->min_value = 0; ~~~~~~~~~~~~~~~~~~~~~^~~ kernel/bpf/verifier.c:2020:2: note: here case BPF_JSGE: ^~~~ Reported-by: David Binderman Signed-off-by: Alexander Alemayhu Acked-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1a754e5d2695..d2bded2b250c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -701,6 +701,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; + /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: @@ -2007,6 +2008,7 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, case BPF_JGT: /* Unsigned comparison, the minimum value is 0. */ false_reg->min_value = 0; + /* fallthrough */ case BPF_JSGT: /* If this is false then we know the maximum val is val, * otherwise we know the min val is val+1. @@ -2017,6 +2019,7 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, case BPF_JGE: /* Unsigned comparison, the minimum value is 0. */ false_reg->min_value = 0; + /* fallthrough */ case BPF_JSGE: /* If this is false then we know the maximum value is val - 1, * otherwise we know the mimimum value is val. @@ -2055,6 +2058,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, case BPF_JGT: /* Unsigned comparison, the minimum value is 0. */ true_reg->min_value = 0; + /* fallthrough */ case BPF_JSGT: /* * If this is false, then the val is <= the register, if it is @@ -2066,6 +2070,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, case BPF_JGE: /* Unsigned comparison, the minimum value is 0. */ true_reg->min_value = 0; + /* fallthrough */ case BPF_JSGE: /* If this is false then constant < register, if it is true then * the register < constant. -- cgit v1.2.3 From 59bdb293fce30071bf62baed957a7b9d1f5fa3ea Mon Sep 17 00:00:00 2001 From: Manuel Lauss Date: Tue, 14 Feb 2017 13:08:03 +0100 Subject: net: irda: au1k_ir: remove unused timer remove the unused timer. I suppose it was intended as a timeout detector, but never properly implemented. Signed-off-by: Manuel Lauss Signed-off-by: David S. 
Miller --- drivers/net/irda/au1k_ir.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 44e4f386a5dc..bc2a63f5ca8c 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c @@ -169,8 +169,6 @@ struct au1k_private { u32 speed; u32 newspeed; - struct timer_list timer; - struct resource *ioarea; struct au1k_irda_platform_data *platdata; struct clk *irda_clk; @@ -178,8 +176,6 @@ struct au1k_private { static int qos_mtt_bits = 0x07; /* 1 ms or more */ -#define RUN_AT(x) (jiffies + (x)) - static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode) { if (p->platdata && p->platdata->set_phy_mode) @@ -620,8 +616,6 @@ static int au1k_irda_start(struct net_device *dev) /* power up */ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR); - aup->timer.expires = RUN_AT((3 * HZ)); - aup->timer.data = (unsigned long)dev; return 0; } @@ -642,7 +636,6 @@ static int au1k_irda_stop(struct net_device *dev) } netif_stop_queue(dev); - del_timer(&aup->timer); /* disable the interrupt */ free_irq(aup->irq_tx, dev); -- cgit v1.2.3 From 044950cc48ced25ccb2f4ea7c31228399e19d5d6 Mon Sep 17 00:00:00 2001 From: Manuel Lauss Date: Tue, 14 Feb 2017 13:08:04 +0100 Subject: net: irda: au1k_ir: drop useless include remove useless ioport.h include. Signed-off-by: Manuel Lauss Signed-off-by: David S. Miller --- drivers/net/irda/au1k_ir.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index bc2a63f5ca8c..be4ea6aa57a9 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From 6f2e3f7d9785dacb358b48b44950182b5c13e4bc Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 14 Feb 2017 14:19:32 +0000 Subject: net_sched: nla_memdup_cookie() can be static Fixes the following sparse warning: net/sched/act_api.c:532:5: warning: symbol 'nla_memdup_cookie' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- net/sched/act_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 3c5e29ba6594..f219ff325ed4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -529,7 +529,7 @@ errout: return err; } -int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb) +static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb) { a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL); if (!a->act_cookie) -- cgit v1.2.3 From a06d4d672f76ef01d1fac38e2cefc22b7aaa7476 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 14 Feb 2017 15:10:06 +0100 Subject: net: hip04: Omit private ndo_get_stats function hip04_get_stats() just returns dev->stats so we can leave it out altogether and let dev_get_stats() do the job. Signed-off-by: Tobias Klauser Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hip04_eth.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 6e50ec82b3d8..0cec06bec63e 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -701,11 +701,6 @@ static void hip04_tx_timeout_task(struct work_struct *work) hip04_mac_open(priv->ndev); } -static struct net_device_stats *hip04_get_stats(struct net_device *ndev) -{ - return &ndev->stats; -} - static int hip04_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -764,7 +759,6 @@ static const struct ethtool_ops hip04_ethtool_ops = { static const struct net_device_ops hip04_netdev_ops = { .ndo_open = hip04_mac_open, .ndo_stop = hip04_mac_stop, - .ndo_get_stats = hip04_get_stats, .ndo_start_xmit = hip04_mac_start_xmit, .ndo_set_mac_address = hip04_set_mac_address, .ndo_tx_timeout = hip04_timeout, -- cgit v1.2.3 From 675d4d5c03d752edff3e1ef93c032a4eeb660552 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 14 Feb 2017 17:47:12 +0100 Subject: pch_gbe: Omit private ndo_get_stats function pch_gbe_get_stats() just returns dev->stats so we can leave it out altogether and let dev_get_stats() do the job. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index f9e4e8eca665..5ae9681a2da7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2148,17 +2148,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } -/** - * pch_gbe_get_stats - Get System Network Statistics - * @netdev: Network interface device structure - * Returns: The current stats - */ -static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev) -{ - /* only return the current stats */ - return &netdev->stats; -} - /** * pch_gbe_set_multi - Multicast and Promiscuous mode set * @netdev: Network interface device structure @@ -2420,7 +2409,6 @@ static const struct net_device_ops pch_gbe_netdev_ops = { .ndo_open = pch_gbe_open, .ndo_stop = pch_gbe_stop, .ndo_start_xmit = pch_gbe_xmit_frame, - .ndo_get_stats = pch_gbe_get_stats, .ndo_set_mac_address = pch_gbe_set_mac, .ndo_tx_timeout = pch_gbe_tx_timeout, .ndo_change_mtu = pch_gbe_change_mtu, -- cgit v1.2.3 From d5bc1613d02f1c3f1226e2f7d555f0384d973482 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 14 Feb 2017 16:02:36 +0200 Subject: net: ethernet: ti: cpsw: use var instead of func for usage count The usage count function is based on ndev_running flag that is updated before calling ndo_open/close, but if ndo is called in another place, as with suspend/resume, the counter is not changed, that breaks sus/resume. For common resource no difference which device is using it, does matter only device count. So, replace usage count function on var and inc and dec it in ndo_open/close. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 42 ++++++++++++------------------------------ 1 file changed, 12 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 503fa8af37d7..e86f226b258e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -399,6 +399,7 @@ struct cpsw_common { struct cpts *cpts; int rx_ch_num, tx_ch_num; int speed; + int usage_count; }; struct cpsw_priv { @@ -671,18 +672,6 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw) return; } -static int cpsw_get_usage_count(struct cpsw_common *cpsw) -{ - u32 i; - u32 usage_count = 0; - - for (i = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].ndev && netif_running(cpsw->slaves[i].ndev)) - usage_count++; - - return usage_count; -} - static void cpsw_tx_handler(void *token, int len, int status) { struct netdev_queue *txq; @@ -716,8 +705,7 @@ static void cpsw_rx_handler(void *token, int len, int status) if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { /* In dual emac mode check for all interfaces */ - if (cpsw->data.dual_emac && - cpsw_get_usage_count(cpsw) && + if (cpsw->data.dual_emac && cpsw->usage_count && (status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up @@ -1492,11 +1480,8 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg)); - /* Initialize host and slave ports. - * Given ndev is marked as opened already, so init port only if 1 ndev - * is opened - */ - if (cpsw_get_usage_count(cpsw) < 2) + /* Initialize host and slave ports */ + if (!cpsw->usage_count) cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); @@ -1507,10 +1492,8 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); - /* Given ndev is marked as opened already, so if more ndev - * are opened - no need to init shared resources. 
- */ - if (cpsw_get_usage_count(cpsw) < 2) { + /* initialize shared resources for every ndev */ + if (!cpsw->usage_count) { /* disable priority elevation */ __raw_writel(0, &cpsw->regs->ptype); @@ -1552,6 +1535,7 @@ static int cpsw_ndo_open(struct net_device *ndev) cpdma_ctlr_start(cpsw->dma); cpsw_intr_enable(cpsw); + cpsw->usage_count++; return 0; @@ -1572,10 +1556,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); - /* Given ndev is marked as close already, - * so disable shared resources if no open devices - */ - if (!cpsw_get_usage_count(cpsw)) { + if (cpsw->usage_count <= 1) { napi_disable(&cpsw->napi_rx); napi_disable(&cpsw->napi_tx); cpts_unregister(cpsw->cpts); @@ -1588,6 +1569,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) if (cpsw_need_resplit(cpsw)) cpsw_split_res(ndev); + cpsw->usage_count--; pm_runtime_put_sync(cpsw->dev); return 0; } @@ -2393,7 +2375,7 @@ static int cpsw_resume_data_pass(struct net_device *ndev) netif_dormant_off(slave->ndev); /* After this receive is started */ - if (cpsw_get_usage_count(cpsw)) { + if (cpsw->usage_count) { ret = cpsw_fill_rx_channels(priv); if (ret) return ret; @@ -2447,7 +2429,7 @@ static int cpsw_set_channels(struct net_device *ndev, } } - if (cpsw_get_usage_count(cpsw)) + if (cpsw->usage_count) cpsw_split_res(ndev); ret = cpsw_resume_data_pass(ndev); @@ -2529,7 +2511,7 @@ static int cpsw_set_ringparam(struct net_device *ndev, cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); - if (cpsw_get_usage_count(cpsw)) + if (cpsw->usage_count) cpdma_chan_split_pool(cpsw->dma); ret = cpsw_resume_data_pass(ndev); -- cgit v1.2.3 From b0fcee825c0ad05057a97d1f4685e1b9e9d00c53 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Feb 2017 09:39:24 +0100 Subject: xfrm: Add a secpath_set helper. Add a new helper to set the secpath to the skb. This avoids code duplication, as this is used in multiple places. Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/ipv6/xfrm6_input.c | 15 +++------------ net/xfrm/xfrm_input.c | 34 ++++++++++++++++++++++------------ 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6e061309adca..287635df4eef 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1006,6 +1006,7 @@ secpath_put(struct sec_path *sp) } struct sec_path *secpath_dup(struct sec_path *src); +int secpath_set(struct sk_buff *skb); static inline void secpath_reset(struct sk_buff *skb) diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index b5789562aded..662fb2c3e765 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -69,18 +69,9 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, struct xfrm_state *x = NULL; int i = 0; - /* Allocate new secpath or COW existing one. 
*/ - if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { - struct sec_path *sp; - - sp = secpath_dup(skb->sp); - if (!sp) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); - goto drop; - } - if (skb->sp) - secpath_put(skb->sp); - skb->sp = sp; + if (secpath_set(skb)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); + goto drop; } if (1 + skb->sp->len == XFRM_MAX_DEPTH) { diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 8722294c6e59..d8f913bb6919 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -117,6 +117,24 @@ struct sec_path *secpath_dup(struct sec_path *src) } EXPORT_SYMBOL(secpath_dup); +int secpath_set(struct sk_buff *skb) +{ + struct sec_path *sp; + + /* Allocate new secpath or COW existing one. */ + if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { + sp = secpath_dup(skb->sp); + if (!sp) + return -ENOMEM; + + if (skb->sp) + secpath_put(skb->sp); + skb->sp = sp; + } + return 0; +} +EXPORT_SYMBOL(secpath_set); + /* Fetch spi and seq from ipsec header */ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) @@ -212,18 +230,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) break; } - /* Allocate new secpath or COW existing one. */ - if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { - struct sec_path *sp; - - sp = secpath_dup(skb->sp); - if (!sp) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); - goto drop; - } - if (skb->sp) - secpath_put(skb->sp); - skb->sp = sp; + err = secpath_set(skb); + if (err) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); + goto drop; } seq = 0; -- cgit v1.2.3 From 5f114163f2f5eb2edbb49c4d3e0b405c7a8a7e2a Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Feb 2017 09:39:39 +0100 Subject: net: Add a skb_gro_flush_final helper. Add a skb_gro_flush_final helper to prepare for consuming skbs in call_gro_receive. We will extend this helper to not touch the skb if the skb is consumed by a gro callback with a followup patch. We need this to handle the upcomming IPsec ESP callbacks as they reinject the skb to the napi_gro_receive asynchronous. The handler is used in all gro_receive functions that can call the ESP gro handlers. 
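As a rough illustration of the call-site shape this helper targets (not part of the patch itself; example_gro_receive and its locals are placeholder names), the tail of a gro_receive callback that may pass the skb to an ESP GRO handler stops writing NAPI_GRO_CB(skb)->flush directly and goes through the new helper:

static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	int flush = 1;

	/* protocol-specific matching runs here and may hand the skb to an
	 * asynchronous handler via call_gro_receive()
	 */

	skb_gro_flush_final(skb, pp, flush);	/* was: NAPI_GRO_CB(skb)->flush |= flush; */
	return pp;
}

Centralising the final flush update like this is what allows the follow-up XFRM_OFFLOAD change to skip the update when pp reports -EINPROGRESS, i.e. when the skb has already been consumed.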
Signed-off-by: Steffen Klassert --- include/linux/netdevice.h | 5 +++++ net/ethernet/eth.c | 2 +- net/ipv4/af_inet.c | 2 +- net/ipv6/ip6_offload.c | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 58afbd1cc659..f9da3acfee9a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2661,6 +2661,11 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +{ + NAPI_GRO_CB(skb)->flush |= flush; +} + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index efdaaab735fc..c666ff0dd88b 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -474,7 +474,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 685ba53df2d1..602d40f43687 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1423,7 +1423,7 @@ out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index fc7b4017ba24..0838e6d01d2e 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -253,7 +253,7 @@ out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } -- cgit v1.2.3 From 25393d3fc055b76587fcc91627aee8c345400c3a Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Feb 2017 09:39:44 +0100 Subject: net: Prepare gro for packet consuming gro callbacks The upcomming IPsec ESP gro callbacks will consume the skb, so prepare for that. Signed-off-by: Steffen Klassert --- include/linux/netdevice.h | 9 +++++++++ net/core/dev.c | 7 +++++++ net/xfrm/Kconfig | 4 ++++ 3 files changed, 20 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f9da3acfee9a..9e5d1cd9d975 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -352,6 +352,7 @@ enum gro_result { GRO_HELD, GRO_NORMAL, GRO_DROP, + GRO_CONSUMED, }; typedef enum gro_result gro_result_t; @@ -2661,10 +2662,18 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } +#ifdef CONFIG_XFRM_OFFLOAD +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +{ + if (PTR_ERR(pp) != -EINPROGRESS) + NAPI_GRO_CB(skb)->flush |= flush; +} +#else static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) { NAPI_GRO_CB(skb)->flush |= flush; } +#endif static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, diff --git a/net/core/dev.c b/net/core/dev.c index 3e1a60102e64..64efbb9e4436 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4505,6 +4505,11 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (&ptype->list == head) goto normal; + if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) { + ret = GRO_CONSUMED; + goto ok; + } + same_flow = NAPI_GRO_CB(skb)->same_flow; ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; @@ -4609,6 +4614,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) case GRO_HELD: case GRO_MERGED: + case GRO_CONSUMED: break; } @@ -4680,6 +4686,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, break; case GRO_MERGED: + case GRO_CONSUMED: break; } diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index bda1a13628a8..a484451c72ec 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -5,6 +5,10 @@ config XFRM bool depends on NET +config XFRM_OFFLOAD + bool + depends on XFRM + config XFRM_ALGO tristate select XFRM -- cgit v1.2.3 From 1e29537034e388c7e72eac43cfcda1d23131623b Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Feb 2017 09:39:49 +0100 Subject: xfrm: Export xfrm_parse_spi. We need it in the ESP offload handlers, so export it. Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/xfrm/xfrm_input.c | 1 + 2 files changed, 2 insertions(+) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 287635df4eef..fe8db3d87e4f 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1520,6 +1520,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm4_transport_finish(struct sk_buff *skb, int async); int xfrm4_rcv(struct sk_buff *skb); +int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) { diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index d8f913bb6919..86f8a8de5252 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -170,6 +170,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq); return 0; } +EXPORT_SYMBOL(xfrm_parse_spi); int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) { -- cgit v1.2.3 From 1f8665320fa1a232fda22bcc7f88daeb83628385 Mon Sep 17 00:00:00 2001 From: Iain Hunter Date: Thu, 9 Feb 2017 14:37:28 +0000 Subject: wlcore: disable multicast filter in AP mode Enable AP support for allmulticast for MDNS. It can be enabled by bringing up the interface with ip command with argument allmulticast on Signed-off-by: Iain Hunter Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index e536aa01b937..a21fda910529 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -3202,6 +3202,21 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (ret < 0) goto out_sleep; } + + /* + * If interface in AP mode and created with allmulticast then disable + * the firmware filters so that all multicast packets are passed + * This is mandatory for MDNS based discovery protocols + */ + if (wlvif->bss_type == BSS_TYPE_AP_BSS) { + if (*total & FIF_ALLMULTI) { + ret = wl1271_acx_group_address_tbl(wl, wlvif, + false, + NULL, 0); + if (ret < 0) + goto out_sleep; + } + } } /* -- cgit v1.2.3 From 7546bba385b4eb746fcb2ea972abe96efb44edb8 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 9 Feb 2017 21:12:51 +0100 Subject: rtlwifi: btcoexist: fix semicolon.cocci warnings Remove unneeded semicolon. 
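As a quick illustration of what this class of warning is about (not taken from the patch; the function below is a placeholder), the reported ';' is a needless empty statement sitting after a block's closing brace:

static int example(int busy)
{
	int handled = 0;

	if (busy) {
		handled = 1;
	};	/* <-- the unneeded semicolon; the fix simply drops it */

	return handled;
}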
Generated by: scripts/coccinelle/misc/semicolon.cocci CC: Larry Finger Signed-off-by: Julia Lawall Signed-off-by: Fengguang Wu Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 962fbaf3ba1d..b5daa484f1a6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -2877,7 +2877,7 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) false, false); btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); - }; + } } else { /* fw mechanism */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || -- cgit v1.2.3 From 3a6282045b228f6de6abced74b793c71178fb948 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 10 Feb 2017 14:03:13 +0100 Subject: orinoco: Use net_device_stats from struct net_device Instead of using a private copy of struct net_device_stats in struct orinoco_private, use stats from struct net_device. Also remove the now unnecessary .ndo_get_stats function. Signed-off-by: Tobias Klauser Signed-off-by: Kalle Valo --- drivers/net/wireless/intersil/orinoco/main.c | 27 ++++++---------------- drivers/net/wireless/intersil/orinoco/orinoco.h | 2 -- .../net/wireless/intersil/orinoco/orinoco_usb.c | 6 ++--- 3 files changed, 9 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c index 9d96b7c928f7..28cf97489001 100644 --- a/drivers/net/wireless/intersil/orinoco/main.c +++ b/drivers/net/wireless/intersil/orinoco/main.c @@ -294,14 +294,6 @@ int orinoco_stop(struct net_device *dev) } EXPORT_SYMBOL(orinoco_stop); -struct net_device_stats *orinoco_get_stats(struct net_device *dev) -{ - struct orinoco_private *priv = ndev_priv(dev); - - return &priv->stats; -} -EXPORT_SYMBOL(orinoco_get_stats); - void orinoco_set_multicast_list(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); @@ -433,7 +425,7 @@ EXPORT_SYMBOL(orinoco_process_xmit_skb); static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; int err = 0; u16 txfid = priv->txfid; @@ -593,10 +585,7 @@ static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw) static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw) { - struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; - - stats->tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); @@ -605,8 +594,7 @@ static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw) static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw) { - struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; u16 fid = hermes_read_regn(hw, TXCOMPLFID); u16 status; struct hermes_txexc_data hdr; @@ -662,7 +650,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw) void orinoco_tx_timeout(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats 
*stats = &dev->stats; struct hermes *hw = &priv->hw; printk(KERN_WARNING "%s: Tx timeout! " @@ -749,7 +737,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, int len; struct sk_buff *skb; struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; len = le16_to_cpu(desc->data_len); @@ -840,7 +828,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct iw_statistics *wstats = &priv->wstats; struct sk_buff *skb = NULL; u16 rxfid, status; @@ -959,7 +947,7 @@ static void orinoco_rx(struct net_device *dev, struct sk_buff *skb) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; u16 status, fc; int length; struct ethhdr *hdr; @@ -2137,7 +2125,6 @@ static const struct net_device_ops orinoco_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, - .ndo_get_stats = orinoco_get_stats, }; /* Allocate private data. diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h index 5fa1c3e3713f..430862a6a24b 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco.h @@ -84,7 +84,6 @@ struct orinoco_private { /* Net device stuff */ struct net_device *ndev; - struct net_device_stats stats; struct iw_statistics wstats; /* Hardware control variables */ @@ -206,7 +205,6 @@ int orinoco_process_xmit_skb(struct sk_buff *skb, /* Common ndo functions exported for reuse by orinoco_usb */ int orinoco_open(struct net_device *dev); int orinoco_stop(struct net_device *dev); -struct net_device_stats *orinoco_get_stats(struct net_device *dev); void orinoco_set_multicast_list(struct net_device *dev); int orinoco_change_mtu(struct net_device *dev, int new_mtu); void orinoco_tx_timeout(struct net_device *dev); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index bca6935a94db..98e1380b9917 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -403,8 +403,7 @@ static void ezusb_ctx_complete(struct request_context *ctx) if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) { struct net_device *dev = upriv->dev; - struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; if (ctx->state != EZUSB_CTX_COMPLETE) stats->tx_errors++; @@ -1183,7 +1182,7 @@ static int ezusb_program(struct hermes *hw, const char *buf, static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct ezusb_priv *upriv = priv->card; u8 mic[MICHAEL_MIC_LEN + 1]; int err = 0; @@ -1556,7 +1555,6 @@ static const struct net_device_ops ezusb_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, - .ndo_get_stats = orinoco_get_stats, }; static int ezusb_probe(struct 
usb_interface *interface, -- cgit v1.2.3 From 2447e2cad75239ae407c0f98acf12511354208c5 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 10 Feb 2017 13:55:25 -0800 Subject: mwifiex: don't enable/disable IRQ 0 during suspend/resume If we don't have an out-of-band wakeup IRQ configured through DT (as most platforms don't), then we fall out of this function with 'irq_wakeup == 0'. Other code (e.g., mwifiex_disable_wake() and mwifiex_enable_wake()) treats 'irq_wakeup >= 0' as a valid IRQ, and so we end up calling {enable,disable}_irq() on IRQ 0. That seems bad, so let's not do that. Same problem as fixed in this patch: https://patchwork.kernel.org/patch/9531693/ [PATCH v2 2/3] btmrvl: set irq_bt to -1 when failed to parse it with the difference that: (a) this one is actually a regression and (b) this affects both device tree and non-device-tree systems While fixing the regression, also drop the verbosity on the parse failure, so we don't see this when a DT node is present but doesn't have an interrupt property (this is perfectly legal): [ 21.999000] mwifiex_pcie 0000:01:00.0: fail to parse irq_wakeup from device tree Fixes: 853402a00823 ("mwifiex: Enable WoWLAN for both sdio and pcie") Signed-off-by: Brian Norris Acked-by: Rajat Jain Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 9d80180a5519..5ebca1d0cfc7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1519,13 +1519,13 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter) struct device *dev = adapter->dev; if (!dev->of_node) - return; + goto err_exit; adapter->dt_node = dev->of_node; adapter->irq_wakeup = irq_of_parse_and_map(adapter->dt_node, 0); if (!adapter->irq_wakeup) { - dev_info(dev, "fail to parse irq_wakeup from device tree\n"); - return; + dev_dbg(dev, "fail to parse irq_wakeup from device tree\n"); + goto err_exit; } ret = devm_request_irq(dev, adapter->irq_wakeup, @@ -1545,7 +1545,7 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter) return; err_exit: - adapter->irq_wakeup = 0; + adapter->irq_wakeup = -1; } /* -- cgit v1.2.3 From ac0ca72c6f53ccbbe50b0af9e1de0577da1cb667 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sat, 11 Feb 2017 18:46:03 -0600 Subject: rtlwifi: btcoexist: Fix if == else warnings in halbtc8821a2ant.c The 0-DAY kernel test infrastructure reports the following: drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3023:1-3: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3035:2-4: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3037:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3047:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3075:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3085:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3129:1-3: WARNING: possible condition with no effect (if == else) 
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3141:2-4: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3143:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3153:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3179:2-4: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3181:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:3192:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2677:1-3: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2833:1-3: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2847:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2857:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2885:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2895:3-5: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2940:1-3: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2788:2-4: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2391:2-4: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c:2417:2-4: WARNING: possible condition with no effect (if == else) Reported-by: kbuild-all@01.org Signed-off-by: Larry Finger Cc: Julia Lawall Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 185 +++------------------ 1 file changed, 24 insertions(+), 161 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index b5daa484f1a6..1717e9ce96ca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -2388,14 +2388,8 @@ static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist) * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); /*for voice quality*/ - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); /*for voice quality*/ - } + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); /*for voice quality*/ /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2785,14 +2779,7 @@ static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist) NORMAL_EXEC, false); } - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } else { - 
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2830,40 +2817,18 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); - } else { - /* for HID quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } - } + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8821a2ant_tdma_dur_adj(btcoexist, false, + false, 3); + else + btc8821a2ant_tdma_dur_adj(btcoexist, false, + true, 3); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2881,27 +2846,10 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) } else { /* fw mechanism */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } - } + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 3); + else + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 3); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2937,15 +2885,8 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5f5a5f, 0xffff, 0x3); - } else { - /* for HID quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5f5a5f, 0xffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3); @@ -3020,40 +2961,12 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5a5a5a, 0xffff, 0x3); - } else { - /* for HID quality & wifi performance balance at 11n mode 
*/ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5a5a5a, 0xffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } - } + btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 3); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -3126,40 +3039,12 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5f5b5f5b, 0xffffff, 0x3); - } else { - /*for HID quality & wifi performance balance at 11n mode*/ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5f5b5f5b, 0xffffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5f5b5f5b, 0xffffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } + btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -3176,29 +3061,7 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) } } else { /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } else { - if (bt_info_ext&BIT0) { - /*a2dp basic rate*/ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } else { - /*a2dp edr rate*/ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } + btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || -- cgit v1.2.3 From 42e74946f016520a700085bc89e6250c71bb61b6 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sat, 11 Feb 2017 18:46:04 -0600 Subject: rtlwifi: btcoexist: Fix if == else warnings in halbtc8821a1ant.c The 0-DAY kernel test infrastructure reports the following: drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c:1771:1-3: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c:2126:3-5: WARNING: possible condition with no effect (if == else) Reported-by: kbuild-all@01.org Signed-off-by: Larry Finger Cc: Julia Lawall Signed-off-by: 
Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 20562f081c96..8b689ed9a629 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -1768,11 +1768,7 @@ static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, /* tdma and coex table*/ halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == - wifi_status) - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); - else - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, @@ -2123,16 +2119,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2, 30, 0); - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 1, 1); - } else { - halbtc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 1, 1); - } + halbtc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, 1, 1); } else { halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); -- cgit v1.2.3 From b686784d03e66f8515198de4850d1e372b729880 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sat, 11 Feb 2017 18:46:05 -0600 Subject: rtlwifi: btcoexist: Fix if == else warnings in halbtc8723b1ant.c The 0-DAY kernel test infrastructure reports the following: drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c:1875:2-4: WARNING: possible condition with no effect (if == else) drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c:2253:3-5: WARNING: possible condition with no effect (if == else) Reported-by: kbuild-all@01.org Signed-off-by: Larry Finger Cc: Julia Lawall Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8723b1ant.c | 24 ++++------------------ 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index e9f0a76107ed..d67bbfb6ad8e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -1872,16 +1872,8 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy( } } else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { /*HID+A2DP */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->auto_tdma_adjust = false; - } else { /*for low BT RSSI*/ - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->auto_tdma_adjust = false; - } + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + coex_dm->auto_tdma_adjust = false; halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */ @@ -2250,16 +2242,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, 
1, 2, 30, 0); - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8723b1ant_limited_tx(btcoexist, - NORMAL_EXEC, - 1, 1, 1, 1); - } else { - halbtc8723b1ant_limited_tx(btcoexist, - NORMAL_EXEC, - 1, 1, 1, 1); - } + halbtc8723b1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, 1, 1); } else { halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); -- cgit v1.2.3 From 91b632803ee4e47c5a5c4dc3d8bf5abf9c16107a Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 13 Feb 2017 11:14:09 +0100 Subject: brcmfmac: Use net_device_stats from struct net_device Instead of using a private copy of struct net_device_stats in struct brcm_if, use stats from struct net_device. Also remove the now unnecessary .ndo_get_stats function. Signed-off-by: Tobias Klauser Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/core.c | 26 +++++++--------------- .../wireless/broadcom/brcm80211/brcmfmac/core.h | 2 -- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b73a55b00fa7..60da86a8d95b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -249,10 +249,10 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, done: if (ret) { - ifp->stats.tx_dropped++; + ndev->stats.tx_dropped++; } else { - ifp->stats.tx_packets++; - ifp->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; } /* Return ok: we always eat the packet */ @@ -296,15 +296,15 @@ void brcmf_txflowblock(struct device *dev, bool state) void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) { if (skb->pkt_type == PACKET_MULTICAST) - ifp->stats.multicast++; + ifp->ndev->stats.multicast++; if (!(ifp->ndev->flags & IFF_UP)) { brcmu_pkt_buf_free_skb(skb); return; } - ifp->stats.rx_bytes += skb->len; - ifp->stats.rx_packets++; + ifp->ndev->stats.rx_bytes += skb->len; + ifp->ndev->stats.rx_packets++; brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol)); if (in_interrupt()) @@ -327,7 +327,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, if (ret || !(*ifp) || !(*ifp)->ndev) { if (ret != -ENODATA && *ifp) - (*ifp)->stats.rx_errors++; + (*ifp)->ndev->stats.rx_errors++; brcmu_pkt_buf_free_skb(skb); return -ENODATA; } @@ -388,7 +388,7 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success) } if (!success) - ifp->stats.tx_errors++; + ifp->ndev->stats.tx_errors++; brcmu_pkt_buf_free_skb(txp); } @@ -411,15 +411,6 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success) } } -static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) -{ - struct brcmf_if *ifp = netdev_priv(ndev); - - brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); - - return &ifp->stats; -} - static void brcmf_ethtool_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -492,7 +483,6 @@ static int brcmf_netdev_open(struct net_device *ndev) static const struct net_device_ops brcmf_netdev_ops_pri = { .ndo_open = brcmf_netdev_open, .ndo_stop = brcmf_netdev_stop, - .ndo_get_stats = brcmf_netdev_get_stats, .ndo_start_xmit = brcmf_netdev_start_xmit, .ndo_set_mac_address = brcmf_netdev_set_mac_address, .ndo_set_rx_mode = brcmf_netdev_set_multicast_list diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index de3197be5491..6aecd8dfd824 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -171,7 +171,6 @@ enum brcmf_netif_stop_reason { * @drvr: points to device related information. * @vif: points to cfg80211 specific interface information. * @ndev: associated network device. - * @stats: interface specific network statistics. * @multicast_work: worker object for multicast provisioning. * @ndoffload_work: worker object for neighbor discovery offload configuration. * @fws_desc: interface specific firmware-signalling descriptor. @@ -187,7 +186,6 @@ struct brcmf_if { struct brcmf_pub *drvr; struct brcmf_cfg80211_vif *vif; struct net_device *ndev; - struct net_device_stats stats; struct work_struct multicast_work; struct work_struct ndoffload_work; struct brcmf_fws_mac_descriptor *fws_desc; -- cgit v1.2.3 From 7272416609126e8910b7f0d0e3dba008aa87830c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Feb 2017 22:28:33 +0100 Subject: rt2500usb: don't mark register accesses as inline When CONFIG_KASAN is set, we get a rather large stack here: drivers/net/wireless/ralink/rt2x00/rt2500usb.c: In function 'rt2500usb_set_device_state': drivers/net/wireless/ralink/rt2x00/rt2500usb.c:1074:1: error: the frame size of 3032 bytes is larger than 100 bytes [-Werror=frame-larger-than=] If we don't force those functions to be inline, the compiler can figure this out better itself and not inline the functions when doing so would be harmful, reducing the stack size to a merge 256 bytes. Note that there is another problem that manifests in this driver, as a result of the typecheck() macro causing even larger stack frames. Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2500usb.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index 62357465fe29..0d2670a56c4c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -55,7 +55,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); * If the csr_mutex is already held then the _lock variants must * be used instead. 
*/ -static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 *value) { @@ -66,7 +66,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, *value = le16_to_cpu(reg); } -static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 *value) { @@ -77,16 +77,7 @@ static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, *value = le16_to_cpu(reg); } -static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - void *value, const u16 length) -{ - rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, - USB_VENDOR_REQUEST_IN, offset, - value, length); -} - -static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { @@ -96,7 +87,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, ®, sizeof(reg)); } -static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { @@ -106,7 +97,7 @@ static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, ®, sizeof(reg), REGISTER_TIMEOUT); } -static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, const unsigned int offset, void *value, const u16 length) { -- cgit v1.2.3 From c9f1e32600816d695f817477d56490bfc2ba43c6 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Tue, 14 Feb 2017 20:10:30 +0100 Subject: ath9k: use correct OTP register offsets for the AR9340 and AR9550 This patch fixes the OTP register definitions for the AR934x and AR9550 WMAC SoC. Previously, the ath9k driver was unable to initialize the integrated WMAC on an Aerohive AP121: | ath: phy0: timeout (1000 us) on reg 0x30018: 0xbadc0ffe & 0x00000007 != 0x00000004 | ath: phy0: timeout (1000 us) on reg 0x30018: 0xbadc0ffe & 0x00000007 != 0x00000004 | ath: phy0: Unable to initialize hardware; initialization status: -5 | ath9k ar934x_wmac: failed to initialize device | ath9k: probe of ar934x_wmac failed with error -5 It turns out that the AR9300_OTP_STATUS and AR9300_OTP_DATA definitions contain a typo. Cc: Gabor Juhos Cc: stable@vger.kernel.org Fixes: add295a4afbdf5852d0 "ath9k: use correct OTP register offsets for AR9550" Signed-off-by: Christian Lamparter Signed-off-by: Chris Blake Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 7dc7205dc877..bd2269c7de6b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -75,13 +75,13 @@ #define AR9300_OTP_BASE \ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) #define AR9300_OTP_STATUS \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 
0x31018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 #define AR9300_OTP_READ_DATA \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, -- cgit v1.2.3 From 54ef207ac8f7a17d677082157a29f4df8499dc81 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Feb 2017 09:39:54 +0100 Subject: xfrm: Extend the sec_path for IPsec offloading We need to keep per packet offloading informations across the layers. So we extend the sec_path to carry these for the input and output offload codepath. Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 41 +++++++++++++++++++++++++++++++++++++++++ net/xfrm/xfrm_input.c | 2 ++ 2 files changed, 43 insertions(+) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index fe8db3d87e4f..10086a0986f8 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -498,6 +498,7 @@ struct xfrm_tmpl { }; #define XFRM_MAX_DEPTH 6 +#define XFRM_MAX_OFFLOAD_DEPTH 1 struct xfrm_policy_walk_entry { struct list_head all; @@ -973,10 +974,41 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); +struct xfrm_offload { + /* Output sequence number for replay protection on offloading. */ + struct { + __u32 low; + __u32 hi; + } seq; + + __u32 flags; +#define SA_DELETE_REQ 1 +#define CRYPTO_DONE 2 +#define CRYPTO_NEXT_DONE 4 +#define CRYPTO_FALLBACK 8 +#define XFRM_GSO_SEGMENT 16 +#define XFRM_GRO 32 + + __u32 status; +#define CRYPTO_SUCCESS 1 +#define CRYPTO_GENERIC_ERROR 2 +#define CRYPTO_TRANSPORT_AH_AUTH_FAILED 4 +#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED 8 +#define CRYPTO_TUNNEL_AH_AUTH_FAILED 16 +#define CRYPTO_TUNNEL_ESP_AUTH_FAILED 32 +#define CRYPTO_INVALID_PACKET_SYNTAX 64 +#define CRYPTO_INVALID_PROTOCOL 128 + + __u8 proto; +}; + struct sec_path { atomic_t refcnt; int len; + int olen; + struct xfrm_state *xvec[XFRM_MAX_DEPTH]; + struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH]; }; static inline int secpath_exists(struct sk_buff *skb) @@ -1776,6 +1808,15 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) { return skb->sp->xvec[skb->sp->len - 1]; } +static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) +{ + struct sec_path *sp = skb->sp; + + if (!sp || !sp->olen || sp->len != sp->olen) + return NULL; + + return &sp->ovec[sp->olen - 1]; +} #endif static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 86f8a8de5252..d2ff71230864 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -105,6 +105,8 @@ struct sec_path *secpath_dup(struct sec_path *src) return NULL; sp->len = 0; + sp->olen = 0; + if (src) { int i; -- cgit v1.2.3 From 7785bba299a8dc8fe8390a0183dad3cafb3f1d80 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Feb 2017 09:40:00 +0100 Subject: esp: Add a software GRO codepath This patch adds GRO ifrastructure and callbacks for ESP on ipv4 and ipv6. In case the GRO layer detects an ESP packet, the esp{4,6}_gro_receive() function does a xfrm state lookup and calls the xfrm input layer if it finds a matching state. The packet will be decapsulated and reinjected it into layer 2. 
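The new callbacks hook into the generic inet GRO offload tables via inet_add_offload()/inet6_add_offload(). As a minimal sketch of that registration pattern (condensed from the esp4_offload.c code below; the protocol number and handler names here are placeholders, and the SPI parsing and xfrm state lookup are omitted):

	#include <linux/init.h>
	#include <linux/netdevice.h>
	#include <net/protocol.h>

	static struct sk_buff **foo_gro_receive(struct sk_buff **head,
						struct sk_buff *skb)
	{
		/* Nothing to aggregate in this sketch: ask the GRO core to
		 * flush the packet to the normal receive path unmodified.
		 */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	static const struct net_offload foo_offload = {
		.callbacks = {
			.gro_receive = foo_gro_receive,
		},
	};

	static int __init foo_offload_init(void)
	{
		/* IPPROTO_ESP in the real code; IPPROTO_FOO is a placeholder. */
		return inet_add_offload(&foo_offload, IPPROTO_FOO);
	}

The real esp{4,6}_gro_receive() below additionally parses the SPI, looks up the xfrm state, hands the packet to xfrm_input() and returns ERR_PTR(-EINPROGRESS) so that the GRO core treats the skb as consumed by the ESP path.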
Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/ipv4/Kconfig | 13 +++++ net/ipv4/Makefile | 1 + net/ipv4/esp4_offload.c | 106 +++++++++++++++++++++++++++++++++++++++ net/ipv4/xfrm4_input.c | 6 +++ net/ipv4/xfrm4_mode_transport.c | 4 +- net/ipv6/Kconfig | 13 +++++ net/ipv6/Makefile | 1 + net/ipv6/esp6_offload.c | 108 ++++++++++++++++++++++++++++++++++++++++ net/ipv6/xfrm6_input.c | 7 +++ net/ipv6/xfrm6_mode_transport.c | 4 +- net/xfrm/xfrm_input.c | 31 ++++++++++-- 12 files changed, 288 insertions(+), 7 deletions(-) create mode 100644 net/ipv4/esp4_offload.c create mode 100644 net/ipv6/esp6_offload.c diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 10086a0986f8..14d82bf16692 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -682,6 +682,7 @@ struct xfrm_spi_skb_cb { unsigned int daddroff; unsigned int family; + __be32 seq; }; #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0])) diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 6e7baaf814c6..e0878c3f66a8 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -360,6 +360,19 @@ config INET_ESP If unsure, say Y. +config INET_ESP_OFFLOAD + tristate "IP: ESP transformation offload" + depends on INET_ESP + select XFRM_OFFLOAD + default n + ---help--- + Support for ESP transformation offload. This makes sense + only if this system really does IPsec and want to do it + with high throughput. A typical desktop system does not + need it, even if it does IPsec. + + If unsure, say N. + config INET_IPCOMP tristate "IP: IPComp transformation" select INET_XFRM_TUNNEL diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 48af58a5686e..c6d4238ff94a 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_NET_IPVTI) += ip_vti.o obj-$(CONFIG_SYN_COOKIES) += syncookies.o obj-$(CONFIG_INET_AH) += ah4.o obj-$(CONFIG_INET_ESP) += esp4.o +obj-$(CONFIG_INET_ESP_OFFLOAD) += esp4_offload.o obj-$(CONFIG_INET_IPCOMP) += ipcomp.o obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c new file mode 100644 index 000000000000..1de442632406 --- /dev/null +++ b/net/ipv4/esp4_offload.c @@ -0,0 +1,106 @@ +/* + * IPV4 GSO/GRO offload support + * Linux INET implementation + * + * Copyright (C) 2016 secunet Security Networks AG + * Author: Steffen Klassert + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * ESP GRO support + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct sk_buff **esp4_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + int offset = skb_gro_offset(skb); + struct xfrm_offload *xo; + struct xfrm_state *x; + __be32 seq; + __be32 spi; + int err; + + skb_pull(skb, offset); + + if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) + goto out; + + err = secpath_set(skb); + if (err) + goto out; + + if (skb->sp->len == XFRM_MAX_DEPTH) + goto out; + + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, + (xfrm_address_t *)&ip_hdr(skb)->daddr, + spi, IPPROTO_ESP, AF_INET); + if (!x) + goto out; + + skb->sp->xvec[skb->sp->len++] = x; + skb->sp->olen++; + + xo = xfrm_offload(skb); + if (!xo) { + xfrm_state_put(x); + goto out; + } + xo->flags |= XFRM_GRO; + + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; + XFRM_SPI_SKB_CB(skb)->family = AF_INET; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + XFRM_SPI_SKB_CB(skb)->seq = seq; + + /* We don't need to handle errors from xfrm_input, it does all + * the error handling and frees the resources on error. */ + xfrm_input(skb, IPPROTO_ESP, spi, -2); + + return ERR_PTR(-EINPROGRESS); +out: + skb_push(skb, offset); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = 1; + + return NULL; +} + +static const struct net_offload esp4_offload = { + .callbacks = { + .gro_receive = esp4_gro_receive, + }, +}; + +static int __init esp4_offload_init(void) +{ + return inet_add_offload(&esp4_offload, IPPROTO_ESP); +} + +static void __exit esp4_offload_exit(void) +{ + inet_del_offload(&esp4_offload, IPPROTO_ESP); +} + +module_init(esp4_offload_init); +module_exit(esp4_offload_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Steffen Klassert "); diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 62e1e72db461..1fc684111ce6 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -40,6 +40,7 @@ drop: int xfrm4_transport_finish(struct sk_buff *skb, int async) { + struct xfrm_offload *xo = xfrm_offload(skb); struct iphdr *iph = ip_hdr(skb); iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol; @@ -53,6 +54,11 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async) iph->tot_len = htons(skb->len); ip_send_check(iph); + if (xo && (xo->flags & XFRM_GRO)) { + skb_mac_header_rebuild(skb); + return 0; + } + NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, dev_net(skb->dev), NULL, skb, skb->dev, NULL, xfrm4_rcv_encap_finish); diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index fd840c7d75ea..4acc0508c5eb 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -43,6 +43,7 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb_transport_header(skb); + struct xfrm_offload *xo = xfrm_offload(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), @@ -50,7 +51,8 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) skb->network_header = skb->transport_header; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); - skb_reset_transport_header(skb); + if (!xo || !(xo->flags & XFRM_GRO)) + skb_reset_transport_header(skb); return 0; } diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index ec1267e2bd1f..b2a85cca91f7 100644 --- a/net/ipv6/Kconfig +++ 
b/net/ipv6/Kconfig @@ -75,6 +75,19 @@ config INET6_ESP If unsure, say Y. +config INET6_ESP_OFFLOAD + tristate "IPv6: ESP transformation offload" + depends on INET6_ESP + select XFRM_OFFLOAD + default n + ---help--- + Support for ESP transformation offload. This makes sense + only if this system really does IPsec and want to do it + with high throughput. A typical desktop system does not + need it, even if it does IPsec. + + If unsure, say N. + config INET6_IPCOMP tristate "IPv6: IPComp transformation" select INET6_XFRM_TUNNEL diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index a9e9fec387ce..217e9ff0e24b 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -30,6 +30,7 @@ ipv6-objs += $(ipv6-y) obj-$(CONFIG_INET6_AH) += ah6.o obj-$(CONFIG_INET6_ESP) += esp6.o +obj-$(CONFIG_INET6_ESP_OFFLOAD) += esp6_offload.o obj-$(CONFIG_INET6_IPCOMP) += ipcomp6.o obj-$(CONFIG_INET6_XFRM_TUNNEL) += xfrm6_tunnel.o obj-$(CONFIG_INET6_TUNNEL) += tunnel6.o diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c new file mode 100644 index 000000000000..d914eb93204a --- /dev/null +++ b/net/ipv6/esp6_offload.c @@ -0,0 +1,108 @@ +/* + * IPV6 GSO/GRO offload support + * Linux INET implementation + * + * Copyright (C) 2016 secunet Security Networks AG + * Author: Steffen Klassert + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * ESP GRO support + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct sk_buff **esp6_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + int offset = skb_gro_offset(skb); + struct xfrm_offload *xo; + struct xfrm_state *x; + __be32 seq; + __be32 spi; + int err; + + skb_pull(skb, offset); + + if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) + goto out; + + err = secpath_set(skb); + if (err) + goto out; + + if (skb->sp->len == XFRM_MAX_DEPTH) + goto out; + + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, + (xfrm_address_t *)&ipv6_hdr(skb)->daddr, + spi, IPPROTO_ESP, AF_INET6); + if (!x) + goto out; + + skb->sp->xvec[skb->sp->len++] = x; + skb->sp->olen++; + + xo = xfrm_offload(skb); + if (!xo) { + xfrm_state_put(x); + goto out; + } + xo->flags |= XFRM_GRO; + + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; + XFRM_SPI_SKB_CB(skb)->family = AF_INET6; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); + XFRM_SPI_SKB_CB(skb)->seq = seq; + + /* We don't need to handle errors from xfrm_input, it does all + * the error handling and frees the resources on error. 
*/ + xfrm_input(skb, IPPROTO_ESP, spi, -2); + + return ERR_PTR(-EINPROGRESS); +out: + skb_push(skb, offset); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = 1; + + return NULL; +} + +static const struct net_offload esp6_offload = { + .callbacks = { + .gro_receive = esp6_gro_receive, + }, +}; + +static int __init esp6_offload_init(void) +{ + return inet6_add_offload(&esp6_offload, IPPROTO_ESP); +} + +static void __exit esp6_offload_exit(void) +{ + inet6_del_offload(&esp6_offload, IPPROTO_ESP); +} + +module_init(esp6_offload_init); +module_exit(esp6_offload_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Steffen Klassert "); diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 662fb2c3e765..08a807b29298 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -33,6 +33,8 @@ EXPORT_SYMBOL(xfrm6_rcv_spi); int xfrm6_transport_finish(struct sk_buff *skb, int async) { + struct xfrm_offload *xo = xfrm_offload(skb); + skb_network_header(skb)[IP6CB(skb)->nhoff] = XFRM_MODE_SKB_CB(skb)->protocol; @@ -44,6 +46,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) ipv6_hdr(skb)->payload_len = htons(skb->len); __skb_push(skb, skb->data - skb_network_header(skb)); + if (xo && (xo->flags & XFRM_GRO)) { + skb_mac_header_rebuild(skb); + return -1; + } + NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, dev_net(skb->dev), NULL, skb, skb->dev, NULL, ip6_rcv_finish); diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index 4e344105b3fd..4439ee44c8b0 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -47,6 +47,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb_transport_header(skb); + struct xfrm_offload *xo = xfrm_offload(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), @@ -55,7 +56,8 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) } ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - sizeof(struct ipv6hdr)); - skb_reset_transport_header(skb); + if (!xo || !(xo->flags & XFRM_GRO)) + skb_reset_transport_header(skb); return 0; } diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index d2ff71230864..46bdb4fbed0b 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -207,14 +207,23 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) unsigned int family; int decaps = 0; int async = 0; + struct xfrm_offload *xo; + bool xfrm_gro = false; - /* A negative encap_type indicates async resumption. */ if (encap_type < 0) { - async = 1; x = xfrm_input_state(skb); - seq = XFRM_SKB_CB(skb)->seq.input.low; family = x->outer_mode->afinfo->family; - goto resume; + + /* An encap_type of -1 indicates async resumption. */ + if (encap_type == -1) { + async = 1; + seq = XFRM_SKB_CB(skb)->seq.input.low; + goto resume; + } + /* encap_type < -1 indicates a GRO call. 
*/ + encap_type = 0; + seq = XFRM_SPI_SKB_CB(skb)->seq; + goto lock; } daddr = (xfrm_address_t *)(skb_network_header(skb) + @@ -260,6 +269,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) skb->sp->xvec[skb->sp->len++] = x; +lock: spin_lock(&x->lock); if (unlikely(x->km.state != XFRM_STATE_VALID)) { @@ -381,7 +391,18 @@ resume: gro_cells_receive(&gro_cells, skb); return 0; } else { - return x->inner_mode->afinfo->transport_finish(skb, async); + xo = xfrm_offload(skb); + if (xo) + xfrm_gro = xo->flags & XFRM_GRO; + + err = x->inner_mode->afinfo->transport_finish(skb, async); + if (xfrm_gro) { + skb_dst_drop(skb); + gro_cells_receive(&gro_cells, skb); + return err; + } + + return err; } drop_unlock: -- cgit v1.2.3 From 99f5711e7c15e997c4eb34b378502ef6f3982233 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 9 Feb 2017 09:10:04 -0800 Subject: mlx4: do not use rwlock in fast path Using a reader-writer lock in fast path is silly, when we can instead use RCU or a seqlock. For mlx4 hwstamp clock, a seqlock is the way to go, removing two atomic operations and false sharing. Signed-off-by: Eric Dumazet Cc: Tariq Toukan Reviewed-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 35 ++++++++++++++------------- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 +- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 504461a464c5..e7b81a305469 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -62,12 +62,13 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, struct skb_shared_hwtstamps *hwts, u64 timestamp) { - unsigned long flags; + unsigned int seq; u64 nsec; - read_lock_irqsave(&mdev->clock_lock, flags); - nsec = timecounter_cyc2time(&mdev->clock, timestamp); - read_unlock_irqrestore(&mdev->clock_lock, flags); + do { + seq = read_seqbegin(&mdev->clock_lock); + nsec = timecounter_cyc2time(&mdev->clock, timestamp); + } while (read_seqretry(&mdev->clock_lock, seq)); memset(hwts, 0, sizeof(struct skb_shared_hwtstamps)); hwts->hwtstamp = ns_to_ktime(nsec); @@ -95,9 +96,9 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) unsigned long flags; if (timeout) { - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_read(&mdev->clock); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); mdev->last_overflow_check = jiffies; } } @@ -128,10 +129,10 @@ static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_read(&mdev->clock); mdev->cycles.mult = neg_adj ? 
mult - diff : mult + diff; - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); return 0; } @@ -149,9 +150,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) ptp_clock_info); unsigned long flags; - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_adjtime(&mdev->clock, delta); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); return 0; } @@ -172,9 +173,9 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, unsigned long flags; u64 ns; - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); ns = timecounter_read(&mdev->clock); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); *ts = ns_to_timespec64(ns); @@ -198,9 +199,9 @@ static int mlx4_en_phc_settime(struct ptp_clock_info *ptp, unsigned long flags; /* reset the timecounter */ - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_init(&mdev->clock, &mdev->cycles, ns); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); return 0; } @@ -266,7 +267,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) if (mdev->ptp_clock) return; - rwlock_init(&mdev->clock_lock); + seqlock_init(&mdev->clock_lock); memset(&mdev->cycles, 0, sizeof(mdev->cycles)); mdev->cycles.read = mlx4_en_read_clock; @@ -276,10 +277,10 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); mdev->nominal_c_mult = mdev->cycles.mult; - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_init(&mdev->clock, &mdev->cycles, ktime_to_ns(ktime_get_real())); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least once every wrap around. diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index cec59bc264c9..d8ca6d1794ef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -424,9 +424,9 @@ struct mlx4_en_dev { u32 priv_pdn; spinlock_t uar_lock; u8 mac_removed[MLX4_MAX_PORTS + 1]; - rwlock_t clock_lock; u32 nominal_c_mult; struct cyclecounter cycles; + seqlock_t clock_lock; struct timecounter clock; unsigned long last_overflow_check; unsigned long overflow_period; -- cgit v1.2.3 From cdc6a4ba88fbc26de1ffe8cbcaf0e41d316046fd Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Sat, 11 Feb 2017 20:37:08 +0100 Subject: bpf: Remove redundant ifdef MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove a useless ifdef __NR_bpf as requested by Wang Nan. Inline one-line static functions as it was in the bpf_sys.h file. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: David S. Miller Cc: Wang Nan Link: https://lkml.kernel.org/r/828ab1ff-4dcf-53ff-c97b-074adb895006@huawei.com Acked-by: Wang Nan Signed-off-by: David S. 
Miller --- tools/lib/bpf/bpf.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 50e04cc5dddd..2de9c386989a 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -42,21 +42,15 @@ # endif #endif -static __u64 ptr_to_u64(const void *ptr) +static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } -static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, - unsigned int size) +static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, + unsigned int size) { -#ifdef __NR_bpf return syscall(__NR_bpf, cmd, attr, size); -#else - fprintf(stderr, "No bpf syscall, kernel headers too old?\n"); - errno = ENOSYS; - return -1; -#endif } int bpf_create_map(enum bpf_map_type map_type, int key_size, -- cgit v1.2.3 From d498f8719a098a5df7c6dba4ea302df7afb51efd Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Sat, 11 Feb 2017 23:20:23 +0100 Subject: bpf: Rebuild bpf.o for any dependency update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is needed to force a rebuild of bpf.o when one of its dependencies (e.g. uapi/linux/bpf.h) is updated. Add a phony target. Signed-off-by: Mickaël Salaün Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: David S. Miller Cc: Wang Nan Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/Makefile | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index a35f564f66a1..c7816fe60feb 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,13 +1,24 @@ -CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I../../../lib +LIBDIR := ../../../lib +BPFOBJ := $(LIBDIR)/bpf/bpf.o + +CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map TEST_PROGS := $(test_objs) test_kmod.sh TEST_FILES := $(test_objs) +.PHONY: all clean force + all: $(test_objs) -$(test_objs): ../../../lib/bpf/bpf.o +# force a rebuild of BPFOBJ when its dependencies are updated +force: + +$(BPFOBJ): force + $(MAKE) -C $(dir $(BPFOBJ)) + +$(test_objs): $(BPFOBJ) include ../lib.mk -- cgit v1.2.3 From 8ae70032552a8082734d0b8550848cf6bf92e1d5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 15 Feb 2017 11:57:50 +0100 Subject: sched: have stub for tcf_destroy_chain in case NET_CLS is not configured This fixes broken build for !NET_CLS: net/built-in.o: In function `fq_codel_destroy': /home/sab/linux/net-next/net/sched/sch_fq_codel.c:468: undefined reference to `tcf_destroy_chain' Fixes: cf1facda2f61 ("sched: move tcf_proto_destroy and tcf_destroy_chain helpers into cls_api") Reported-by: Sabrina Dubroca Signed-off-by: Jiri Pirko Tested-by: Sabrina Dubroca Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 71b266cd63d4..be5c12a5c375 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -17,7 +17,13 @@ struct tcf_walker { int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); +#ifdef CONFIG_NET_CLS void tcf_destroy_chain(struct tcf_proto __rcu **fl); +#else +static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl) +{ +} +#endif static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) -- cgit v1.2.3 From 425df17ce3a26d98f76e2b6b0af2acf4aeb0b026 Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Tue, 14 Feb 2017 21:16:28 -0800 Subject: openvswitch: Set internal device max mtu to ETH_MAX_MTU. Commit 91572088e3fd ("net: use core MTU range checking in core net infra") changed the openvswitch internal device to use the core net infra for controlling the MTU range, but failed to actually set the max_mtu as described in the commit message, which now defaults to ETH_DATA_LEN. This patch fixes this by setting max_mtu to ETH_MAX_MTU after ether_setup() call. Fixes: 91572088e3fd ("net: use core MTU range checking in core net infra") Signed-off-by: Jarno Rajahalme Signed-off-by: David S. Miller --- net/openvswitch/vport-internal_dev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 09141a18ee2d..89193a634da4 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -149,6 +149,8 @@ static void do_setup(struct net_device *netdev) { ether_setup(netdev); + netdev->max_mtu = ETH_MAX_MTU; + netdev->netdev_ops = &internal_dev_netdev_ops; netdev->priv_flags &= ~IFF_TX_SKB_SHARING; -- cgit v1.2.3 From b93f79bea9c1fcb1dd4619eaa528b6a5688c3ece Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 15 Feb 2017 11:45:25 +0530 Subject: cxgb4: Update proper netdev stats for rx drops Count buffer group drops or truncates as rx drops rather than rx errors in netdev stats. Signed-off-by: Ganesh Goudar Signed-off-by: Arjun V Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 70590144be03..8ba3dc2e236c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2400,7 +2400,7 @@ static void cxgb_get_stats(struct net_device *dev, ns->rx_over_errors = 0; ns->rx_crc_errors = stats.rx_fcs_err; ns->rx_frame_errors = stats.rx_symbol_err; - ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + + ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + stats.rx_ovflow2 + stats.rx_ovflow3 + stats.rx_trunc0 + stats.rx_trunc1 + stats.rx_trunc2 + stats.rx_trunc3; -- cgit v1.2.3 From c78c70fa30e23dc6cdb394f6c13880919499fba5 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Wed, 15 Feb 2017 10:24:10 +0200 Subject: qed: Add infrastructure for PTP support The patch adds the required qed interfaces for configuring/reading the PTP clock on the adapter. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed.h | 2 + drivers/net/ethernet/qlogic/qed/qed_l2.c | 5 + drivers/net/ethernet/qlogic/qed/qed_l2.h | 1 + drivers/net/ethernet/qlogic/qed/qed_main.c | 15 ++ drivers/net/ethernet/qlogic/qed/qed_ptp.c | 323 +++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_ptp.h | 47 ++++ drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 31 +++ include/linux/qed/qed_eth_if.h | 22 ++ 9 files changed, 447 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_ptp.c create mode 100644 drivers/net/ethernet/qlogic/qed/qed_ptp.h diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 729e43768e99..1a7300f72cab 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ - qed_selftest.o qed_dcbx.o qed_debug.o + qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1f61cf3209e8..6557f94b92ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -456,6 +456,8 @@ struct qed_hwfn { u8 dcbx_no_edpm; u8 db_bar_no_edpm; + /* p_ptp_ptt is valid for leading HWFN only */ + struct qed_ptt *p_ptp_ptt; struct qed_simd_fp_handler simd_proto_handler[64]; #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 7520eb34ad00..df932be5a4e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -214,6 +214,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, p_ramrod->vport_id = abs_vport_id; p_ramrod->mtu = cpu_to_le16(p_params->mtu); + p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; p_ramrod->drop_ttl0_en = p_params->drop_ttl0; p_ramrod->untagged = p_params->only_untagged; @@ -1886,6 +1887,7 @@ static int qed_start_vport(struct qed_dev *cdev, start.drop_ttl0 = params->drop_ttl0; start.opaque_fid = p_hwfn->hw_info.opaque_fid; start.concrete_fid = p_hwfn->hw_info.concrete_fid; + start.handle_ptp_pkts = params->handle_ptp_pkts; start.vport_id = params->vport_id; start.max_buffers_per_cqe = 16; start.mtu = params->mtu; @@ -2328,6 +2330,8 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass; extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; #endif +extern const struct qed_eth_ptp_ops qed_ptp_ops_pass; + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV @@ -2336,6 +2340,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { #ifdef CONFIG_DCB .dcb = &qed_dcbnl_ops_pass, #endif + .ptp = &qed_ptp_ops_pass, .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, .check_mac = &qed_check_mac, diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 93cb932ef663..e763abd334f6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -156,6 +156,7 @@ struct qed_sp_vport_start_params { enum qed_tpa_mode tpa_mode; bool 
remove_inner_vlan; bool tx_switching; + bool handle_ptp_pkts; bool only_untagged; bool drop_ttl0; u8 max_buffers_per_cqe; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 93eee83ccdc3..592e104687a7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -902,6 +902,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, struct qed_mcp_drv_version drv_version; const u8 *data = NULL; struct qed_hwfn *hwfn; + struct qed_ptt *p_ptt; int rc = -EINVAL; if (qed_iov_wq_start(cdev)) @@ -916,6 +917,14 @@ static int qed_slowpath_start(struct qed_dev *cdev, QED_FW_FILE_NAME); goto err; } + + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (p_ptt) { + QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt; + } else { + DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n"); + goto err; + } } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; @@ -1003,6 +1012,10 @@ err: if (IS_PF(cdev)) release_firmware(cdev->firmware); + if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt) + qed_ptt_release(QED_LEADING_HWFN(cdev), + QED_LEADING_HWFN(cdev)->p_ptp_ptt); + qed_iov_wq_stop(cdev, false); return rc; @@ -1016,6 +1029,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { + qed_ptt_release(QED_LEADING_HWFN(cdev), + QED_LEADING_HWFN(cdev)->p_ptp_ptt); qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c new file mode 100644 index 000000000000..d27aa85da23c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -0,0 +1,323 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_hw.h" +#include "qed_l2.h" +#include "qed_ptp.h" +#include "qed_reg_addr.h" + +/* 16 nano second time quantas to wait before making a Drift adjustment */ +#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0 +/* Nano seconds to add/subtract when making a Drift adjustment */ +#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28 +/* Add/subtract the Adjustment_Value when making a Drift adjustment */ +#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 +#define QED_TIMESTAMP_MASK BIT(16) + +/* Read Rx timestamp */ +static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 val; + + *timestamp = 0; + val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID); + if (!(val & QED_TIMESTAMP_MASK)) { + DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val); + return -EINVAL; + } + + val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB); + *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB); + *timestamp <<= 32; + *timestamp |= val; + + /* Reset timestamp register to allow new timestamp */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + + return 0; +} + +/* Read Tx timestamp */ +static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 val; + + *timestamp = 0; + val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID); + if (!(val & QED_TIMESTAMP_MASK)) { + DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val); + return -EINVAL; + } + + val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB); + *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB); + *timestamp <<= 32; + *timestamp |= val; + + /* Reset timestamp register to allow new timestamp */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); + + return 0; +} + +/* Read Phy Hardware Clock */ +static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 temp = 0; + + temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB); + *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB); + *phc_cycles <<= 32; + *phc_cycles |= temp; + + return 0; +} + +/* Filter PTP protocol packets that need to be timestamped */ +static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev, + enum qed_ptp_filter_type type) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 rule_mask, parm_mask; + + switch (type) { + case QED_PTP_FILTER_L2_IPV4_IPV6: + parm_mask = 0x6AA; + rule_mask = 0x3EEE; + break; + case QED_PTP_FILTER_L2: + parm_mask = 0x6BF; + rule_mask = 0x3EFF; + break; + case QED_PTP_FILTER_IPV4_IPV6: + parm_mask = 0x7EA; + rule_mask = 0x3FFE; + break; + case QED_PTP_FILTER_IPV4: + parm_mask = 0x7EE; + rule_mask = 0x3FFE; + break; + default: + DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type); + return -EINVAL; + } + + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask); + + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1); + + /* Reset possibly old timestamps */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + + return 0; +} + +/* Adjust the HW clock by a rate given in parts-per-billion 
(ppb) units. + * FW/HW accepts the adjustment value in terms of 3 parameters: + * Drift period - adjustment happens once in certain number of nano seconds. + * Drift value - time is adjusted by a certain value, for example by 5 ns. + * Drift direction - add or subtract the adjustment value. + * The routine translates ppb into the adjustment triplet in an optimal manner. + */ +static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb) +{ + s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 drift_ctr_cfg = 0, drift_state; + int drift_dir = 1; + + if (ppb < 0) { + ppb = -ppb; + drift_dir = 0; + } + + if (ppb > 1) { + s64 best_dif = ppb, best_approx_dev = 1; + + /* Adjustment value is up to +/-7ns, find an optimal value in + * this range. + */ + for (val = 7; val > 0; val--) { + period = div_s64(val * 1000000000, ppb); + period -= 8; + period >>= 4; + if (period < 1) + period = 1; + if (period > 0xFFFFFFE) + period = 0xFFFFFFE; + + /* Check both rounding ends for approximate error */ + approx_dev = period * 16 + 8; + dif = ppb * approx_dev - val * 1000000000; + dif2 = dif + 16 * ppb; + + if (dif < 0) + dif = -dif; + if (dif2 < 0) + dif2 = -dif2; + + /* Determine which end gives better approximation */ + if (dif * (approx_dev + 16) > dif2 * approx_dev) { + period++; + approx_dev += 16; + dif = dif2; + } + + /* Track best approximation found so far */ + if (best_dif * approx_dev > dif * best_approx_dev) { + best_dif = dif; + best_val = val; + best_period = period; + best_approx_dev = approx_dev; + } + } + } else if (ppb == 1) { + /* This is a special case as its the only value which wouldn't + * fit in a s64 variable. In order to prevent castings simple + * handle it seperately. 
+ */ + best_val = 4; + best_period = 0xee6b27f; + } else { + best_val = 0; + best_period = 0xFFFFFFF; + } + + drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) | + (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) | + (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1); + + drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR); + if (drift_state & 1) { + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, + drift_ctr_cfg); + } else { + DP_INFO(p_hwfn, "Drift counter is not reset\n"); + return -EINVAL; + } + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); + + return 0; +} + +static int qed_ptp_hw_enable(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + + /* Reset PTP event detection rules - will be configured in the IOCTL */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7); + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1); + + /* Pause free running counter */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0); + /* Resume free running counter */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4); + + /* Disable drift register */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); + + /* Reset possibly old timestamps */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); + + return 0; +} + +static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE); + + return 0; +} + +static int qed_ptp_hw_disable(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + + /* Reset PTP event detection rules */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable the PTP feature */ + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); + + return 0; +} + +const struct qed_eth_ptp_ops qed_ptp_ops_pass = { + .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on, + .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters, + .read_rx_ts = qed_ptp_hw_read_rx_ts, + .read_tx_ts = qed_ptp_hw_read_tx_ts, + .read_cc = qed_ptp_hw_read_cc, + .adjfreq = qed_ptp_hw_adjfreq, + .disable = qed_ptp_hw_disable, + .enable = qed_ptp_hw_enable, +}; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h new file mode 100644 index 000000000000..63c666d0b739 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.h @@ -0,0 +1,47 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic 
Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QED_PTP_H +#define _QED_PTP_H +#include + +int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_ptp_filter_type type); +int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); +int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); +int qed_ptp_read_cc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u64 *cycles); +int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb); +int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index b6722c6ff761..3b7edf6ff234 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1481,4 +1481,35 @@ #define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL #define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL #define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL +#define NIG_REG_RX_PTP_EN 0x501900UL +#define NIG_REG_TX_PTP_EN 0x501904UL +#define NIG_REG_LLH_PTP_TO_HOST 0x501908UL +#define NIG_REG_LLH_PTP_TO_MCP 0x50190cUL +#define NIG_REG_PTP_SW_TXTSEN 0x501910UL +#define NIG_REG_LLH_PTP_ETHERTYPE_1 0x501914UL +#define NIG_REG_LLH_PTP_MAC_DA_2_LSB 0x501918UL +#define NIG_REG_LLH_PTP_MAC_DA_2_MSB 0x50191cUL +#define NIG_REG_LLH_PTP_PARAM_MASK 0x501920UL +#define NIG_REG_LLH_PTP_RULE_MASK 0x501924UL +#define NIG_REG_TX_LLH_PTP_PARAM_MASK 0x501928UL +#define NIG_REG_TX_LLH_PTP_RULE_MASK 0x50192cUL +#define NIG_REG_LLH_PTP_HOST_BUF_SEQID 0x501930UL +#define NIG_REG_LLH_PTP_HOST_BUF_TS_LSB 0x501934UL +#define NIG_REG_LLH_PTP_HOST_BUF_TS_MSB 0x501938UL +#define NIG_REG_LLH_PTP_MCP_BUF_SEQID 0x50193cUL +#define NIG_REG_LLH_PTP_MCP_BUF_TS_LSB 0x501940UL +#define NIG_REG_LLH_PTP_MCP_BUF_TS_MSB 0x501944UL +#define NIG_REG_TX_LLH_PTP_BUF_SEQID 0x501948UL +#define NIG_REG_TX_LLH_PTP_BUF_TS_LSB 0x50194cUL +#define NIG_REG_TX_LLH_PTP_BUF_TS_MSB 0x501950UL +#define NIG_REG_RX_PTP_TS_MSB_ERR 0x501954UL +#define 
NIG_REG_TX_PTP_TS_MSB_ERR 0x501958UL +#define NIG_REG_TSGEN_SYNC_TIME_LSB 0x5088c0UL +#define NIG_REG_TSGEN_SYNC_TIME_MSB 0x5088c4UL +#define NIG_REG_TSGEN_RST_DRIFT_CNTR 0x5088d8UL +#define NIG_REG_TSGEN_DRIFT_CNTR_CONF 0x5088dcUL +#define NIG_REG_TS_OUTPUT_ENABLE_PDA 0x508870UL +#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL +#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL +#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL #endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 3613d63cd5d0..4cd1f0ccfa36 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -96,6 +96,7 @@ struct qed_update_vport_params { struct qed_start_vport_params { bool remove_inner_vlan; + bool handle_ptp_pkts; bool gro_enable; bool drop_ttl0; u8 vport_id; @@ -159,6 +160,15 @@ struct qed_eth_cb_ops { void (*force_mac) (void *dev, u8 *mac, bool forced); }; +#define QED_MAX_PHC_DRIFT_PPB 291666666 + +enum qed_ptp_filter_type { + QED_PTP_FILTER_L2, + QED_PTP_FILTER_IPV4, + QED_PTP_FILTER_IPV4_IPV6, + QED_PTP_FILTER_L2_IPV4_IPV6 +}; + #ifdef CONFIG_DCB /* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration * of dcbnl_rtnl_ops structure. @@ -218,6 +228,17 @@ struct qed_eth_dcbnl_ops { }; #endif +struct qed_eth_ptp_ops { + int (*hwtstamp_tx_on)(struct qed_dev *); + int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type); + int (*read_rx_ts)(struct qed_dev *, u64 *); + int (*read_tx_ts)(struct qed_dev *, u64 *); + int (*read_cc)(struct qed_dev *, u64 *); + int (*disable)(struct qed_dev *); + int (*adjfreq)(struct qed_dev *, s32); + int (*enable)(struct qed_dev *); +}; + struct qed_eth_ops { const struct qed_common_ops *common; #ifdef CONFIG_QED_SRIOV @@ -226,6 +247,7 @@ struct qed_eth_ops { #ifdef CONFIG_DCB const struct qed_eth_dcbnl_ops *dcb; #endif + const struct qed_eth_ptp_ops *ptp; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); -- cgit v1.2.3 From 4c55215c05d252e975930fe08ff418d02e002ceb Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Wed, 15 Feb 2017 10:24:11 +0200 Subject: qede: Add driver support for PTP This patch adds the driver support for, - Registering the ptp clock functionality with the OS. - Timestamping the Rx/Tx PTP packets. - Ethtool callbacks related to PTP. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/Kconfig | 1 + drivers/net/ethernet/qlogic/qede/Makefile | 2 +- drivers/net/ethernet/qlogic/qede/qede.h | 4 + drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 10 + drivers/net/ethernet/qlogic/qede/qede_fp.c | 5 + drivers/net/ethernet/qlogic/qede/qede_main.c | 39 ++ drivers/net/ethernet/qlogic/qede/qede_ptp.c | 536 ++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_ptp.h | 65 +++ 8 files changed, 661 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/qlogic/qede/qede_ptp.c create mode 100644 drivers/net/ethernet/qlogic/qede/qede_ptp.h diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 3cfd10503446..aaa1e8517348 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -104,6 +104,7 @@ config QED_SRIOV config QEDE tristate "QLogic QED 25/40/100Gb Ethernet NIC" depends on QED + imply PTP_1588_CLOCK ---help--- This enables the support for ... 
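As a quick sanity check of the drift translation in qed_ptp_hw_adjfreq() in the qed patch above (the numbers here are illustrative, not part of the patch): the hardware adds or subtracts best_val nanoseconds once every 16 * best_period + 8 nanoseconds, so the achieved correction rate is

	rate ~= best_val / (16 * best_period + 8)    [ns per ns, i.e. parts per 10^9]

For a requested adjustment of ppb = 100000 (100 ppm) the loop tries val = 7 first and computes period = ((7 * 10^9 / 100000) - 8) >> 4 = 4374, i.e. 7 ns applied roughly every 70 us. That is within about a dozen ppb of the requested rate, and the rounding check then decides whether 4374 or 4375 is the closer approximation before the triplet is written to NIG_REG_TSGEN_DRIFT_CNTR_CONF.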
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 38fbee6a442b..bc5f7c3b277d 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QEDE) := qede.o -qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o +qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o qede-$(CONFIG_DCB) += qede_dcbnl.o qede-$(CONFIG_QED_RDMA) += qede_roce.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index b4234066689b..f2aaef2cfb86 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -137,6 +137,8 @@ struct qede_rdma_dev { struct workqueue_struct *roce_wq; }; +struct qede_ptp; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -148,8 +150,10 @@ struct qede_dev { u32 flags; #define QEDE_FLAG_IS_VF BIT(0) #define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF)) +#define QEDE_TX_TIMESTAMPING_EN BIT(1) const struct qed_eth_ops *ops; + struct qede_ptp *ptp; struct qed_dev_eth_info dev_info; #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index baf264225c12..c02754ddc0e9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -39,6 +39,7 @@ #include #include #include "qede.h" +#include "qede_ptp.h" #define QEDE_RQSTAT_OFFSET(stat_name) \ (offsetof(struct qede_rx_queue, stat_name)) @@ -940,6 +941,14 @@ static int qede_set_channels(struct net_device *dev, return 0; } +static int qede_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct qede_dev *edev = netdev_priv(dev); + + return qede_ptp_get_ts_info(edev, info); +} + static int qede_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { @@ -1586,6 +1595,7 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_rxfh_key_size = qede_get_rxfh_key_size, .get_rxfh = qede_get_rxfh, .set_rxfh = qede_set_rxfh, + .get_ts_info = qede_get_ts_info, .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 26848eed3bc1..1e65038c8fc0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -40,6 +40,7 @@ #include #include #include +#include "qede_ptp.h" #include #include "qede.h" @@ -1277,6 +1278,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev, qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); qede_set_skb_csum(skb, csum_flag); skb_record_rx_queue(skb, rxq->rxq_id); + qede_ptp_record_rx_ts(edev, cqe, skb); /* SKB is prepared - pass it to stack */ qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); @@ -1451,6 +1453,9 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) first_bd->data.bd_flags.bitfields = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + qede_ptp_tx_ts(edev, skb); + /* Map skb linear data for DMA and set in the first BD */ mapping = dma_map_single(txq->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 40a76a1d5973..d163e72aa2a6 100644 --- 
a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -62,6 +62,7 @@ #include #include #include "qede.h" +#include "qede_ptp.h" static char version[] = "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; @@ -484,6 +485,25 @@ static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) } #endif +static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + + switch (cmd) { + case SIOCSHWTSTAMP: + return qede_ptp_hw_ts(edev, ifr); + default: + DP_VERBOSE(edev, QED_MSG_DEBUG, + "default IOCTL cmd 0x%x\n", cmd); + return -EOPNOTSUPP; + } + + return 0; +} + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -492,6 +512,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, + .ndo_do_ioctl = qede_ioctl, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, .ndo_set_vf_vlan = qede_set_vf_vlan, @@ -841,6 +862,15 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); + /* PTP not supported on VFs */ + if (!is_vf) { + rc = qede_ptp_register_phc(edev); + if (rc) { + DP_NOTICE(edev, "Cannot register PHC\n"); + goto err5; + } + } + edev->ops->register_ops(cdev, &qede_ll_ops, edev); #ifdef CONFIG_DCB @@ -856,6 +886,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; +err5: + unregister_netdev(edev->ndev); err4: qede_roce_dev_remove(edev); err3: @@ -907,6 +939,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) unregister_netdev(ndev); + qede_ptp_remove(edev); + qede_roce_dev_remove(edev); edev->ops->common->set_power_state(cdev, PCI_D0); @@ -1660,6 +1694,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (!vport_update_params) return -ENOMEM; + start.handle_ptp_pkts = !!(edev->ptp); start.gro_enable = !edev->gro_disable; start.mtu = edev->ndev->mtu; start.vport_id = 0; @@ -1781,6 +1816,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_roce_dev_event_close(edev); edev->state = QEDE_STATE_CLOSED; + qede_ptp_stop(edev); + /* Close OS Tx */ netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); @@ -1882,6 +1919,8 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, qede_roce_dev_event_open(edev); qede_link_update(edev, &link_output); + qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL)); + edev->state = QEDE_STATE_OPEN; DP_INFO(edev, "Ending successfully qede load\n"); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c new file mode 100644 index 000000000000..2e62dec09bd7 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -0,0 +1,536 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "qede_ptp.h" + +struct qede_ptp { + const struct qed_eth_ptp_ops *ops; + struct ptp_clock_info clock_info; + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *clock; + struct work_struct work; + struct qede_dev *edev; + struct sk_buff *tx_skb; + + /* ptp spinlock is used for protecting the cycle/time counter fields + * and, also for serializing the qed PTP API invocations. + */ + spinlock_t lock; + bool hw_ts_ioctl_called; + u16 tx_type; + u16 rx_filter; +}; + +/** + * qede_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * Adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. 
+ */ +static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) +{ + struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info); + struct qede_dev *edev = ptp->edev; + int rc; + + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + spin_lock_bh(&ptp->lock); + rc = ptp->ops->adjfreq(edev->cdev, ppb); + spin_unlock_bh(&ptp->lock); + } else { + DP_ERR(edev, "PTP adjfreq called while interface is down\n"); + rc = -EFAULT; + } + __qede_unlock(edev); + + return rc; +} + +static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n", + delta); + + spin_lock_bh(&ptp->lock); + timecounter_adjtime(&ptp->tc, delta); + spin_unlock_bh(&ptp->lock); + + return 0; +} + +static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 ns; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + spin_lock_bh(&ptp->lock); + ns = timecounter_read(&ptp->tc); + spin_unlock_bh(&ptp->lock); + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int qede_ptp_settime(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 ns; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + ns = timespec64_to_ns(ts); + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns); + + /* Re-init the timecounter */ + spin_lock_bh(&ptp->lock); + timecounter_init(&ptp->tc, &ptp->cc, ns); + spin_unlock_bh(&ptp->lock); + + return 0; +} + +/* Enable (or disable) ancillary features of the phc subsystem */ +static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info, + struct ptp_clock_request *rq, + int on) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + DP_ERR(edev, "PHC ancillary features are not supported\n"); + + return -ENOTSUPP; +} + +static void qede_ptp_task(struct work_struct *work) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 timestamp, ns; + int rc; + + ptp = container_of(work, struct qede_ptp, work); + edev = ptp->edev; + + /* Read Tx timestamp registers */ + spin_lock_bh(&ptp->lock); + rc = ptp->ops->read_tx_ts(edev->cdev, ×tamp); + spin_unlock_bh(&ptp->lock); + if (rc) { + /* Reschedule to keep checking for a valid timestamp value */ + schedule_work(&ptp->work); + return; + } + + ns = timecounter_cyc2time(&ptp->tc, timestamp); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(ptp->tx_skb, &shhwtstamps); + dev_kfree_skb_any(ptp->tx_skb); + ptp->tx_skb = NULL; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} + +/* Read the PHC. This API is invoked with ptp_lock held. 
*/ +static u64 qede_ptp_read_cc(const struct cyclecounter *cc) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 phc_cycles; + int rc; + + ptp = container_of(cc, struct qede_ptp, cc); + edev = ptp->edev; + rc = ptp->ops->read_cc(edev->cdev, &phc_cycles); + if (rc) + WARN_ONCE(1, "PHC read err %d\n", rc); + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles); + + return phc_cycles; +} + +static void qede_ptp_init_cc(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (!ptp) + return; + + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; +} + +static int qede_ptp_cfg_filters(struct qede_dev *edev) +{ + struct qede_ptp *ptp = edev->ptp; + + if (!ptp) + return -EIO; + + if (!ptp->hw_ts_ioctl_called) { + DP_INFO(edev, "TS IOCTL not called\n"); + return 0; + } + + switch (ptp->tx_type) { + case HWTSTAMP_TX_ON: + edev->flags |= QEDE_TX_TIMESTAMPING_EN; + ptp->ops->hwtstamp_tx_on(edev->cdev); + break; + + case HWTSTAMP_TX_ONESTEP_SYNC: + DP_ERR(edev, "One-step timestamping is not supported\n"); + return -ERANGE; + } + + spin_lock_bh(&ptp->lock); + switch (ptp->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + ptp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 events */ + ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ + ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6); + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + /* Initialize PTP detection L2 events */ + ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ + ptp->ops->cfg_rx_filters(edev->cdev, + QED_PTP_FILTER_L2_IPV4_IPV6); + break; + } + + spin_unlock_bh(&ptp->lock); + + return 0; +} + +int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct qede_ptp *ptp; + int rc; + + ptp = edev->ptp; + if (!ptp) + return -EIO; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n", + config.tx_type, config.rx_filter); + + if (config.flags) { + DP_ERR(edev, "config.flags is reserved for future use\n"); + return -EINVAL; + } + + ptp->hw_ts_ioctl_called = 1; + ptp->tx_type = config.tx_type; + ptp->rx_filter = config.rx_filter; + + rc = qede_ptp_cfg_filters(edev); + if (rc) + return rc; + + config.rx_filter = ptp->rx_filter; + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? 
-EFAULT : 0; +} + +/* Called during load, to initialize PTP-related stuff */ +static void qede_ptp_init(struct qede_dev *edev, bool init_tc) +{ + struct qede_ptp *ptp; + int rc; + + ptp = edev->ptp; + if (!ptp) + return; + + spin_lock_init(&ptp->lock); + + /* Configure PTP in HW */ + rc = ptp->ops->enable(edev->cdev); + if (rc) { + DP_ERR(edev, "Stopping PTP initialization\n"); + return; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&ptp->work, qede_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. + */ + if (init_tc) { + qede_ptp_init_cc(edev); + timecounter_init(&ptp->tc, &ptp->cc, + ktime_to_ns(ktime_get_real())); + } + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n"); +} + +void qede_ptp_start(struct qede_dev *edev, bool init_tc) +{ + qede_ptp_init(edev, init_tc); + qede_ptp_cfg_filters(edev); +} + +void qede_ptp_remove(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (ptp && ptp->clock) { + ptp_clock_unregister(ptp->clock); + ptp->clock = NULL; + } + + kfree(ptp); + edev->ptp = NULL; +} + +int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) +{ + struct qede_ptp *ptp = edev->ptp; + + if (!ptp) + return -EIO; + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (ptp->clock) + info->phc_index = ptp_clock_index(ptp->clock); + else + info->phc_index = -1; + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + return 0; +} + +/* Called during unload, to stop PTP-related stuff */ +void qede_ptp_stop(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (!ptp) + return; + + /* Cancel PTP work queue. Should be done after the Tx queues are + * drained to prevent additional scheduling. 
+ */ + cancel_work_sync(&ptp->work); + if (ptp->tx_skb) { + dev_kfree_skb_any(ptp->tx_skb); + ptp->tx_skb = NULL; + } + + /* Disable PTP in HW */ + spin_lock_bh(&ptp->lock); + ptp->ops->disable(edev->cdev); + spin_unlock_bh(&ptp->lock); +} + +int qede_ptp_register_phc(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); + if (!ptp) { + DP_INFO(edev, "Failed to allocate struct for PTP\n"); + return -ENOMEM; + } + + ptp->edev = edev; + ptp->ops = edev->ops->ptp; + if (!ptp->ops) { + kfree(ptp); + edev->ptp = NULL; + DP_ERR(edev, "PTP clock registeration failed\n"); + return -EIO; + } + + edev->ptp = ptp; + + /* Fill the ptp_clock_info struct and register PTP clock */ + ptp->clock_info.owner = THIS_MODULE; + snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name); + ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB; + ptp->clock_info.n_alarm = 0; + ptp->clock_info.n_ext_ts = 0; + ptp->clock_info.n_per_out = 0; + ptp->clock_info.pps = 0; + ptp->clock_info.adjfreq = qede_ptp_adjfreq; + ptp->clock_info.adjtime = qede_ptp_adjtime; + ptp->clock_info.gettime64 = qede_ptp_gettime; + ptp->clock_info.settime64 = qede_ptp_settime; + ptp->clock_info.enable = qede_ptp_ancillary_feature_enable; + + ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); + if (IS_ERR(ptp->clock)) { + ptp->clock = NULL; + kfree(ptp); + edev->ptp = NULL; + DP_ERR(edev, "PTP clock registeration failed\n"); + } + + return 0; +} + +void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (!ptp) + return; + + if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) { + DP_NOTICE(edev, + "Tx timestamping was not enabled, this packet will not be timestamped\n"); + } else if (unlikely(ptp->tx_skb)) { + DP_NOTICE(edev, + "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + } else { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* schedule check for Tx timestamp */ + ptp->tx_skb = skb_get(skb); + schedule_work(&ptp->work); + } +} + +void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb) +{ + struct qede_ptp *ptp; + u64 timestamp, ns; + int rc; + + ptp = edev->ptp; + if (!ptp) + return; + + spin_lock_bh(&ptp->lock); + rc = ptp->ops->read_rx_ts(edev->cdev, ×tamp); + if (rc) { + spin_unlock_bh(&ptp->lock); + DP_INFO(edev, "Invalid Rx timestamp\n"); + return; + } + + ns = timecounter_cyc2time(&ptp->tc, timestamp); + spin_unlock_bh(&ptp->lock); + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h new file mode 100644 index 000000000000..f328f9bba53a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -0,0 +1,65 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QEDE_PTP_H_ +#define _QEDE_PTP_H_ + +#include +#include +#include +#include "qede.h" + +void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); +void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); +int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); +void qede_ptp_start(struct qede_dev *edev, bool init_tc); +void qede_ptp_stop(struct qede_dev *edev); +void qede_ptp_remove(struct qede_dev *edev); +int qede_ptp_register_phc(struct qede_dev *edev); +int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); + +static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, + union eth_rx_cqe *cqe, + struct sk_buff *skb) +{ + /* Check if this packet was timestamped */ + if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) & + (1 << PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT))) { + if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) + & (1 << PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT))) { + qede_ptp_rx_ts(edev, skb); + } else { + DP_INFO(edev, + "Timestamp recorded for non PTP packets\n"); + } + } +} +#endif /* _QEDE_PTP_H_ */ -- cgit v1.2.3 From 36a4a50f4048f21b7c2694b37804dfc1d9890ab4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 15 Feb 2017 08:38:47 +0100 Subject: net: ethernet: aquantia: switch to pci_alloc_irq_vectors pci_enable_msix has been long deprecated, but this driver adds a new instance. Convert it to pci_alloc_irq_vectors so that no new instance of the deprecated function reaches mainline. Signed-off-by: Christoph Hellwig Tested-by: Pavel Belous Signed-off-by: David S. 
Miller --- .../net/ethernet/aquantia/atlantic/aq_pci_func.c | 101 +++++---------------- 1 file changed, 25 insertions(+), 76 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index da4bc09dac51..581de71a958a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -22,13 +22,11 @@ struct aq_pci_func_s { void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS]; resource_size_t mmio_pa; unsigned int msix_entry_mask; - unsigned int irq_type; unsigned int ports; bool is_pci_enabled; bool is_regions; bool is_pci_using_dac; struct aq_hw_caps_s aq_hw_caps; - struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS]; }; struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, @@ -87,7 +85,6 @@ int aq_pci_func_init(struct aq_pci_func_s *self) int err = 0; unsigned int bar = 0U; unsigned int port = 0U; - unsigned int i = 0U; err = pci_enable_device(self->pdev); if (err < 0) @@ -145,27 +142,16 @@ int aq_pci_func_init(struct aq_pci_func_s *self) } } - for (i = 0; i < self->aq_hw_caps.msix_irqs; i++) - self->msix_entry[i].entry = i; - /*enable interrupts */ -#if AQ_CFG_FORCE_LEGACY_INT - self->irq_type = AQ_HW_IRQ_LEGACY; -#else - err = pci_enable_msix(self->pdev, self->msix_entry, - self->aq_hw_caps.msix_irqs); +#if !AQ_CFG_FORCE_LEGACY_INT + err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, + self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX); - if (err >= 0) { - self->irq_type = AQ_HW_IRQ_MSIX; - } else { - err = pci_enable_msi(self->pdev); - - if (err >= 0) { - self->irq_type = AQ_HW_IRQ_MSI; - } else { - self->irq_type = AQ_HW_IRQ_LEGACY; - err = 0; - } + if (err < 0) { + err = pci_alloc_irq_vectors(self->pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (err < 0) + goto err_exit; } #endif @@ -196,34 +182,22 @@ err_exit: int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, char *name, void *aq_vec, cpumask_t *affinity_mask) { + struct pci_dev *pdev = self->pdev; int err = 0; - switch (self->irq_type) { - case AQ_HW_IRQ_MSIX: - err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0, + if (pdev->msix_enabled || pdev->msi_enabled) + err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0, name, aq_vec); - break; - - case AQ_HW_IRQ_MSI: - err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec); - break; - - case AQ_HW_IRQ_LEGACY: - err = request_irq(self->pdev->irq, aq_vec_isr_legacy, + else + err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy, IRQF_SHARED, name, aq_vec); - break; - - default: - err = -EFAULT; - break; - } if (err >= 0) { self->msix_entry_mask |= (1 << i); self->aq_vec[i] = aq_vec; - if (self->irq_type == AQ_HW_IRQ_MSIX) - irq_set_affinity_hint(self->msix_entry[i].vector, + if (pdev->msix_enabled) + irq_set_affinity_hint(pci_irq_vector(pdev, i), affinity_mask); } @@ -232,30 +206,16 @@ int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, void aq_pci_func_free_irqs(struct aq_pci_func_s *self) { + struct pci_dev *pdev = self->pdev; unsigned int i = 0U; for (i = 32U; i--;) { if (!((1U << i) & self->msix_entry_mask)) continue; - switch (self->irq_type) { - case AQ_HW_IRQ_MSIX: - irq_set_affinity_hint(self->msix_entry[i].vector, NULL); - free_irq(self->msix_entry[i].vector, self->aq_vec[i]); - break; - - case AQ_HW_IRQ_MSI: - free_irq(self->pdev->irq, self->aq_vec[i]); - break; - - case AQ_HW_IRQ_LEGACY: - free_irq(self->pdev->irq, self->aq_vec[i]); - break; - - default: - break; 
- } - + free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]); + if (pdev->msix_enabled) + irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL); self->msix_entry_mask &= ~(1U << i); } } @@ -267,7 +227,11 @@ void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self) unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self) { - return self->irq_type; + if (self->pdev->msix_enabled) + return AQ_HW_IRQ_MSIX; + if (self->pdev->msi_enabled) + return AQ_HW_IRQ_MSIX; + return AQ_HW_IRQ_LEGACY; } void aq_pci_func_deinit(struct aq_pci_func_s *self) @@ -276,22 +240,7 @@ void aq_pci_func_deinit(struct aq_pci_func_s *self) goto err_exit; aq_pci_func_free_irqs(self); - - switch (self->irq_type) { - case AQ_HW_IRQ_MSI: - pci_disable_msi(self->pdev); - break; - - case AQ_HW_IRQ_MSIX: - pci_disable_msix(self->pdev); - break; - - case AQ_HW_IRQ_LEGACY: - break; - - default: - break; - } + pci_free_irq_vectors(self->pdev); if (self->is_regions) pci_release_regions(self->pdev); -- cgit v1.2.3 From cb2c0acea271d96ca3242fb2ff0ce5e24958d015 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:38 +0100 Subject: net: stmmac: remove useless parenthesis This patch remove some useless parenthesis. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8 ++++---- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7251871eaa32..ee1dbf4b2329 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -716,15 +716,15 @@ static void stmmac_adjust_link(struct net_device *dev) new_state = 1; switch (phydev->speed) { case 1000: - if (likely((priv->plat->has_gmac) || - (priv->plat->has_gmac4))) + if (likely(priv->plat->has_gmac || + priv->plat->has_gmac4)) ctrl &= ~priv->hw->link.port; stmmac_hw_fix_mac_speed(priv); break; case 100: case 10: - if (likely((priv->plat->has_gmac) || - (priv->plat->has_gmac4))) { + if (likely(priv->plat->has_gmac || + priv->plat->has_gmac4)) { ctrl |= priv->hw->link.port; if (phydev->speed == SPEED_100) { ctrl |= priv->hw->link.speed; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index d9893cf05695..a6957738a432 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -257,7 +257,7 @@ int stmmac_mdio_register(struct net_device *ndev) * If an IRQ was provided to be assigned after * the bus probe, do it here. */ - if ((!mdio_bus_data->irqs) && + if (!mdio_bus_data->irqs && (mdio_bus_data->probed_phy_irq > 0)) { new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; -- cgit v1.2.3 From 3e12790eed9da7ac0e20b94690ce5de209540168 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:39 +0100 Subject: net: stmmac: likely is useless in occasional function The stmmac_adjust_link() function is called too rarely for having likely() macros being useful. Just remove likely annotation in it. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ee1dbf4b2329..511c47cf6aad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -716,15 +716,15 @@ static void stmmac_adjust_link(struct net_device *dev) new_state = 1; switch (phydev->speed) { case 1000: - if (likely(priv->plat->has_gmac || - priv->plat->has_gmac4)) + if (priv->plat->has_gmac || + priv->plat->has_gmac4) ctrl &= ~priv->hw->link.port; stmmac_hw_fix_mac_speed(priv); break; case 100: case 10: - if (likely(priv->plat->has_gmac || - priv->plat->has_gmac4)) { + if (priv->plat->has_gmac || + priv->plat->has_gmac4) { ctrl |= priv->hw->link.port; if (phydev->speed == SPEED_100) { ctrl |= priv->hw->link.speed; -- cgit v1.2.3 From bd00632c5294bff0ed5826677cd2f1b2e303a6f6 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:40 +0100 Subject: net: stmmac: use SPEED_UNKNOWN/DUPLEX_UNKNOWN It is better to use DUPLEX_UNKNOWN instead of just "-1". Using 0 for an invalid speed is bad since 0 is a valid value for speed. So this patch replace 0 by SPEED_UNKNOWN. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 511c47cf6aad..a87071d0ebfe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -754,8 +754,8 @@ static void stmmac_adjust_link(struct net_device *dev) } else if (priv->oldlink) { new_state = 1; priv->oldlink = 0; - priv->speed = 0; - priv->oldduplex = -1; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; } if (new_state && netif_msg_link(priv)) @@ -817,8 +817,8 @@ static int stmmac_init_phy(struct net_device *dev) int interface = priv->plat->interface; int max_speed = priv->plat->max_speed; priv->oldlink = 0; - priv->speed = 0; - priv->oldduplex = -1; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; if (priv->plat->phy_node) { phydev = of_phy_connect(dev, priv->plat->phy_node, @@ -3434,8 +3434,8 @@ int stmmac_suspend(struct device *dev) spin_unlock_irqrestore(&priv->lock, flags); priv->oldlink = 0; - priv->speed = 0; - priv->oldduplex = -1; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; return 0; } EXPORT_SYMBOL_GPL(stmmac_suspend); -- cgit v1.2.3 From 688495b10ba703c1c13b3d05fcaecff55a320578 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:41 +0100 Subject: net: stmmac: set speed at SPEED_UNKNOWN in case of broken speed In case of invalid speed given, stmmac_adjust_link() still record it as current speed. This patch modify the default case to set speed as SPEED_UNKNOWN if not 10/100/1000. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a87071d0ebfe..f7664b956eff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -739,6 +739,7 @@ static void stmmac_adjust_link(struct net_device *dev) default: netif_warn(priv, link, priv->dev, "broken speed: %d\n", phydev->speed); + phydev->speed = SPEED_UNKNOWN; break; } -- cgit v1.2.3 From 5db1355673205dccaf754dc3cb2f6b5abc2ff67e Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:42 +0100 Subject: net: stmmac: run stmmac_hw_fix_mac_speed when speed is valid This patch mutualise a bit by running stmmac_hw_fix_mac_speed() after the switch in case of valid speed. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f7664b956eff..bebe81010644 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -719,7 +719,6 @@ static void stmmac_adjust_link(struct net_device *dev) if (priv->plat->has_gmac || priv->plat->has_gmac4) ctrl &= ~priv->hw->link.port; - stmmac_hw_fix_mac_speed(priv); break; case 100: case 10: @@ -734,7 +733,6 @@ static void stmmac_adjust_link(struct net_device *dev) } else { ctrl &= ~priv->hw->link.port; } - stmmac_hw_fix_mac_speed(priv); break; default: netif_warn(priv, link, priv->dev, @@ -742,7 +740,8 @@ static void stmmac_adjust_link(struct net_device *dev) phydev->speed = SPEED_UNKNOWN; break; } - + if (phydev->speed != SPEED_UNKNOWN) + stmmac_hw_fix_mac_speed(priv); priv->speed = phydev->speed; } -- cgit v1.2.3 From 9beae261baab3f5a3001933cbc4d321538bef344 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:43 +0100 Subject: net: stmmac: split the stmmac_adjust_link 10/100 case The 10/100 case have too many ifcase. This patch split it for removing an if. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index bebe81010644..3cbe09682afe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -721,15 +721,19 @@ static void stmmac_adjust_link(struct net_device *dev) ctrl &= ~priv->hw->link.port; break; case 100: + if (priv->plat->has_gmac || + priv->plat->has_gmac4) { + ctrl |= priv->hw->link.port; + ctrl |= priv->hw->link.speed; + } else { + ctrl &= ~priv->hw->link.port; + } + break; case 10: if (priv->plat->has_gmac || priv->plat->has_gmac4) { ctrl |= priv->hw->link.port; - if (phydev->speed == SPEED_100) { - ctrl |= priv->hw->link.speed; - } else { - ctrl &= ~(priv->hw->link.speed); - } + ctrl &= ~(priv->hw->link.speed); } else { ctrl &= ~priv->hw->link.port; } -- cgit v1.2.3 From cc26dc67b69a736b17c69fa3f2251c8fefa99b75 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:44 +0100 Subject: net: stmmac: reduce indentation by adding a continue As suggested by Joe Perches, replacing the "if phydev" logic permit to reduce indentation in the for loop. Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 82 +++++++++++------------ 1 file changed, 40 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index a6957738a432..db157a47000c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -247,50 +247,48 @@ int stmmac_mdio_register(struct net_device *ndev) found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); + int act = 0; + char irq_num[4]; + char *irq_str; + + if (!phydev) + continue; + + /* + * If an IRQ was provided to be assigned after + * the bus probe, do it here. + */ + if (!mdio_bus_data->irqs && + (mdio_bus_data->probed_phy_irq > 0)) { + new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; + phydev->irq = mdio_bus_data->probed_phy_irq; + } - if (phydev) { - int act = 0; - char irq_num[4]; - char *irq_str; - - /* - * If an IRQ was provided to be assigned after - * the bus probe, do it here. - */ - if (!mdio_bus_data->irqs && - (mdio_bus_data->probed_phy_irq > 0)) { - new_bus->irq[addr] = - mdio_bus_data->probed_phy_irq; - phydev->irq = mdio_bus_data->probed_phy_irq; - } - - /* - * If we're going to bind the MAC to this PHY bus, - * and no PHY number was provided to the MAC, - * use the one probed here. - */ - if (priv->plat->phy_addr == -1) - priv->plat->phy_addr = addr; - - act = (priv->plat->phy_addr == addr); - switch (phydev->irq) { - case PHY_POLL: - irq_str = "POLL"; - break; - case PHY_IGNORE_INTERRUPT: - irq_str = "IGNORE"; - break; - default: - sprintf(irq_num, "%d", phydev->irq); - irq_str = irq_num; - break; - } - netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", - phydev->phy_id, addr, - irq_str, phydev_name(phydev), - act ? " active" : ""); - found = 1; + /* + * If we're going to bind the MAC to this PHY bus, + * and no PHY number was provided to the MAC, + * use the one probed here. 
+ */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = addr; + + act = (priv->plat->phy_addr == addr); + switch (phydev->irq) { + case PHY_POLL: + irq_str = "POLL"; + break; + case PHY_IGNORE_INTERRUPT: + irq_str = "IGNORE"; + break; + default: + sprintf(irq_num, "%d", phydev->irq); + irq_str = irq_num; + break; } + netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", + phydev->phy_id, addr, irq_str, phydev_name(phydev), + act ? " active" : ""); + found = 1; } if (!found && !mdio_node) { -- cgit v1.2.3 From c80151ec4b65c08dac6a653a849ec5e50d94708f Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 15 Feb 2017 10:46:45 +0100 Subject: net: stmmac: invert the logic for dumping regs It is easier to follow the logic by removing the not operator Signed-off-by: Corentin Labbe Acked-by: Giuseppe Cavallaro Reviewed-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index aab895d9257e..5ff6bc4eb8f1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -442,24 +442,24 @@ static void stmmac_ethtool_gregs(struct net_device *dev, memset(reg_space, 0x0, REG_SPACE_SIZE); - if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) { + if (priv->plat->has_gmac || priv->plat->has_gmac4) { /* MAC registers */ - for (i = 0; i < 12; i++) + for (i = 0; i < 55; i++) reg_space[i] = readl(priv->ioaddr + (i * 4)); /* DMA registers */ - for (i = 0; i < 9; i++) - reg_space[i + 12] = + for (i = 0; i < 22; i++) + reg_space[i + 55] = readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); - reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR); - reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR); } else { /* MAC registers */ - for (i = 0; i < 55; i++) + for (i = 0; i < 12; i++) reg_space[i] = readl(priv->ioaddr + (i * 4)); /* DMA registers */ - for (i = 0; i < 22; i++) - reg_space[i + 55] = + for (i = 0; i < 9; i++) + reg_space[i + 12] = readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); + reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR); + reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR); } } -- cgit v1.2.3 From 0c921a894c2f01850f39ad6ecb31dd92a9cc9839 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 15 Feb 2017 12:09:51 +0100 Subject: mlxsw: acl: Use PBS type for forward action Current behaviour of "mirred redirect" action (forward) offload is a bit odd. For matched packets the action forwards them to the desired destination, but it also lets the packet duplicates to go the original way down (bridge, router, etc). That is more like "mirred mirror". Fix this by using PBS type which behaves exactly like "mirred redirect". Note that PBS does not support loopback mode. Fixes: 4cda7d8d7098 ("mlxsw: core: Introduce flexible actions support") Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 42bb18feb316..5f337715a4da 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -651,17 +651,16 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port) { struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; - u32 kvdl_index = 0; + u32 kvdl_index; char *act; int err; - if (!in_port) { - fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, - local_port); - if (IS_ERR(fwd_entry_ref)) - return PTR_ERR(fwd_entry_ref); - kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; - } + if (in_port) + return -EOPNOTSUPP; + fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port); + if (IS_ERR(fwd_entry_ref)) + return PTR_ERR(fwd_entry_ref); + kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, MLXSW_AFA_FORWARD_SIZE); @@ -669,13 +668,12 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, err = -ENOBUFS; goto err_append_action; } - mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_OUTPUT, + mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS, kvdl_index, in_port); return 0; err_append_action: - if (!in_port) - mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); return err; } EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); -- cgit v1.2.3 From b973154a2278c6af0b0efefafb222e0189959659 Mon Sep 17 00:00:00 2001 From: Jan Koniarik Date: Wed, 15 Feb 2017 16:59:35 +0100 Subject: atm: idt77252, use setup_timer and mod_timer Stop accessing timer struct members directly and use setup_timer and mod_timer helpers intended for that use. It makes the code cleaner and will allow for easier change of the timer struct internals. Signed-off-by: Jan Koniarik Signed-off-by: Jiri Slaby Cc: Chas Williams <3chas3@gmail.com> Cc: Cc: Signed-off-by: David S. Miller --- drivers/atm/idt77252.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 471ddfd93ea8..5ec109533bb9 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -2132,12 +2132,8 @@ idt77252_init_est(struct vc_map *vc, int pcr) est->interval = 2; /* XXX: make this configurable */ est->ewma_log = 2; /* XXX: make this configurable */ - init_timer(&est->timer); - est->timer.data = (unsigned long)vc; - est->timer.function = idt77252_est_timer; - - est->timer.expires = jiffies + ((HZ / 4) << est->interval); - add_timer(&est->timer); + setup_timer(&est->timer, idt77252_est_timer, (unsigned long)vc); + mod_timer(&est->timer, jiffies + ((HZ / 4) << est->interval)); return est; } @@ -3638,9 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev, spin_lock_init(&card->cmd_lock); spin_lock_init(&card->tst_lock); - init_timer(&card->tst_timer); - card->tst_timer.data = (unsigned long)card; - card->tst_timer.function = tst_timer; + setup_timer(&card->tst_timer, tst_timer, (unsigned long)card); /* Do the I/O remapping... 
*/ card->membase = ioremap(membase, 1024); -- cgit v1.2.3 From 1dc0eb75a8f88e37c2aee75fca0313cd6e30a1e1 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Thu, 10 Nov 2016 16:01:33 -0800 Subject: ixgbe: Support 2.5Gb and 5Gb speed Though not advertised through ethtool, if the link partner advertises a 2.5Gb or 5Gb connection, and the adapter supports it, allow the speed to be used. Signed-off-by: Tony Nguyen Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 3 +++ drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 10 +++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ee28e54a4f75..04f15092ae31 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -340,6 +340,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; + case IXGBE_LINK_SPEED_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_5000); + break; case IXGBE_LINK_SPEED_2_5GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_2500); break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 2fcde8777a29..e55b2602f371 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -768,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - - /* - * Clear autoneg_advertised and set new values based on input link + /* Clear autoneg_advertised and set new values based on input link * speed. */ hw->phy.autoneg_advertised = 0; @@ -778,6 +776,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed & IXGBE_LINK_SPEED_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; -- cgit v1.2.3 From 3f40c74ccef0a0bc8cdc52105e1ac712e8e32868 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Mon, 21 Nov 2016 09:52:40 -0800 Subject: ixgbe: prefix Data Center Bridge ops struct Since dcbnl_ops is global, it should be prefixed by ixgbe_ Signed-off-by: Stephen Hemminger Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e83444c34cf9..6530eff01a0b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -751,7 +751,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info; extern const struct ixgbe_info ixgbe_x550em_a_info; extern const struct ixgbe_info ixgbe_x550em_a_fw_info; #ifdef CONFIG_IXGBE_DCB -extern const struct dcbnl_rtnl_ops dcbnl_ops; +extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; #endif extern char ixgbe_driver_name[]; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index b8fc3cfec831..78c52375acc6 
100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) return err ? 1 : 0; } -const struct dcbnl_rtnl_ops dcbnl_ops = { +const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 86135c00d4b1..0b73a3607fdd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9687,7 +9687,7 @@ skip_sriov: #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) - netdev->dcbnl_ops = &dcbnl_ops; + netdev->dcbnl_ops = &ixgbe_dcbnl_ops; #endif #ifdef IXGBE_FCOE -- cgit v1.2.3 From 1733284d02e21ec256f10794109d8c39c3c1b0f8 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 12 Dec 2016 15:08:13 -0800 Subject: ixgbe: Update version to reflect added functionality Update the driver version to reflect the new devices that it supports. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0b73a3607fdd..c1e14a2608ef 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.4.0-k" +#define DRV_VERSION "5.0.0-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2016 Intel Corporation."; -- cgit v1.2.3 From af43da0dba0b1bc7af259cd7d6d76054f3acfab0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:35:34 -0800 Subject: ixgbe: Add function for checking to see if we can reuse page This patch consolidates the code for the ixgbe driver so that it is more inline with what is already in igb. The general idea is to just consolidate functions that represent logical steps in the Rx process so we can later update them more easily. 
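The buffer-recycling step that the new helper centralizes hinges on flipping a half-page offset; a toy, self-contained C sketch of just that flip (sizes chosen for illustration, not taken from the driver) is:

#include <stdio.h>

#define RX_PAGE_SIZE 4096u /* one page backing ... */
#define RX_BUFSZ     2048u /* ... two half-page receive buffers */

int main(void)
{
        unsigned int page_offset = 0;
        int i;

        for (i = 0; i < 4; i++) {
                printf("refill uses half-page at offset %u\n", page_offset);
                page_offset ^= RX_BUFSZ; /* flip to the other half for next time */
        }
        return 0;
}

Alternating the offset lets the driver hand one half of the page back to the receive ring while the stack may still hold a reference to the other half; the consolidated ixgbe_can_reuse_rx_page() below performs this flip (in the PAGE_SIZE < 8192 case) once it has verified the page is safe to reuse.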
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 71 ++++++++++++++++----------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c1e14a2608ef..a19dda5711ae 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1930,6 +1930,42 @@ static inline bool ixgbe_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } +static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + struct page *page, + const unsigned int truesize) +{ +#if (PAGE_SIZE >= 8192) + unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - + ixgbe_rx_bufsz(rx_ring); +#endif + /* avoid re-using remote pages */ + if (unlikely(ixgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + page_ref_inc(page); + + return true; +} + /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1951,18 +1987,18 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_bufsz(rx_ring); #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - - ixgbe_rx_bufsz(rx_ring); #endif - if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; + if (size <= IXGBE_RX_HDR_SIZE) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as-is */ @@ -1974,34 +2010,11 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, return false; } +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rx_buffer->page_offset, size, truesize); - /* avoid re-using remote pages */ - if (unlikely(ixgbe_page_is_reserved(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) - return false; -#endif - - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. 
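A reduced sketch of the mapping pattern the patch moves the Rx path to -- not the ixgbe code itself; dev, page, offset and frame_len are assumed to be supplied by the caller, and error handling is omitted:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

#define RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Map a receive page once, without an implicit CPU sync of the whole page. */
static dma_addr_t rx_map_page(struct device *dev, struct page *page)
{
        return dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                  DMA_FROM_DEVICE, RX_DMA_ATTR);
}

/* Before the CPU reads a frame, sync only the bytes the device wrote. */
static void rx_sync_frame_for_cpu(struct device *dev, dma_addr_t dma,
                                  unsigned int offset, unsigned int frame_len)
{
        dma_sync_single_range_for_cpu(dev, dma, offset, frame_len,
                                      DMA_FROM_DEVICE);
}

/* Unmap without another full-page sync; the used regions were synced above. */
static void rx_unmap_page(struct device *dev, dma_addr_t dma)
{
        dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
                             RX_DMA_ATTR);
}

With DMA_ATTR_SKIP_CPU_SYNC the map and unmap calls no longer sync the whole page, so the explicit for_cpu sync above -- together with the matching dma_sync_single_range_for_device() on refill -- covering only the received length is what keeps the buffer coherent.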
- */ - page_ref_inc(page); - - return true; + return ixgbe_can_reuse_rx_page(rx_ring, rx_buffer, page, truesize); } static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, -- cgit v1.2.3 From f215af8cae4c283d8a522ea166d94f763dc4aebf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:35:44 -0800 Subject: ixgbe: Only DMA sync frame length On some platforms, syncing a buffer for DMA is expensive. Rather than sync the whole 2K receive buffer, only synchronise the length of the frame, which will typically be the MTU, or a much smaller TCP ACK. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a19dda5711ae..dde2c852e01d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1841,7 +1841,7 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, frag->page_offset, - ixgbe_rx_bufsz(rx_ring), + skb_frag_size(frag), DMA_FROM_DEVICE); } IXGBE_CB(skb)->dma = 0; @@ -1983,12 +1983,11 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, **/ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, + unsigned int size, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_bufsz(rx_ring); #else @@ -2020,6 +2019,7 @@ add_tail_frag: static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc) { + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; struct page *page; @@ -2074,14 +2074,14 @@ dma_sync: dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), + size, DMA_FROM_DEVICE); rx_buffer->skb = NULL; } /* pull page into skb */ - if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (ixgbe_add_rx_frag(rx_ring, rx_buffer, size, skb)) { /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { -- cgit v1.2.3 From f3213d9321735aa8e252d87796d5db43d4b830ec Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:35:54 -0800 Subject: ixgbe: Update driver to make use of DMA attributes in Rx path This patch adds support for DMA_ATTR_SKIP_CPU_SYNC and DMA_ATTR_WEAK_ORDERING. By enabling both of these for the Rx path we are able to see performance improvements on architectures that implement either one due to the fact that page mapping and unmapping only has to sync what is actually being used instead of the entire buffer. In addition by enabling the weak ordering attribute enables a performance improvement for architectures that can associate a memory ordering with a DMA buffer such as Sparc. 
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 56 ++++++++++++++++++--------- 2 files changed, 40 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 6530eff01a0b..8167e77b924f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -104,6 +104,9 @@ /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + enum ixgbe_tx_flags { /* cmd_type flags */ IXGBE_TX_FLAGS_HW_VLAN = 0x01, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index dde2c852e01d..ddde6759f094 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1570,8 +1570,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); /* * if mapping failed free memory back to system since @@ -1614,6 +1616,12 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) if (!ixgbe_alloc_mapped_page(rx_ring, bi)) break; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + /* * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
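A hedged sketch of the batching scheme the message describes: the field name follows the ixgbe_rx_buffer change below and the USHRT_MAX budget follows the "fully restock the number of references" comment later in this patch, but the helpers and their names are illustrative, not the driver's.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page_ref.h>

struct rx_page_ref {
        struct page *page;
        u16 pagecnt_bias; /* page references still "owned" by the driver */
};

/* A freshly allocated page carries one reference, all of it the driver's. */
static void rx_page_init(struct rx_page_ref *b, struct page *page)
{
        b->page = page;
        b->pagecnt_bias = 1;
}

/* Hot path: giving one reference to the stack is a plain decrement. */
static void rx_page_hand_out(struct rx_page_ref *b)
{
        b->pagecnt_bias--;
}

/* The page may be recycled only while the driver holds every reference. */
static bool rx_page_only_owner(const struct rx_page_ref *b)
{
        return page_ref_count(b->page) == b->pagecnt_bias;
}

/* Local budget drained: take a whole batch of references in one atomic op
 * instead of one page_ref_inc() per received frame.
 */
static void rx_page_restock(struct rx_page_ref *b)
{
        if (unlikely(b->pagecnt_bias == 0)) {
                page_ref_add(b->page, USHRT_MAX);
                b->pagecnt_bias = USHRT_MAX;
        }
}

The effect is that the locked (on x86) reference-count update happens roughly once per USHRT_MAX reused buffers rather than once per received frame.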
@@ -1832,8 +1840,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, { /* if the page was released unmap it, else just sync our portion */ if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); IXGBE_CB(skb)->page_released = false; } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; @@ -1917,12 +1927,6 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, /* transfer page from old buffer to new buffer */ *new_buff = *old_buff; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); } static inline bool ixgbe_page_is_reserved(struct page *page) @@ -2089,9 +2093,10 @@ dma_sync: IXGBE_CB(skb)->page_released = true; } else { /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); } /* clear contents of buffer_info */ @@ -4883,10 +4888,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; if (IXGBE_CB(skb)->page_released) - dma_unmap_page(dev, - IXGBE_CB(skb)->dma, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + dma_unmap_page_attrs(dev, + IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); dev_kfree_skb(skb); rx_buffer->skb = NULL; } @@ -4894,8 +4900,20 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) if (!rx_buffer->page) continue; - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); rx_buffer->page = NULL; -- cgit v1.2.3 From 1b56cf49f5b0bab9ad4eab18f9b0aee1929afd89 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:36:03 -0800 Subject: ixgbe: Update code to better handle incrementing page count Batch the page count updates instead of doing them one at a time. By doing this we can improve the overall performance as the atomic increment operations can be expensive due to the fact that on x86 they are locked operations which can cause stalls. By doing bulk updates we can consolidate the stall which should help to improve the overall receive performance. 
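The accounting idea can be shown as a self-contained sketch with invented names and a simplified layout (one half-page buffer per ring entry); the real driver spreads this across ixgbe_alloc_mapped_page() and ixgbe_can_reuse_rx_page(), but the scheme is the same: pre-charge a large block of page references once, spend them by decrementing a cheap local bias instead of issuing an atomic per frame, and only re-arm the batch when the bias runs low.

#include <linux/kernel.h>
#include <linux/mm.h>

/* Simplified stand-in: one half-page Rx buffer tracked with a local bias. */
struct demo_rx_buf {
    struct page *page;
    unsigned short pagecnt_bias;
};

/* Called once right after allocating the page (refcount is 1 here). */
static void demo_rx_buf_charge(struct demo_rx_buf *bi)
{
    page_ref_add(bi->page, USHRT_MAX - 1);  /* one bulk atomic operation */
    bi->pagecnt_bias = USHRT_MAX;
}

/*
 * Called after one half of the page has been attached to an skb: the skb now
 * owns one of the pre-charged references, accounted for by decrementing the
 * local (non-atomic) bias.  If this returns false the caller is expected to
 * drop the page, e.g. with __page_frag_cache_drain(page, bias), and map a
 * fresh one, so the bias never underflows.
 */
static bool demo_rx_buf_can_reuse(struct demo_rx_buf *bi)
{
    struct page *page = bi->page;
    unsigned short bias = --bi->pagecnt_bias;

    /*
     * Apart from the frag just handed off, nobody else may still hold a
     * reference if we want to flip to the other half of the page.
     */
    if (page_ref_count(page) - bias > 1)
        return false;

    /* Re-charge the batch before the bias runs out. */
    if (unlikely(!bias)) {
        page_ref_add(page, USHRT_MAX);
        bi->pagecnt_bias = USHRT_MAX;
    }
    return true;
}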
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Acked-by: John Fastabend Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7 ++++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 39 +++++++++++++++++---------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 8167e77b924f..d765db6bd8b2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -195,7 +195,12 @@ struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; }; struct ixgbe_queue_stats { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ddde6759f094..e4487109292a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1589,6 +1589,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = 0; + bi->pagecnt_bias = 1; return true; } @@ -1943,13 +1944,15 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - ixgbe_rx_bufsz(rx_ring); #endif + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; + /* avoid re-using remote pages */ if (unlikely(ixgbe_page_is_reserved(page))) return false; #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) + if (unlikely(page_count(page) != pagecnt_bias)) return false; /* flip page offset to other buffer */ @@ -1962,10 +1965,14 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, return false; #endif - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
*/ - page_ref_inc(page); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } return true; } @@ -2009,7 +2016,6 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, return true; /* this page cannot be reused so discard it */ - __free_pages(page, ixgbe_rx_pg_order(rx_ring)); return false; } @@ -2088,15 +2094,19 @@ dma_sync: if (ixgbe_add_rx_frag(rx_ring, rx_buffer, size, skb)) { /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); + if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + } + __page_frag_cache_drain(page, + rx_buffer->pagecnt_bias); } /* clear contents of buffer_info */ @@ -4914,7 +4924,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); - __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); rx_buffer->page = NULL; } -- cgit v1.2.3 From 4f4542bfb3b539bef118578ffafcc98e4ce91979 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:36:14 -0800 Subject: ixgbe: Make use of order 1 pages and 3K buffers independent of FCoE In order to support build_skb with jumbo frames it will be necessary to use 3K buffers for the Rx path with 8K pages backing them. This is needed on architectures that implement 4K pages because we can't support 2K buffers plus padding in a 4K page. In the case of systems that support page sizes larger than 4K the 3K attribute will only be applied to FCoE as we can fall back to using just 2K buffers and adding the padding. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 20 +++++++++----------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19 +++++++++++++------ 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index d765db6bd8b2..f3515ce8894d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -226,13 +226,14 @@ struct ixgbe_rx_queue_stats { #define IXGBE_TS_HDR_LEN 8 enum ixgbe_ring_state_t { + __IXGBE_RX_3K_BUFFER, + __IXGBE_RX_RSC_ENABLED, + __IXGBE_RX_CSUM_UDP_ZERO_ERR, + __IXGBE_RX_FCOE, __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, - __IXGBE_RX_RSC_ENABLED, - __IXGBE_RX_CSUM_UDP_ZERO_ERR, - __IXGBE_RX_FCOE, }; struct ixgbe_fwd_adapter { @@ -344,19 +345,16 @@ struct ixgbe_ring_feature { */ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 
IXGBE_RXBUFFER_4K : - IXGBE_RXBUFFER_3K; -#endif + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return IXGBE_RXBUFFER_3K; return IXGBE_RXBUFFER_2K; } static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 1 : 0; +#if (PAGE_SIZE < 8192) + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return 1; #endif return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e4487109292a..415e5ad1485a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1604,6 +1604,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; + u16 bufsz; /* nothing to do */ if (!cleaned_count) @@ -1613,14 +1614,15 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; + bufsz = ixgbe_rx_bufsz(rx_ring); + do { if (!ixgbe_alloc_mapped_page(rx_ring, bi)) break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - ixgbe_rx_bufsz(rx_ring), + bi->page_offset, bufsz, DMA_FROM_DEVICE); /* @@ -2000,9 +2002,9 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = ixgbe_rx_bufsz(rx_ring); + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif if (unlikely(skb_is_nonlinear(skb))) @@ -3866,10 +3868,15 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; + + clear_ring_rsc_enabled(rx_ring); + clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); - else - clear_ring_rsc_enabled(rx_ring); + + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); } } -- cgit v1.2.3 From c3630cc40b4f0fe004e21f19bfb5cd2231c105f8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:36:28 -0800 Subject: ixgbe: Use length to determine if descriptor is done This change makes it so that we use the length of the packet instead of the DD status bit to determine if a new descriptor is ready to be processed. The obvious advantage is that it cuts down on reads as we don't really even need the DD bit if going from a 0 to a non-zero value on size is enough to inform us that the packet has been completed. 
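The same idea applies to any write-back descriptor whose length field is written last by the device. The layout and helpers below are invented for illustration (this is not the ixgbe descriptor), but the length check followed by dma_rmb(), and clearing only the length on refill, is exactly the ordering the patch relies on.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

/* Invented write-back descriptor layout, for illustration only. */
struct demo_rx_wb_desc {
    __le16 length;          /* written last by the device, 0 while idle */
    __le16 vlan;
    __le32 status_error;    /* the DD bit lives here but is no longer read */
};

static bool demo_rx_desc_done(struct demo_rx_wb_desc *desc, unsigned int *size)
{
    *size = le16_to_cpu(READ_ONCE(desc->length));
    if (!*size)
        return false;   /* device has not written this slot back yet */

    /*
     * Order the length read against the rest of the descriptor so we never
     * consume fields from a half-written write-back.
     */
    dma_rmb();
    return true;
}

/* On refill, only the length is cleared instead of the whole descriptor. */
static void demo_rx_desc_rearm(struct demo_rx_wb_desc *desc)
{
    desc->length = 0;
}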
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 04f15092ae31..51a1695a75ee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1868,7 +1868,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, tx_ntc = tx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); - while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { + while (rx_desc->wb.upper.length) { /* check Rx buffer */ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 415e5ad1485a..6fc26cdacba2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1640,8 +1640,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); @@ -2154,7 +2154,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); - if (!rx_desc->wb.upper.status_error) + if (!rx_desc->wb.upper.length) break; /* This memory barrier is needed to keep us from reading @@ -3698,6 +3698,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; + union ixgbe_adv_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; u8 reg_idx = ring->reg_idx; @@ -3732,6 +3733,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, rxdctl |= 0x080420; } + /* initialize Rx descriptor 0 */ + rx_desc = IXGBE_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + /* enable receive descriptor ring */ rxdctl |= IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); @@ -4940,9 +4945,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; memset(rx_ring->rx_buffer_info, 0, size); - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; -- cgit v1.2.3 From 3fd218767fa49857e812e53a27fcbf5d24d040d6 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:36:45 -0800 Subject: ixgbe: Break out Rx buffer page management We are going to be expanding the number of Rx paths in the driver. Instead of duplicating all that code I am pulling it apart into separate functions so that we don't have so much code duplication. 
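Roughly, the split turns the old monolithic fetch routine into fetch / fill-or-build / recycle steps. The skeleton below uses invented demo_* names and stubbed-out bodies purely to show the resulting call shape; the real helpers are the ixgbe_get_rx_buffer(), ixgbe_add_rx_frag(), ixgbe_construct_skb() and ixgbe_put_rx_buffer() functions introduced in the diff.

#include <linux/skbuff.h>

struct demo_rx_buffer {
    struct page *page;
    unsigned int page_offset;
};

struct demo_ring {
    unsigned int next_to_clean;
    struct demo_rx_buffer bufs[256];
};

/* Fetch the buffer for the current descriptor (real code also syncs it). */
static struct demo_rx_buffer *demo_get_rx_buffer(struct demo_ring *ring)
{
    return &ring->bufs[ring->next_to_clean];
}

/* Attach the page to an existing skb (middle/end of a multi-buffer frame). */
static void demo_add_rx_frag(struct demo_rx_buffer *buf, struct sk_buff *skb,
                             unsigned int size)
{
    /* real code: skb_add_rx_frag() plus flipping/advancing page_offset */
}

/* Start a new skb around the buffer (first buffer of a frame). */
static struct sk_buff *demo_construct_skb(struct demo_rx_buffer *buf,
                                          unsigned int size)
{
    /* real code: napi_alloc_skb() (or, later in the series, build_skb()) */
    return NULL;
}

/* Recycle the half page if possible, otherwise unmap and drop it. */
static void demo_put_rx_buffer(struct demo_ring *ring,
                               struct demo_rx_buffer *buf, struct sk_buff *skb)
{
    /* real code: reuse the page if sole owner, else unmap and drain bias */
}

/* With the helpers split out, the per-descriptor body reduces to: */
static struct sk_buff *demo_clean_one(struct demo_ring *ring,
                                      struct sk_buff *skb, unsigned int size)
{
    struct demo_rx_buffer *buf = demo_get_rx_buffer(ring);

    if (skb)
        demo_add_rx_frag(buf, skb, size);
    else
        skb = demo_construct_skb(buf, size);

    demo_put_rx_buffer(ring, buf, skb);
    return skb;
}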
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 231 ++++++++++++++------------ 1 file changed, 122 insertions(+), 109 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6fc26cdacba2..1e5575305f2a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1847,7 +1847,6 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); - IXGBE_CB(skb)->page_released = false; } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; @@ -1857,7 +1856,6 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, skb_frag_size(frag), DMA_FROM_DEVICE); } - IXGBE_CB(skb)->dma = 0; } /** @@ -1928,8 +1926,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - /* transfer page from old buffer to new buffer */ - *new_buff = *old_buff; + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static inline bool ixgbe_page_is_reserved(struct page *page) @@ -1937,16 +1941,10 @@ static inline bool ixgbe_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } -static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *rx_buffer, - struct page *page, - const unsigned int truesize) +static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) { -#if (PAGE_SIZE >= 8192) - unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - - ixgbe_rx_bufsz(rx_ring); -#endif - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; /* avoid re-using remote pages */ if (unlikely(ixgbe_page_is_reserved(page))) @@ -1954,16 +1952,17 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != pagecnt_bias)) + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; #else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define IXGBE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) + if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) return false; #endif @@ -1971,7 +1970,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. 
*/ - if (unlikely(pagecnt_bias == 1)) { + if (unlikely(!pagecnt_bias)) { page_ref_add(page, USHRT_MAX); rx_buffer->pagecnt_bias = USHRT_MAX; } @@ -1994,106 +1993,65 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring, * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. **/ -static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, +static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - unsigned int size, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(size); #endif - - if (unlikely(skb_is_nonlinear(skb))) - goto add_tail_frag; - - if (size <= IXGBE_RX_HDR_SIZE) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!ixgbe_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - return false; - } - -add_tail_frag: - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); - - return ixgbe_can_reuse_rx_page(rx_ring, rx_buffer, page, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif } -static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc) +static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) { - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); struct ixgbe_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); - - skb = rx_buffer->skb; - - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif - - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - return NULL; - } - - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. Only unmap it when EOP is - * reached - */ - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - goto dma_sync; - - IXGBE_CB(skb)->dma = rx_buffer->dma; + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; } else { - if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) - ixgbe_dma_sync_frag(rx_ring, skb); + if (*skb) + ixgbe_dma_sync_frag(rx_ring, *skb); + } -dma_sync: - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; - rx_buffer->skb = NULL; - } + return rx_buffer; +} - /* pull page into skb */ - if (ixgbe_add_rx_frag(rx_ring, rx_buffer, size, skb)) { +static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (ixgbe_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -2107,12 +2065,55 @@ dma_sync: DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); } - __page_frag_cache_drain(page, + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } - /* clear contents of buffer_info */ + /* clear contents of rx_buffer */ rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} + +static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + if (size > IXGBE_RX_HDR_SIZE) { + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset, + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } return skb; } @@ -2144,7 +2145,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; + unsigned int size; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@ -2153,8 +2156,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); - - if (!rx_desc->wb.upper.length) + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) break; /* This memory barrier is needed to keep us from reading @@ -2163,13 +2166,23 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ dma_rmb(); + rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + /* retrieve a buffer from the ring */ - skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); + if (skb) + ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); + else + skb = 
ixgbe_construct_skb(rx_ring, rx_buffer, + rx_desc, size); /* exit if we failed to retrieve a buffer */ - if (!skb) + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; break; + } + ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); cleaned_count++; /* place incomplete frames back on ring for completion */ -- cgit v1.2.3 From 2de6aa3a666e63699978f81d0d5523e7e0778f7b Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:36:54 -0800 Subject: ixgbe: Add support for padding packet This patch adds support for providing a buffer with headroom and tailroom to allow for shared info, NET_SKB_PAD, and NET_IP_ALIGN. With this combined with the DMA changes we can start using build_skb to build frames around an incoming Rx buffer instead of having to memcpy the headers. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 17 +++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 43 ++++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index f3515ce8894d..a2cc43d28888 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -91,6 +91,14 @@ #define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#if (PAGE_SIZE < 8192) +#define IXGBE_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) +#else +#define IGB_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K +#endif + /* * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, @@ -227,6 +235,7 @@ struct ixgbe_rx_queue_stats { enum ixgbe_ring_state_t { __IXGBE_RX_3K_BUFFER, + __IXGBE_RX_BUILD_SKB_ENABLED, __IXGBE_RX_RSC_ENABLED, __IXGBE_RX_CSUM_UDP_ZERO_ERR, __IXGBE_RX_FCOE, @@ -236,6 +245,9 @@ enum ixgbe_ring_state_t { __IXGBE_HANG_CHECK_ARMED, }; +#define ring_uses_build_skb(ring) \ + test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + struct ixgbe_fwd_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; @@ -347,6 +359,10 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) { if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) return IXGBE_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return IXGBE_MAX_FRAME_BUILD_SKB; +#endif return IXGBE_RXBUFFER_2K; } @@ -545,6 +561,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) #define IXGBE_FLAG2_EEE_CAPABLE BIT(14) #define IXGBE_FLAG2_EEE_ENABLED BIT(15) +#define IXGBE_FLAG2_RX_LEGACY BIT(16) /* Tx fast path data */ int num_tx_queues; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1e5575305f2a..7c696b38b405 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1552,6 +1552,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, } } +static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IXGBE_SKB_PAD : 0; +} + static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *bi) { @@ -1588,7 +1593,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = ixgbe_rx_offset(rx_ring); bi->pagecnt_bias = 1; return true; @@ -2001,7 +2006,9 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(size); #endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); @@ -2083,7 +2090,7 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif struct sk_buff *skb; @@ -3410,7 +3417,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ - srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) + srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -3744,6 +3754,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, */ rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; +#if (PAGE_SIZE < 8192) + } else { + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); + + /* Limit the maximum frame size so we don't overrun the skb */ + if (ring_uses_build_skb(ring) && + !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB | + IXGBE_RXDCTL_RLPML_EN; +#endif } /* initialize Rx descriptor 0 */ @@ -3889,12 +3910,26 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) clear_ring_rsc_enabled(rx_ring); clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + continue; + + set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#if (PAGE_SIZE < 8192) + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif } } -- cgit v1.2.3 From 2ccdf26ff614dd49b14e76c0c076f5f4e9562e79 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:37:03 -0800 Subject: ixgbe: Add private flag to control buffer mode Since there are potential drawbacks to the new Rx allocation approach I thought it best to add a "chicken bit" so that we can turn the feature off if in the event that a problem is found. It also provides a means of validating the legacy Rx path in the event that we are forced to fall back. At some point in the future when we are convinced we don't need it anymore we might be able to drop the legacy-rx flag. 
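For reference, a private flag like this is driven from user space through ethtool's standard private-flags interface; assuming an interface named eth0 and an ethtool build that knows about private flags, inspecting and switching paths would look like:

    ethtool --show-priv-flags eth0
    ethtool --set-priv-flags eth0 legacy-rx on
    ethtool --set-priv-flags eth0 legacy-rx off

When the flag actually changes, the handler reinitializes a running interface so the Rx rings are rebuilt in the requested mode.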
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 47 ++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 51a1695a75ee..68500881cbc0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { }; #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) + /* currently supported speeds for 10G */ #define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ SUPPORTED_10000baseKX4_Full | \ @@ -1001,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; } static void ixgbe_get_ringparam(struct net_device *netdev, @@ -1140,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) return IXGBE_TEST_LEN; case ETH_SS_STATS: return IXGBE_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return IXGBE_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1264,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, ixgbe_priv_flags_strings, + IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); } } @@ -3345,6 +3359,37 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) return 0; } +static u32 ixgbe_get_priv_flags(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + unsigned int flags2 = adapter->flags2; + + flags2 &= ~IXGBE_FLAG2_RX_LEGACY; + if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) + flags2 |= IXGBE_FLAG2_RX_LEGACY; + + if (flags2 != adapter->flags2) { + adapter->flags2 = flags2; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + } + + return 0; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -3381,6 +3426,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, + .get_priv_flags = ixgbe_get_priv_flags, + .set_priv_flags = ixgbe_set_priv_flags, .get_ts_info = ixgbe_get_ts_info, .get_module_info = ixgbe_get_module_info, .get_module_eeprom = ixgbe_get_module_eeprom, -- cgit v1.2.3 From 6f429223b31c550b835b4f066ac034d0cf0cc71e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:37:13 -0800 Subject: ixgbe: Add support for build_skb This patch adds build_skb support to the Rx path. There are several advantages to this change. 1. It avoids the memcpy and skb->head allocation for small packets which improves performance by about 5% in my tests. 2. 
It avoids the memcpy, skb->head allocation, and eth_get_headlen for larger packets improving performance by about 10% in my tests. 3. For VXLAN packets it allows the full header to be in skb->data which improves the performance by as much as 30% in some of my tests. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 49 ++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7c696b38b405..2a10365eae75 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1896,7 +1896,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, } /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) + if (!skb_headlen(skb)) ixgbe_pull_tail(rx_ring, skb); #ifdef IXGBE_FCOE @@ -2125,6 +2125,49 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, return skb; } +static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb to around the page buffer */ + skb = build_skb(va - IXGBE_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, IXGBE_SKB_PAD); + __skb_put(skb, size); + + /* record DMA address if this is the start of a chain of buffers */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + /** * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information @@ -2178,6 +2221,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* retrieve a buffer from the ring */ if (skb) ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = ixgbe_build_skb(rx_ring, rx_buffer, + rx_desc, size); else skb = ixgbe_construct_skb(rx_ring, rx_buffer, rx_desc, size); @@ -3918,6 +3964,7 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) continue; -- cgit v1.2.3 From ffed21bcee7a544f99a9c9b18c23b361a0b1e476 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jan 2017 08:37:29 -0800 Subject: ixgbe: Don't bother clearing buffer memory for descriptor rings This patch makes it so that we don't need to bother with clearing the memory out for the descriptor rings. The general idea is to only free buffers associated with buffers in use which are located between the next_to_clean and next_to_use or next_to_alloc values. 
Everything outside of those regions can be safely ignored since they should have no buffers associated with them. The advantage to doing things this way is that is should speed up bring-up and tear-down of the rings. Specifically we can avoid the 512 or more cycles required to memset the rings in tear-down. In the bring-up phase we then clear the memory as a part of initialization. The general idea is that the clearing in initialization can act as a prefetch of sorts for the buffer info structures so they are in the local CPU when we go to populate them. This should help to improve overall time needed to perform a suspend/resume. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 11 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 159 +++++++++++++---------- 2 files changed, 99 insertions(+), 71 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 68500881cbc0..a7574c7b12af 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1904,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, /* unmap buffer on Tx side */ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); /* increment Rx/Tx next to clean counters */ rx_ntc++; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2a10365eae75..060cdce8058f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -945,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, } } -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, - struct ixgbe_tx_buffer *tx_buffer) -{ - if (tx_buffer->skb) { - dev_kfree_skb_any(tx_buffer->skb); - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_single(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - /* tx_buffer must be completely set up in the transmit path */ -} - static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1198,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, DMA_TO_DEVICE); /* clear tx_buffer data */ - tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); /* unmap remaining buffers */ @@ -3293,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct ixgbe_tx_buffer) * ring->count); + /* enable queue */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); @@ -3813,6 +3794,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, #endif } + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct 
ixgbe_rx_buffer) * ring->count); + /* initialize Rx descriptor 0 */ rx_desc = IXGBE_RX_DESC(ring, 0); rx_desc->wb.upper.length = 0; @@ -4990,33 +4975,22 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) **/ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) { - struct device *dev = rx_ring->dev; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; + u16 i = rx_ring->next_to_clean; + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - + while (i != rx_ring->next_to_alloc) { if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; if (IXGBE_CB(skb)->page_released) - dma_unmap_page_attrs(dev, + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); dev_kfree_skb(skb); - rx_buffer->skb = NULL; } - if (!rx_buffer->page) - continue; - /* Invalidate cache lines that may have been written to by * device so that we avoid corrupting memory. */ @@ -5027,19 +5001,21 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) DMA_FROM_DEVICE); /* free resources associated with mapping */ - dma_unmap_page_attrs(dev, rx_buffer->dma, + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); - rx_buffer->page = NULL; + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } } - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -5508,28 +5484,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) **/ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) { - struct ixgbe_tx_buffer *tx_buffer_info; - unsigned long size; - u16 i; + u16 i = tx_ring->next_to_clean; + struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_buffer_info) - return; + while (i != tx_ring->next_to_use) { + union ixgbe_adv_tx_desc *eop_desc, *tx_desc; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - } + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); - netdev_tx_reset_queue(txring_txq(tx_ring)); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); - size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; 
+ i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; } @@ -5975,9 +5980,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) if (tx_ring->q_vector) ring_node = tx_ring->q_vector->numa_node; - tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); + tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vzalloc(size); + tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) goto err; @@ -6059,9 +6064,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) if (rx_ring->q_vector) ring_node = rx_ring->q_vector->numa_node; - rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); + rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vzalloc(size); + rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) goto err; @@ -7776,18 +7781,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, return; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ - for (;;) { + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i--) + i += tx_ring->count; tx_buffer = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); - if (tx_buffer == first) - break; - if (i == 0) - i = tx_ring->count; - i--; } + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + tx_ring->next_to_use = i; } -- cgit v1.2.3 From a57bac43468e005064d6560183b313b269403f0a Mon Sep 17 00:00:00 2001 From: Christoph Haag Date: Fri, 10 Feb 2017 14:02:45 +0100 Subject: Bluetooth: btusb: add support for 0bb4:0306 It's a custom USB ID for the broadcom bt adapter in the HTC Vive. T: Bus=01 Lev=02 Prnt=02 Port=01 Cnt=02 Dev#= 6 Spd=12 MxCh= 0 D: Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0bb4 ProdID=0306 Rev= 1.12 S: Manufacturer=Broadcom Corp S: Product=BCM2045A0 S: SerialNumber=AC3743E110CE C:* #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr= 0mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=ff(vend.) 
Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=btusb E: Ad=84(I) Atr=02(Bulk) MxPS= 32 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 32 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=01 Driver=(none) dmesg: Bluetooth: hci0: BCM: chip id 102 Bluetooth: hci0: c-l Bluetooth: hci0: BCM (001.001.005) build 0000 Bluetooth: hci0: BCM (001.001.005) build 0481 Bluetooth: hci0: BCM20703A1 Generic USB 20Mhz fcbga_BU Signed-off-by: Christoph Haag Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 2f633df9f4e6..fa4d9313442e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -130,6 +130,10 @@ static const struct usb_device_id btusb_table[] = { /* Broadcom BCM43142A0 (Foxconn/Lenovo) */ { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM }, + /* Broadcom BCM920703 (HTC Vive) */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + /* Foxconn - Hon Hai */ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, -- cgit v1.2.3 From 747d3f1a8a2fc4d8447c63205421a6a67f22799b Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 2 Jan 2017 10:09:56 -0300 Subject: Bluetooth: btqcomsmd: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/bluetooth/btqcomsmd.ko | grep alias $ After this patch: $ modinfo drivers/bluetooth/btqcomsmd.ko | grep alias alias: of:N*T*Cqcom,wcnss-btC* alias: of:N*T*Cqcom,wcnss-bt Signed-off-by: Javier Martinez Canillas Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btqcomsmd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 08c2c93887c1..8d4868af9bbd 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -165,6 +165,7 @@ static const struct of_device_id btqcomsmd_of_match[] = { { .compatible = "qcom,wcnss-bt", }, { }, }; +MODULE_DEVICE_TABLE(of, btqcomsmd_of_match); static struct platform_driver btqcomsmd_driver = { .probe = btqcomsmd_probe, -- cgit v1.2.3 From 10ab133b7a1a7265600d580d9e056d86aea70b53 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Wed, 1 Feb 2017 14:24:08 -0800 Subject: Bluetooth: btusb: Use an error label for error paths Use a label to remove the repetetive cleanup, for error cases. 
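As a generic illustration of the single-exit style being adopted here (invented demo_* names, not btusb code): each successfully acquired resource gets one unwind label, so every later failure path shares the same cleanup chain instead of repeating the hci_free_dev()-style calls inline.

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_setup(void *cfg)
{
    return 0;   /* stand-in for a registration step that may fail */
}

static int demo_probe(void)
{
    void *a, *b;
    int err;

    a = kzalloc(64, GFP_KERNEL);
    if (!a)
        return -ENOMEM;     /* nothing to unwind yet */

    b = kzalloc(64, GFP_KERNEL);
    if (!b) {
        err = -ENOMEM;
        goto out_free_a;
    }

    err = demo_setup(b);
    if (err)
        goto out_free_b;    /* later failures reuse the same unwind chain */

    return 0;

out_free_b:
    kfree(b);
out_free_a:
    kfree(a);
    return err;
}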
Signed-off-by: Rajat Jain Reviewed-by: Brian Norris Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index fa4d9313442e..699b2dbaf75b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2995,18 +2995,15 @@ static int btusb_probe(struct usb_interface *intf, err = usb_set_interface(data->udev, 0, 0); if (err < 0) { BT_ERR("failed to set interface 0, alt 0 %d", err); - hci_free_dev(hdev); - return err; + goto out_free_dev; } } if (data->isoc) { err = usb_driver_claim_interface(&btusb_driver, data->isoc, data); - if (err < 0) { - hci_free_dev(hdev); - return err; - } + if (err < 0) + goto out_free_dev; } #ifdef CONFIG_BT_HCIBTUSB_BCM @@ -3020,14 +3017,16 @@ static int btusb_probe(struct usb_interface *intf, #endif err = hci_register_dev(hdev); - if (err < 0) { - hci_free_dev(hdev); - return err; - } + if (err < 0) + goto out_free_dev; usb_set_intfdata(intf, data); return 0; + +out_free_dev: + hci_free_dev(hdev); + return err; } static void btusb_disconnect(struct usb_interface *intf) -- cgit v1.2.3 From fd913ef7ce619467c6b0644af48ba1fec499c623 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Wed, 1 Feb 2017 14:24:09 -0800 Subject: Bluetooth: btusb: Add out-of-band wakeup support Some onboard BT chips (e.g. Marvell 8997) contain a wakeup pin that can be connected to a gpio on the CPU side, and can be used to wakeup the host out-of-band. This can be useful in situations where the in-band wakeup is not possible or not preferable (e.g. the in-band wakeup may require the USB host controller to remain active, and hence consuming more system power during system sleep). The oob gpio interrupt to be used for wakeup on the CPU side, is read from the device tree node, (using standard interrupt descriptors). A devcie tree binding document is also added for the driver. The compatible string is in compliance with Documentation/devicetree/bindings/usb/usb-device.txt Signed-off-by: Rajat Jain Reviewed-by: Brian Norris Acked-by: Rob Herring Signed-off-by: Marcel Holtmann --- Documentation/devicetree/bindings/net/btusb.txt | 40 ++++++++++++ drivers/bluetooth/btusb.c | 85 +++++++++++++++++++++++++ 2 files changed, 125 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/btusb.txt diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt new file mode 100644 index 000000000000..2c0355c85972 --- /dev/null +++ b/Documentation/devicetree/bindings/net/btusb.txt @@ -0,0 +1,40 @@ +Generic Bluetooth controller over USB (btusb driver) +--------------------------------------------------- + +Required properties: + + - compatible : should comply with the format "usbVID,PID" specified in + Documentation/devicetree/bindings/usb/usb-device.txt + At the time of writing, the only OF supported devices + (more may be added later) are: + + "usb1286,204e" (Marvell 8997) + +Optional properties: + + - interrupt-parent: phandle of the parent interrupt controller + - interrupt-names: (see below) + - interrupts : The interrupt specified by the name "wakeup" is the interrupt + that shall be used for out-of-band wake-on-bt. Driver will + request this interrupt for wakeup. During system suspend, the + irq will be enabled so that the bluetooth chip can wakeup host + platform out of band. During system resume, the irq will be + disabled to make sure unnecessary interrupt is not received. 
+ +Example: + +Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt: + +&usb_host1_ehci { + status = "okay"; + #address-cells = <1>; + #size-cells = <0>; + + mvl_bt1: bt@1 { + compatible = "usb1286,204e"; + reg = <1>; + interrupt-parent = <&gpio0>; + interrupt-name = "wakeup"; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + }; +}; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 699b2dbaf75b..f6bf990ccaeb 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include @@ -373,6 +375,7 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_BOOTING 9 #define BTUSB_RESET_RESUME 10 #define BTUSB_DIAG_RUNNING 11 +#define BTUSB_OOB_WAKE_ENABLED 12 struct btusb_data { struct hci_dev *hdev; @@ -420,6 +423,8 @@ struct btusb_data { int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); int (*setup_on_usb)(struct hci_dev *hdev); + + int oob_wake_irq; /* irq for out-of-band wake-on-bt */ }; static inline void btusb_free_frags(struct btusb_data *data) @@ -2732,6 +2737,66 @@ static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable) } #endif +#ifdef CONFIG_PM +static irqreturn_t btusb_oob_wake_handler(int irq, void *priv) +{ + struct btusb_data *data = priv; + + pm_wakeup_event(&data->udev->dev, 0); + + /* Disable only if not already disabled (keep it balanced) */ + if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) { + disable_irq_nosync(irq); + disable_irq_wake(irq); + } + return IRQ_HANDLED; +} + +static const struct of_device_id btusb_match_table[] = { + { .compatible = "usb1286,204e" }, + { } +}; +MODULE_DEVICE_TABLE(of, btusb_match_table); + +/* Use an oob wakeup pin? */ +static int btusb_config_oob_wake(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct device *dev = &data->udev->dev; + int irq, ret; + + clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags); + + if (!of_match_device(btusb_match_table, dev)) + return 0; + + /* Move on if no IRQ specified */ + irq = of_irq_get_byname(dev->of_node, "wakeup"); + if (irq <= 0) { + bt_dev_dbg(hdev, "%s: no OOB Wakeup IRQ in DT", __func__); + return 0; + } + + ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, + 0, "OOB Wake-on-BT", data); + if (ret) { + bt_dev_err(hdev, "%s: IRQ request failed", __func__); + return ret; + } + + ret = device_init_wakeup(dev, true); + if (ret) { + bt_dev_err(hdev, "%s: failed to init_wakeup", __func__); + return ret; + } + + data->oob_wake_irq = irq; + disable_irq(irq); + bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); + return 0; +} +#endif + static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -2853,6 +2918,11 @@ static int btusb_probe(struct usb_interface *intf, hdev->send = btusb_send_frame; hdev->notify = btusb_notify; +#ifdef CONFIG_PM + err = btusb_config_oob_wake(hdev); + if (err) + goto out_free_dev; +#endif if (id->driver_info & BTUSB_CW6622) set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); @@ -3065,6 +3135,9 @@ static void btusb_disconnect(struct usb_interface *intf) usb_driver_release_interface(&btusb_driver, data->isoc); } + if (data->oob_wake_irq) + device_init_wakeup(&data->udev->dev, false); + hci_free_dev(hdev); } @@ -3093,6 +3166,12 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); + if (data->oob_wake_irq && 
device_may_wakeup(&data->udev->dev)) { + set_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags); + enable_irq_wake(data->oob_wake_irq); + enable_irq(data->oob_wake_irq); + } + /* Optionally request a device reset on resume, but only when * wakeups are disabled. If wakeups are enabled we assume the * device will stay powered up throughout suspend. @@ -3130,6 +3209,12 @@ static int btusb_resume(struct usb_interface *intf) if (--data->suspend_count) return 0; + /* Disable only if not already disabled (keep it balanced) */ + if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) { + disable_irq(data->oob_wake_irq); + disable_irq_wake(data->oob_wake_irq); + } + if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; -- cgit v1.2.3 From a4ccc9e33d2f01532bcceb621ea06bbf4db6efac Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Wed, 1 Feb 2017 14:24:10 -0800 Subject: Bluetooth: btusb: Configure Marvell to use one of the pins for oob wakeup The Marvell devices may have many gpio pins, and hence for wakeup on these out-of-band pins, the chip needs to be told which pin is to be used for wakeup, using an hci command. Thus, we read the pin number etc from the device tree node and send a command to the chip. Signed-off-by: Rajat Jain Reviewed-by: Brian Norris Acked-by: Rob Herring Signed-off-by: Marcel Holtmann --- Documentation/devicetree/bindings/net/btusb.txt | 3 + .../devicetree/bindings/net/marvell-bt-8xxx.txt | 86 ++++++++++++++++++++++ .../devicetree/bindings/net/marvell-bt-sd8xxx.txt | 56 -------------- drivers/bluetooth/btusb.c | 51 +++++++++++++ 4 files changed, 140 insertions(+), 56 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt delete mode 100644 Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt index 2c0355c85972..01fa2d4188d4 100644 --- a/Documentation/devicetree/bindings/net/btusb.txt +++ b/Documentation/devicetree/bindings/net/btusb.txt @@ -10,6 +10,9 @@ Required properties: "usb1286,204e" (Marvell 8997) +Also, vendors that use btusb may have device additional properties, e.g: +Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt + Optional properties: - interrupt-parent: phandle of the parent interrupt controller diff --git a/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt new file mode 100644 index 000000000000..9be1059ff03f --- /dev/null +++ b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt @@ -0,0 +1,86 @@ +Marvell 8897/8997 (sd8897/sd8997) bluetooth devices (SDIO or USB based) +------ +The 8997 devices supports multiple interfaces. When used on SDIO interfaces, +the btmrvl driver is used and when used on USB interface, the btusb driver is +used. + +Required properties: + + - compatible : should be one of the following: + * "marvell,sd8897-bt" (for SDIO) + * "marvell,sd8997-bt" (for SDIO) + * "usb1286,204e" (for USB) + +Optional properties: + + - marvell,cal-data: Calibration data downloaded to the device during + initialization. This is an array of 28 values(u8). + This is only applicable to SDIO devices. + + - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. + firmware will use the pin to wakeup host system (u16). + - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host + platform. The value will be configured to firmware. This + is needed to work chip's sleep feature as expected (u16). 
+ - interrupt-parent: phandle of the parent interrupt controller + - interrupt-names: Used only for USB based devices (See below) + - interrupts : specifies the interrupt pin number to the cpu. For SDIO, the + driver will use the first interrupt specified in the interrupt + array. For USB based devices, the driver will use the interrupt + named "wakeup" from the interrupt-names and interrupt arrays. + The driver will request an irq based on this interrupt number. + During system suspend, the irq will be enabled so that the + bluetooth chip can wakeup host platform under certain + conditions. During system resume, the irq will be disabled + to make sure unnecessary interrupt is not received. + +Example: + +IRQ pin 119 is used as system wakeup source interrupt. +wakeup pin 13 and gap 100ms are configured so that firmware can wakeup host +using this device side pin and wakeup latency. + +Example for SDIO device follows (calibration data is also available in +below example). + +&mmc3 { + status = "okay"; + vmmc-supply = <&wlan_en_reg>; + bus-width = <4>; + cap-power-off-card; + keep-power-in-suspend; + + #address-cells = <1>; + #size-cells = <0>; + btmrvl: bluetooth@2 { + compatible = "marvell,sd8897-bt"; + reg = <2>; + interrupt-parent = <&pio>; + interrupts = <119 IRQ_TYPE_LEVEL_LOW>; + + marvell,cal-data = /bits/ 8 < + 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 + 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 + 0x00 0x00 0xf0 0x00>; + marvell,wakeup-pin = /bits/ 16 <0x0d>; + marvell,wakeup-gap-ms = /bits/ 16 <0x64>; + }; +}; + +Example for USB device: + +&usb_host1_ohci { + status = "okay"; + #address-cells = <1>; + #size-cells = <0>; + + mvl_bt1: bt@1 { + compatible = "usb1286,204e"; + reg = <1>; + interrupt-parent = <&gpio0>; + interrupt-names = "wakeup"; + interrupts = <119 IRQ_TYPE_LEVEL_LOW>; + marvell,wakeup-pin = /bits/ 16 <0x0d>; + marvell,wakeup-gap-ms = /bits/ 16 <0x64>; + }; +}; diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt deleted file mode 100644 index 6a9a63cb0543..000000000000 --- a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt +++ /dev/null @@ -1,56 +0,0 @@ -Marvell 8897/8997 (sd8897/sd8997) bluetooth SDIO devices ------- - -Required properties: - - - compatible : should be one of the following: - * "marvell,sd8897-bt" - * "marvell,sd8997-bt" - -Optional properties: - - - marvell,cal-data: Calibration data downloaded to the device during - initialization. This is an array of 28 values(u8). - - - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. - firmware will use the pin to wakeup host system (u16). - - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host - platform. The value will be configured to firmware. This - is needed to work chip's sleep feature as expected (u16). - - interrupt-parent: phandle of the parent interrupt controller - - interrupts : interrupt pin number to the cpu. Driver will request an irq based - on this interrupt number. During system suspend, the irq will be - enabled so that the bluetooth chip can wakeup host platform under - certain condition. During system resume, the irq will be disabled - to make sure unnecessary interrupt is not received. - -Example: - -IRQ pin 119 is used as system wakeup source interrupt. -wakeup pin 13 and gap 100ms are configured so that firmware can wakeup host -using this device side pin and wakeup latency. 
-calibration data is also available in below example. - -&mmc3 { - status = "okay"; - vmmc-supply = <&wlan_en_reg>; - bus-width = <4>; - cap-power-off-card; - keep-power-in-suspend; - - #address-cells = <1>; - #size-cells = <0>; - btmrvl: bluetooth@2 { - compatible = "marvell,sd8897-bt"; - reg = <2>; - interrupt-parent = <&pio>; - interrupts = <119 IRQ_TYPE_LEVEL_LOW>; - - marvell,cal-data = /bits/ 8 < - 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 - 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 - 0x00 0x00 0xf0 0x00>; - marvell,wakeup-pin = /bits/ 16 <0x0d>; - marvell,wakeup-gap-ms = /bits/ 16 <0x64>; - }; -}; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index f6bf990ccaeb..362361f08fbd 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2347,6 +2347,50 @@ static int btusb_shutdown_intel(struct hci_dev *hdev) return 0; } +#ifdef CONFIG_PM +/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */ +static int marvell_config_oob_wake(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct btusb_data *data = hci_get_drvdata(hdev); + struct device *dev = &data->udev->dev; + u16 pin, gap, opcode; + int ret; + u8 cmd[5]; + + /* Move on if no wakeup pin specified */ + if (of_property_read_u16(dev->of_node, "marvell,wakeup-pin", &pin) || + of_property_read_u16(dev->of_node, "marvell,wakeup-gap-ms", &gap)) + return 0; + + /* Vendor specific command to configure a GPIO as wake-up pin */ + opcode = hci_opcode_pack(0x3F, 0x59); + cmd[0] = opcode & 0xFF; + cmd[1] = opcode >> 8; + cmd[2] = 2; /* length of parameters that follow */ + cmd[3] = pin; + cmd[4] = gap; /* time in ms, for which wakeup pin should be asserted */ + + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); + if (!skb) { + bt_dev_err(hdev, "%s: No memory\n", __func__); + return -ENOMEM; + } + + memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd)); + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + + ret = btusb_send_frame(hdev, skb); + if (ret) { + bt_dev_err(hdev, "%s: configuration failed\n", __func__); + kfree_skb(skb); + return ret; + } + + return 0; +} +#endif + static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, const bdaddr_t *bdaddr) { @@ -2922,6 +2966,13 @@ static int btusb_probe(struct usb_interface *intf, err = btusb_config_oob_wake(hdev); if (err) goto out_free_dev; + + /* Marvell devices may need a specific chip configuration */ + if (id->driver_info & BTUSB_MARVELL && data->oob_wake_irq) { + err = marvell_config_oob_wake(hdev); + if (err) + goto out_free_dev; + } #endif if (id->driver_info & BTUSB_CW6622) set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); -- cgit v1.2.3 From 9dcbc313cd4ada386487b3055d9b6ec169d51765 Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Thu, 29 Dec 2016 09:51:19 -0300 Subject: Bluetooth: Fix NULL pointer dereference in bt_sock_recvmsg As per the comment in include/linux/net.h, the recvfrom handlers should expect msg_name to be NULL. However, bt_sock_recvmsg() is currently not checking it, which could lead to a NULL pointer dereference. The following NULL pointer dereference was produced while testing L2CAP datagram reception. Note that the kernel is tainted due to the r8723bs module being inserted. However, it seems the fix still applies. $ l2test -r -G l2test[326]: Receiving ... 
Unable to handle kernel NULL pointer dereference at virtual address 00000000 pgd = ee008000 [00000000] *pgd=7f896835 Internal error: Oops: 817 [#1] PREEMPT SMP ARM Modules linked in: r8723bs(O) CPU: 0 PID: 326 Comm: l2test Tainted: G O 4.8.0 #1 Hardware name: Allwinner sun7i (A20) Family task: ef1c6880 task.stack: eea70000 PC is at __memzero+0x58/0x80 LR is at l2cap_skb_msg_name+0x1c/0x4c pc : [] lr : [] psr: 00070013 sp : eea71e60 ip : 00000000 fp : 00034e1c r10: 00000000 r9 : 00000000 r8 : eea71ed4 r7 : 000002a0 r6 : eea71ed8 r5 : 00000000 r4 : ee4a5d80 r3 : 00000000 r2 : 00000000 r1 : 0000000e r0 : 00000000 Flags: nzcv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none Control: 10c5387d Table: 7600806a DAC: 00000051 Process l2test (pid: 326, stack limit = 0xeea70210) Stack: (0xeea71e60 to 0xeea72000) 1e60: ee4a5d80 eeac2800 000002a0 c04d7114 173eefa0 00000000 c06ca68e 00000000 1e80: 00000001 eeac2800 eef23500 00000000 000002a0 eea71ed4 eea70000 c0504d50 1ea0: 00000000 00000000 eef23500 00000000 00000000 c044e8a0 eea71edc eea9f904 1ec0: bef89aa0 fffffff7 00000000 00035008 000002a0 00000000 00000000 00000000 1ee0: 00000000 00000000 eea71ed4 00000000 00000000 00000000 00004000 00000000 1f00: 0000011b c01078c4 eea70000 c044e5e4 00000000 00000000 642f0001 6c2f7665 1f20: 0000676f 00000000 00000000 00000000 00000000 00000000 00000000 00000000 1f40: 00000000 00000000 00000000 00000000 00000000 ffffffff 00000001 bef89ad8 1f60: 000000a8 c01078c4 eea70000 00000000 00034e1c c01e6c74 00000000 00000000 1f80: 00034e1c 000341f8 00000000 00000123 c01078c4 c044e90c 00000000 00000000 1fa0: 000002a0 c0107700 00034e1c 000341f8 00000003 00035008 000002a0 00000000 1fc0: 00034e1c 000341f8 00000000 00000123 00000000 00000000 00011ffc 00034e1c 1fe0: 00000000 bef89aa4 0001211c b6eebb60 60070010 00000003 00000000 00000000 [] (__memzero) from [] (l2cap_skb_msg_name+0x1c/0x4c) [] (l2cap_skb_msg_name) from [] (bt_sock_recvmsg+0x128/0x160) [] (bt_sock_recvmsg) from [] (l2cap_sock_recvmsg+0x98/0x134) [] (l2cap_sock_recvmsg) from [] (SyS_recvfrom+0x94/0xec) [] (SyS_recvfrom) from [] (SyS_recv+0x14/0x1c) [] (SyS_recv) from [] (ret_fast_syscall+0x0/0x3c) Code: e3110010 18a0500c e49de004 e3110008 (18a0000c) ---[ end trace 224e35e79fe06b42 ]--- Signed-off-by: Ezequiel Garcia Signed-off-by: Marcel Holtmann --- net/bluetooth/af_bluetooth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 1aff2da9bc74..cfb2faba46de 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -245,7 +245,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (err == 0) { sock_recv_ts_and_drops(msg, sk, skb); - if (bt_sk(sk)->skb_msg_name) + if (msg->msg_name && bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); } -- cgit v1.2.3 From 9af02d86e11dc409e5c3de46e81c0a492ba58905 Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Mon, 23 Jan 2017 12:18:51 +0800 Subject: btmrvl: avoid double-disable_irq() race It's much the same as what we did for mwifiex in: b9da4d2 mwifiex: avoid double-disable_irq() race "We have a race where the wakeup IRQ might be in flight while we're calling mwifiex_disable_wake() from resume(). This can leave us disabling the IRQ twice. Let's disable the IRQ and enable it in case if we have double-disabled it." 
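Distilled into a generic sketch (placeholder names, not the driver's actual code), the balanced pattern that the btmrvl change below adopts looks like this: the handler disables its own wake irq and records that it did so, and resume then undoes one extra disable so the enable/disable depth stays balanced.

#include <linux/interrupt.h>
#include <linux/types.h>

struct demo_wake_ctx {
	int irq;
	bool handler_disabled;	/* set when the handler disabled the irq */
};

static irqreturn_t demo_wake_handler(int irq, void *data)
{
	struct demo_wake_ctx *ctx = data;

	ctx->handler_disabled = true;
	disable_irq_nosync(irq);	/* keep a level-triggered pin from re-firing */
	return IRQ_HANDLED;
}

static void demo_resume_disarm(struct demo_wake_ctx *ctx)
{
	disable_irq_wake(ctx->irq);
	disable_irq(ctx->irq);
	if (ctx->handler_disabled) {
		/* The irq is now disabled twice; undo one level so the
		 * reference count stays balanced.
		 */
		enable_irq(ctx->irq);
		ctx->handler_disabled = false;
	}
}
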
Signed-off-by: Jeffy Chen Reviewed-by: Brian Norris Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index d02f2c14df32..c738baeb6d45 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1682,8 +1682,12 @@ static int btmrvl_sdio_resume(struct device *dev) /* Disable platform specific wakeup interrupt */ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) { disable_irq_wake(card->plt_wake_cfg->irq_bt); - if (!card->plt_wake_cfg->wake_by_bt) - disable_irq(card->plt_wake_cfg->irq_bt); + disable_irq(card->plt_wake_cfg->irq_bt); + if (card->plt_wake_cfg->wake_by_bt) + /* Undo our disable, since interrupt handler already + * did this. + */ + enable_irq(card->plt_wake_cfg->irq_bt); } return 0; -- cgit v1.2.3 From fd1a88da81d60a27960174bd9cda312c712961d1 Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Mon, 23 Jan 2017 12:18:52 +0800 Subject: btmrvl: set irq_bt to -1 when failed to parse it The irq_of_parse_and_map will return 0 as a invalid irq. Set irq_bt to -1 in this case, so that the btmrvl resume/suspend code would not try to enable/disable it. Signed-off-by: Jeffy Chen Reviewed-by: Brian Norris Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index c738baeb6d45..796f7194a772 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -97,6 +97,7 @@ static int btmrvl_sdio_probe_of(struct device *dev, cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0); if (!cfg->irq_bt) { dev_err(dev, "fail to parse irq_bt from device tree"); + cfg->irq_bt = -1; } else { ret = devm_request_irq(dev, cfg->irq_bt, btmrvl_wake_irq_bt, -- cgit v1.2.3 From ffb955dba61dd823c3d52a083b6c8d0dcfda0efe Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Mon, 23 Jan 2017 12:18:53 +0800 Subject: btmrvl: use dt's irqflags for wakeup pin Use irqflags parsed from dt. Signed-off-by: Jeffy Chen Reviewed-by: Brian Norris Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 796f7194a772..23711fe9e0a5 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -101,8 +101,7 @@ static int btmrvl_sdio_probe_of(struct device *dev, } else { ret = devm_request_irq(dev, cfg->irq_bt, btmrvl_wake_irq_bt, - IRQF_TRIGGER_LOW, - "bt_wake", cfg); + 0, "bt_wake", cfg); if (ret) { dev_err(dev, "Failed to request irq_bt %d (%d)\n", -- cgit v1.2.3 From fdfddc601713d4ed564b0bd25b29415bd622020a Mon Sep 17 00:00:00 2001 From: Wen-chien Jesse Sung Date: Tue, 10 Jan 2017 15:41:13 +0800 Subject: Bluetooth: btusb: Add support for 413c:8143 This is a Boardcom module and requires patchram to work. T: Bus=01 Lev=03 Prnt=03 Port=01 Cnt=02 Dev#= 5 Spd=12 MxCh= 0 D: Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=413c ProdID=8143 Rev= 1.12 S: Manufacturer=Broadcom Corp S: Product=BCM20702A0 S: SerialNumber=20689D1FAF94 C:* #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr= 0mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none) E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=01 Prot=01 Driver=(none) E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none) E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none) E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none) E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none) E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none) E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none) E: Ad=84(I) Atr=02(Bulk) MxPS= 32 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 32 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=01 Driver=(none) Signed-off-by: Wen-chien Jesse Sung Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 362361f08fbd..0fe1c62d9345 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -160,6 +160,10 @@ static const struct usb_device_id btusb_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, + /* Dell Computer - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x413c, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + /* Toshiba Corp - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, -- cgit v1.2.3 From 3af3a594e111474fab03d872f41760e10d4c17c4 Mon Sep 17 00:00:00 2001 From: Wen-chien Jesse Sung Date: Tue, 10 Jan 2017 11:46:28 +0800 Subject: Bluetooth: btbcm: Add a delay for module reset Some btbcm devices require more time to complete its reset process. They won't reply any hci command until reset is done. [ 17.218554] Bluetooth: hci0 command 0x1001 tx timeout [ 25.214999] Bluetooth: hci0: BCM: Reading local version info failed (-110) Signed-off-by: Wen-chien Jesse Sung Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btbcm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index fdb44829ab6f..ba3dd2eafc09 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -178,6 +178,9 @@ static int btbcm_reset(struct hci_dev *hdev) } kfree_skb(skb); + /* 100 msec delay for module to complete reset process */ + msleep(100); + return 0; } -- cgit v1.2.3 From 89ab37b489d11e2ec3a70635139dcda076c16354 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Thu, 5 Jan 2017 11:10:54 -0600 Subject: Bluetooth: hci_bcm: Add support for BCM2E95 and BCM2E96 The BCM2E96 ID is used by the ECS EF20 laptop, and BCM2E95 is present in the Weibu F3C. Both are now logged as: hci0: BCM: chip id 82 hci0: BCM43341B0 (002.001.014) build 0000 hci0: BCM (002.001.014) build 0158 The ECS vendor kernel predates the host-wakeup support in hci_bcm but it explicitly has a comment saying that the GPIO assignment needs to be reordered for BCM2E96: 1. (not used in vendor driver) 2. Device wakeup 3. Shutdown For both devices in question, the DSDT has these GPIOs listed in order of GpioInt, GpioIo, GpioIo. 
And if we use the first one listed (GpioInt) as the host wakeup, that interrupt handler fires while doing bluetooth I/O. I am assuming the convention of GPIO ordering has been changed for these new device IDs, so lets use the new ordering on such devices. Signed-off-by: Daniel Drake Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcm.c | 68 +++++++++++++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 24 deletions(-) diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 8f6c23c20c52..5262a2077d7a 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -618,14 +618,25 @@ unlock: } #endif -static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false }; -static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false }; -static const struct acpi_gpio_params host_wakeup_gpios = { 2, 0, false }; - -static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = { - { "device-wakeup-gpios", &device_wakeup_gpios, 1 }, - { "shutdown-gpios", &shutdown_gpios, 1 }, - { "host-wakeup-gpios", &host_wakeup_gpios, 1 }, +static const struct acpi_gpio_params int_last_device_wakeup_gpios = { 0, 0, false }; +static const struct acpi_gpio_params int_last_shutdown_gpios = { 1, 0, false }; +static const struct acpi_gpio_params int_last_host_wakeup_gpios = { 2, 0, false }; + +static const struct acpi_gpio_mapping acpi_bcm_int_last_gpios[] = { + { "device-wakeup-gpios", &int_last_device_wakeup_gpios, 1 }, + { "shutdown-gpios", &int_last_shutdown_gpios, 1 }, + { "host-wakeup-gpios", &int_last_host_wakeup_gpios, 1 }, + { }, +}; + +static const struct acpi_gpio_params int_first_host_wakeup_gpios = { 0, 0, false }; +static const struct acpi_gpio_params int_first_device_wakeup_gpios = { 1, 0, false }; +static const struct acpi_gpio_params int_first_shutdown_gpios = { 2, 0, false }; + +static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { + { "device-wakeup-gpios", &int_first_device_wakeup_gpios, 1 }, + { "shutdown-gpios", &int_first_shutdown_gpios, 1 }, + { "host-wakeup-gpios", &int_first_host_wakeup_gpios, 1 }, { }, }; @@ -692,12 +703,19 @@ static int bcm_acpi_probe(struct bcm_device *dev) struct platform_device *pdev = dev->pdev; LIST_HEAD(resources); const struct dmi_system_id *dmi_id; + const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios; + const struct acpi_device_id *id; int ret; - /* Retrieve GPIO data */ dev->name = dev_name(&pdev->dev); + + /* Retrieve GPIO data */ + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (id) + gpio_mapping = (const struct acpi_gpio_mapping *) id->driver_data; + ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev), - acpi_bcm_default_gpios); + gpio_mapping); if (ret) return ret; @@ -822,20 +840,22 @@ static const struct hci_uart_proto bcm_proto = { #ifdef CONFIG_ACPI static const struct acpi_device_id bcm_acpi_match[] = { - { "BCM2E1A", 0 }, - { "BCM2E39", 0 }, - { "BCM2E3A", 0 }, - { "BCM2E3D", 0 }, - { "BCM2E3F", 0 }, - { "BCM2E40", 0 }, - { "BCM2E54", 0 }, - { "BCM2E55", 0 }, - { "BCM2E64", 0 }, - { "BCM2E65", 0 }, - { "BCM2E67", 0 }, - { "BCM2E71", 0 }, - { "BCM2E7B", 0 }, - { "BCM2E7C", 0 }, + { "BCM2E1A", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E39", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E3A", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E3D", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E3F", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E40", 
(kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E54", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E55", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E64", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios }, + { "BCM2E95", (kernel_ulong_t)&acpi_bcm_int_first_gpios }, + { "BCM2E96", (kernel_ulong_t)&acpi_bcm_int_first_gpios }, { }, }; MODULE_DEVICE_TABLE(acpi, bcm_acpi_match); -- cgit v1.2.3 From 441ad62d6c3f131f1dbd7dcdd9cbe3f74dbd8501 Mon Sep 17 00:00:00 2001 From: Dmitry Tunin Date: Thu, 5 Jan 2017 13:19:53 +0300 Subject: Bluetooth: Add another AR3012 04ca:3018 device T: Bus=01 Lev=01 Prnt=01 Port=07 Cnt=04 Dev#= 5 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=04ca ProdID=3018 Rev=00.01 C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb Signed-off-by: Dmitry Tunin Signed-off-by: Marcel Holtmann Cc: stable@vger.kernel.org --- drivers/bluetooth/ath3k.c | 2 ++ drivers/bluetooth/btusb.c | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index fadba88745dc..b793853ff05f 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x04CA, 0x300f) }, { USB_DEVICE(0x04CA, 0x3010) }, { USB_DEVICE(0x04CA, 0x3014) }, + { USB_DEVICE(0x04CA, 0x3018) }, { USB_DEVICE(0x0930, 0x0219) }, { USB_DEVICE(0x0930, 0x021c) }, { USB_DEVICE(0x0930, 0x0220) }, @@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 0fe1c62d9345..1c8094ef3f22 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -219,6 +219,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, -- cgit v1.2.3 From e27ee2b16bad0c2ae648f5ee67d0555e0cd799d6 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 2 Jan 2017 10:23:23 -0300 Subject: Bluetooth: btqcomsmd: Allow driver to build if COMPILE_TEST is enabled The driver only has runtime but no build time dependency with QCOM_SMD && QCOM_WCNSS_CTRL. So it can be built for testing purposes if COMPILE_TEST option is enabled. 
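A runtime-only dependency of this kind usually relies on the stub-header idiom, sketched generically below (invented names, not the actual Qualcomm headers): when the provider option is off, its header exposes inline stubs, so a consumer enabled via COMPILE_TEST still builds and simply fails at probe time.

#include <linux/err.h>
#include <linux/kconfig.h>

struct provider_channel;

#if IS_ENABLED(CONFIG_DEMO_PROVIDER)
struct provider_channel *provider_open_channel(const char *name);
int provider_send(struct provider_channel *ch, const void *data, int len);
#else
/* Stubs keep consumers compilable when the provider is disabled. */
static inline struct provider_channel *provider_open_channel(const char *name)
{
	return ERR_PTR(-ENXIO);
}

static inline int provider_send(struct provider_channel *ch,
				const void *data, int len)
{
	return -ENXIO;
}
#endif
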
This is useful to have more build coverage and make sure that the driver is not affected by changes that could cause build regressions. Signed-off-by: Javier Martinez Canillas Signed-off-by: Marcel Holtmann --- drivers/bluetooth/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 3cc9bff9d99d..c2c14a12713b 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -344,7 +344,7 @@ config BT_WILINK config BT_QCOMSMD tristate "Qualcomm SMD based HCI support" - depends on QCOM_SMD && QCOM_WCNSS_CTRL + depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST select BT_QCA help Qualcomm SMD based HCI driver. -- cgit v1.2.3 From a9a4840dc43c978b2978b37ed1737c385a33b410 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 28 Dec 2016 21:30:30 +0000 Subject: Bluetooth: btmrvl: fix spelling mistake: "caibration" -> "calibration" trivial fix to spelling mistake in BT_ERR error message Signed-off-by: Colin Ian King Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index e6a85f0e6309..c38cb5b91291 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -502,7 +502,7 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv, ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data, BT_CAL_HDR_LEN + len); if (ret) - BT_ERR("Failed to download caibration data"); + BT_ERR("Failed to download calibration data"); return 0; } -- cgit v1.2.3 From 27d1c469a5d3a104fc5a9313dac84e726f8f3de9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 28 Dec 2016 21:17:54 +0000 Subject: Bluetooth: fix spelling mistake: "advetising" -> "advertising" trivial fix to spelling mistake in BT_ERR_RATELIMITED error message Signed-off-by: Colin Ian King Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index e17aacbc5630..0b4dba08a14e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4749,7 +4749,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, case LE_ADV_SCAN_RSP: break; default: - BT_ERR_RATELIMITED("Unknown advetising packet type: 0x%02x", + BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x", type); return; } -- cgit v1.2.3 From 45da1ca2e20fb5b13c1abc8e65185dda08c88030 Mon Sep 17 00:00:00 2001 From: Arjun V Date: Thu, 16 Feb 2017 12:22:45 +0530 Subject: cxgb4: Increase max number of tc u32 links Make max number of supported tc u32 links equal to max number of filters supported by hardware. Signed-off-by: Arjun V Signed-off-by: Rahul Lakkireddy Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 +-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 12 +++++------- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h | 5 +---- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8ba3dc2e236c..afb0967d2ce6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4907,8 +4907,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "continuing\n"); adapter->params.offload = 0; } else { - adapter->tc_u32 = cxgb4_init_tc_u32(adapter, - CXGB4_MAX_LINK_HANDLE); + adapter->tc_u32 = cxgb4_init_tc_u32(adapter); if (!adapter->tc_u32) dev_warn(&pdev->dev, "could not offload tc u32, continuing\n"); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 52af62e0ecb6..a1b19422b339 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -437,28 +437,26 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap) t4_free_mem(adap->tc_u32); } -struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap, - unsigned int size) +struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) { + unsigned int max_tids = adap->tids.nftids; struct cxgb4_tc_u32_table *t; unsigned int i; - if (!size) + if (!max_tids) return NULL; t = t4_alloc_mem(sizeof(*t) + - (size * sizeof(struct cxgb4_link))); + (max_tids * sizeof(struct cxgb4_link))); if (!t) return NULL; - t->size = size; + t->size = max_tids; for (i = 0; i < t->size; i++) { struct cxgb4_link *link = &t->table[i]; unsigned int bmap_size; - unsigned int max_tids; - max_tids = adap->tids.nftids; bmap_size = BITS_TO_LONGS(max_tids); link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size); if (!link->tid_map) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h index 6bdc885eff22..021261a41c13 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h @@ -37,8 +37,6 @@ #include -#define CXGB4_MAX_LINK_HANDLE 32 - static inline bool can_tc_u32_offload(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); @@ -52,6 +50,5 @@ int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, struct tc_cls_u32_offload *cls); void cxgb4_cleanup_tc_u32(struct adapter *adapter); -struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap, - unsigned int size); +struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap); #endif /* __CXGB4_TC_U32_H */ -- cgit v1.2.3 From 5d071c24f0cb8ce9fb5642c2a65ab5ab7f5ad244 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 16 Feb 2017 12:25:52 +0530 Subject: cxgb4: Add new T5 and T6 pci device id's Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index ecf3ccc257bc..a323185507ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -169,6 +169,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/ CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/ + CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ /* T6 adapters: */ @@ -185,6 +188,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6011), CH_PCI_ID_TABLE_FENTRY(0x6014), CH_PCI_ID_TABLE_FENTRY(0x6015), + CH_PCI_ID_TABLE_FENTRY(0x6080), + CH_PCI_ID_TABLE_FENTRY(0x6081), CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ -- cgit v1.2.3 From f3caf8618bce7e86c6f4f86785dd004c71b63a2d Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 16 Feb 2017 12:27:15 +0530 Subject: cxgb4: Remove redundant code in t4_uld_clean_up() Remove variable rxq_info and also remove redundant assignment to it. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 36105b6837cb..d0868c2320da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -591,7 +591,6 @@ void t4_uld_mem_free(struct adapter *adap) void t4_uld_clean_up(struct adapter *adap) { - struct sge_uld_rxq_info *rxq_info; unsigned int i; if (!adap->uld) @@ -599,7 +598,6 @@ void t4_uld_clean_up(struct adapter *adap) for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) continue; - rxq_info = adap->sge.uld_rxq_info[i]; if (adap->flags & FULL_INIT_DONE) quiesce_rx_uld(adap, i); if (adap->flags & USING_MSIX) -- cgit v1.2.3 From 01f0f4253469e1b1e80bc84ae64728c294534836 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 10 Feb 2017 04:27:58 -0800 Subject: mlx4: do not fire tasklet unless necessary All rx and rx netdev interrupts are handled by respectively by mlx4_en_rx_irq() and mlx4_en_tx_irq() which simply schedule a NAPI. But mlx4_eq_int() also fires a tasklet to service all items that were queued via mlx4_add_cq_to_tasklet(), but this handler was not called unless user cqe was handled. This is very confusing, as "mpstat -I SCPU ..." show huge number of tasklet invocations. This patch saves this overhead, by carefully firing the tasklet directly from mlx4_add_cq_to_tasklet(), removing four atomic operations per IRQ. Signed-off-by: Eric Dumazet Cc: Tariq Toukan Cc: Saeed Mahameed Acked-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cq.c | 6 +++++- drivers/net/ethernet/mellanox/mlx4/eq.c | 9 +-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 6b8635378f1f..fa6d2354a0e9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -81,8 +81,9 @@ void mlx4_cq_tasklet_cb(unsigned long data) static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) { - unsigned long flags; struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; + unsigned long flags; + bool kick; spin_lock_irqsave(&tasklet_ctx->lock, flags); /* When migrating CQs between EQs will be implemented, please note @@ -92,7 +93,10 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) */ if (list_empty_careful(&cq->tasklet_ctx.list)) { atomic_inc(&cq->refcount); + kick = list_empty(&tasklet_ctx->list); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); + if (kick) + tasklet_schedule(&tasklet_ctx->task); } spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 0509996957d9..39232b6a974f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -494,7 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eqe *eqe; - int cqn = -1; + int cqn; int eqes_found = 0; int set_ci = 0; int port; @@ -840,13 +840,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq_set_ci(eq, 1); - /* cqn is 24bit wide but is initialized such that its higher bits - * are ones too. Thus, if we got any event, cqn's high bits should be off - * and we need to schedule the tasklet. - */ - if (!(cqn & ~0xffffff)) - tasklet_schedule(&eq->tasklet_ctx.task); - return eqes_found; } -- cgit v1.2.3 From 882f312dc0751c973db26478f07f082c584d16aa Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 14 Feb 2017 10:23:31 -0800 Subject: ptp: do not explicitly set drvdata in ptp_clock_register() We do not need explicitly call dev_set_drvdata(), as it is done for us by device_create(). Acked-by: Richard Cochran Signed-off-by: Dmitry Torokhov Signed-off-by: David S. Miller --- drivers/ptp/ptp_clock.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 9c13381b6966..b4e5e8022c29 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -227,8 +227,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, if (IS_ERR(ptp->dev)) goto no_device; - dev_set_drvdata(ptp->dev, ptp); - err = ptp_populate_sysfs(ptp); if (err) goto no_sysfs; -- cgit v1.2.3 From 6f7aa56bae6ff38727d5c8bf6ee7d4202b4e3865 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 14 Feb 2017 10:23:32 -0800 Subject: ptp: use kcalloc when allocating arrays kcalloc is more semantically correct when allocating arrays of objects, and overflow-safe. Signed-off-by: Dmitry Torokhov Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_sysfs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 53d43954a974..27cd46ab5e32 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -269,13 +269,12 @@ static int ptp_populate_pins(struct ptp_clock *ptp) struct ptp_clock_info *info = ptp->info; int err = -ENOMEM, i, n_pins = info->n_pins; - ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr), + ptp->pin_dev_attr = kcalloc(n_pins, sizeof(*ptp->pin_dev_attr), GFP_KERNEL); if (!ptp->pin_dev_attr) goto no_dev_attr; - ptp->pin_attr = kzalloc((1 + n_pins) * sizeof(struct attribute *), - GFP_KERNEL); + ptp->pin_attr = kcalloc(1 + n_pins, sizeof(*ptp->pin_attr), GFP_KERNEL); if (!ptp->pin_attr) goto no_pin_attr; -- cgit v1.2.3 From af59e717d5ff9c8dbf9bcc581c0dfb3b2a9c9030 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 14 Feb 2017 10:23:33 -0800 Subject: ptp: use is_visible method to hide unused attributes Instead of creating selected attributes after the device is created (and after userspace potentially seen uevent), lets use attribute group is_visible() method to control which attributes are shown. This will allow us to create all attributes (except "pins" group, which will be taken care of later) before userspace gets notified about new ptp class device. Signed-off-by: Dmitry Torokhov Signed-off-by: David S. Miller --- drivers/ptp/ptp_sysfs.c | 125 +++++++++++++++++++++--------------------------- 1 file changed, 55 insertions(+), 70 deletions(-) diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 27cd46ab5e32..426e42c51df4 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -46,27 +46,6 @@ PTP_SHOW_INT(n_periodic_outputs, n_per_out); PTP_SHOW_INT(n_programmable_pins, n_pins); PTP_SHOW_INT(pps_available, pps); -static struct attribute *ptp_attrs[] = { - &dev_attr_clock_name.attr, - &dev_attr_max_adjustment.attr, - &dev_attr_n_alarms.attr, - &dev_attr_n_external_timestamps.attr, - &dev_attr_n_periodic_outputs.attr, - &dev_attr_n_programmable_pins.attr, - &dev_attr_pps_available.attr, - NULL, -}; - -static const struct attribute_group ptp_group = { - .attrs = ptp_attrs, -}; - -const struct attribute_group *ptp_groups[] = { - &ptp_group, - NULL, -}; - - static ssize_t extts_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -91,6 +70,7 @@ static ssize_t extts_enable_store(struct device *dev, out: return err; } +static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store); static ssize_t extts_fifo_show(struct device *dev, struct device_attribute *attr, char *page) @@ -124,6 +104,7 @@ out: mutex_unlock(&ptp->tsevq_mux); return cnt; } +static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL); static ssize_t period_store(struct device *dev, struct device_attribute *attr, @@ -151,6 +132,7 @@ static ssize_t period_store(struct device *dev, out: return err; } +static DEVICE_ATTR(period, 0220, NULL, period_store); static ssize_t pps_enable_store(struct device *dev, struct device_attribute *attr, @@ -177,6 +159,57 @@ static ssize_t pps_enable_store(struct device *dev, out: return err; } +static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); + +static struct attribute *ptp_attrs[] = { + &dev_attr_clock_name.attr, + + &dev_attr_max_adjustment.attr, + &dev_attr_n_alarms.attr, + &dev_attr_n_external_timestamps.attr, + &dev_attr_n_periodic_outputs.attr, + &dev_attr_n_programmable_pins.attr, + &dev_attr_pps_available.attr, + + 
&dev_attr_extts_enable.attr, + &dev_attr_fifo.attr, + &dev_attr_period.attr, + &dev_attr_pps_enable.attr, + NULL +}; + +static umode_t ptp_is_attribute_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *info = ptp->info; + umode_t mode = attr->mode; + + if (attr == &dev_attr_extts_enable.attr || + attr == &dev_attr_fifo.attr) { + if (!info->n_ext_ts) + mode = 0; + } else if (attr == &dev_attr_period.attr) { + if (!info->n_per_out) + mode = 0; + } else if (attr == &dev_attr_pps_enable.attr) { + if (!info->pps) + mode = 0; + } + + return mode; +} + +static const struct attribute_group ptp_group = { + .is_visible = ptp_is_attribute_visible, + .attrs = ptp_attrs, +}; + +const struct attribute_group *ptp_groups[] = { + &ptp_group, + NULL +}; static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name) { @@ -235,26 +268,11 @@ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store); -static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL); -static DEVICE_ATTR(period, 0220, NULL, period_store); -static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); - int ptp_cleanup_sysfs(struct ptp_clock *ptp) { struct device *dev = ptp->dev; struct ptp_clock_info *info = ptp->info; - if (info->n_ext_ts) { - device_remove_file(dev, &dev_attr_extts_enable); - device_remove_file(dev, &dev_attr_fifo); - } - if (info->n_per_out) - device_remove_file(dev, &dev_attr_period); - - if (info->pps) - device_remove_file(dev, &dev_attr_pps_enable); - if (info->n_pins) { sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group); kfree(ptp->pin_attr); @@ -306,46 +324,13 @@ no_dev_attr: int ptp_populate_sysfs(struct ptp_clock *ptp) { - struct device *dev = ptp->dev; struct ptp_clock_info *info = ptp->info; int err; - if (info->n_ext_ts) { - err = device_create_file(dev, &dev_attr_extts_enable); - if (err) - goto out1; - err = device_create_file(dev, &dev_attr_fifo); - if (err) - goto out2; - } - if (info->n_per_out) { - err = device_create_file(dev, &dev_attr_period); - if (err) - goto out3; - } - if (info->pps) { - err = device_create_file(dev, &dev_attr_pps_enable); - if (err) - goto out4; - } if (info->n_pins) { err = ptp_populate_pins(ptp); if (err) - goto out5; + return err; } return 0; -out5: - if (info->pps) - device_remove_file(dev, &dev_attr_pps_enable); -out4: - if (info->n_per_out) - device_remove_file(dev, &dev_attr_period); -out3: - if (info->n_ext_ts) - device_remove_file(dev, &dev_attr_fifo); -out2: - if (info->n_ext_ts) - device_remove_file(dev, &dev_attr_extts_enable); -out1: - return err; } -- cgit v1.2.3 From 85a66e55019583da1e0f18706b7a8281c9f6de5b Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 14 Feb 2017 10:23:34 -0800 Subject: ptp: create "pins" together with the rest of attributes Let's switch to using device_create_with_groups(), which will allow us to create "pins" attribute group together with the rest of ptp device attributes, and before userspace gets notified about ptp device creation. Signed-off-by: Dmitry Torokhov Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_clock.c | 20 +++++++++++--------- drivers/ptp/ptp_private.h | 7 ++++--- drivers/ptp/ptp_sysfs.c | 39 +++++++++------------------------------ 3 files changed, 24 insertions(+), 42 deletions(-) diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index b4e5e8022c29..e8142803a1a7 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -221,16 +221,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, mutex_init(&ptp->pincfg_mux); init_waitqueue_head(&ptp->tsev_wq); + err = ptp_populate_pin_groups(ptp); + if (err) + goto no_pin_groups; + /* Create a new device in our class. */ - ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp, - "ptp%d", ptp->index); + ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid, + ptp, ptp->pin_attr_groups, + "ptp%d", ptp->index); if (IS_ERR(ptp->dev)) goto no_device; - err = ptp_populate_sysfs(ptp); - if (err) - goto no_sysfs; - /* Register a new PPS source. */ if (info->pps) { struct pps_source_info pps; @@ -258,10 +259,10 @@ no_clock: if (ptp->pps_source) pps_unregister_source(ptp->pps_source); no_pps: - ptp_cleanup_sysfs(ptp); -no_sysfs: device_destroy(ptp_class, ptp->devid); no_device: + ptp_cleanup_pin_groups(ptp); +no_pin_groups: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, index); @@ -280,8 +281,9 @@ int ptp_clock_unregister(struct ptp_clock *ptp) /* Release the clock's resources. */ if (ptp->pps_source) pps_unregister_source(ptp->pps_source); - ptp_cleanup_sysfs(ptp); + device_destroy(ptp_class, ptp->devid); + ptp_cleanup_pin_groups(ptp); posix_clock_unregister(&ptp->clock); return 0; diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 9c5d41421b65..d95888974d0c 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -54,6 +54,8 @@ struct ptp_clock { struct device_attribute *pin_dev_attr; struct attribute **pin_attr; struct attribute_group pin_attr_group; + /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ + const struct attribute_group *pin_attr_groups[2]; }; /* @@ -94,8 +96,7 @@ uint ptp_poll(struct posix_clock *pc, extern const struct attribute_group *ptp_groups[]; -int ptp_cleanup_sysfs(struct ptp_clock *ptp); - -int ptp_populate_sysfs(struct ptp_clock *ptp); +int ptp_populate_pin_groups(struct ptp_clock *ptp); +void ptp_cleanup_pin_groups(struct ptp_clock *ptp); #endif diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 426e42c51df4..48401dfcd999 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -268,25 +268,14 @@ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr, return count; } -int ptp_cleanup_sysfs(struct ptp_clock *ptp) +int ptp_populate_pin_groups(struct ptp_clock *ptp) { - struct device *dev = ptp->dev; - struct ptp_clock_info *info = ptp->info; - - if (info->n_pins) { - sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group); - kfree(ptp->pin_attr); - kfree(ptp->pin_dev_attr); - } - return 0; -} - -static int ptp_populate_pins(struct ptp_clock *ptp) -{ - struct device *dev = ptp->dev; struct ptp_clock_info *info = ptp->info; int err = -ENOMEM, i, n_pins = info->n_pins; + if (!n_pins) + return 0; + ptp->pin_dev_attr = kcalloc(n_pins, sizeof(*ptp->pin_dev_attr), GFP_KERNEL); if (!ptp->pin_dev_attr) @@ -309,28 +298,18 @@ static int ptp_populate_pins(struct ptp_clock *ptp) ptp->pin_attr_group.name = "pins"; ptp->pin_attr_group.attrs = ptp->pin_attr; - err = 
sysfs_create_group(&dev->kobj, &ptp->pin_attr_group); - if (err) - goto no_group; + ptp->pin_attr_groups[0] = &ptp->pin_attr_group; + return 0; -no_group: - kfree(ptp->pin_attr); no_pin_attr: kfree(ptp->pin_dev_attr); no_dev_attr: return err; } -int ptp_populate_sysfs(struct ptp_clock *ptp) +void ptp_cleanup_pin_groups(struct ptp_clock *ptp) { - struct ptp_clock_info *info = ptp->info; - int err; - - if (info->n_pins) { - err = ptp_populate_pins(ptp); - if (err) - return err; - } - return 0; + kfree(ptp->pin_attr); + kfree(ptp->pin_dev_attr); } -- cgit v1.2.3 From 0fa9e2899c5d128723056d2ecc950bda7b51af20 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Tue, 14 Feb 2017 23:36:32 +0100 Subject: net: nvidia: forcedeth: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 91 ++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 58ba5d3f9e5f..92367a06491a 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -4237,14 +4237,15 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) return 0; } -static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int nv_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct fe_priv *np = netdev_priv(dev); - u32 speed; + u32 speed, supported, advertising; int adv; spin_lock_irq(&np->lock); - ecmd->port = PORT_MII; + cmd->base.port = PORT_MII; if (!netif_running(dev)) { /* We do not track link speed / duplex setting if the * interface is disabled. 
Force a link check */ @@ -4272,64 +4273,71 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) speed = -1; break; } - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; if (np->duplex) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; } else { speed = SPEED_UNKNOWN; - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_cmd_speed_set(ecmd, speed); - ecmd->autoneg = np->autoneg; + cmd->base.speed = speed; + cmd->base.autoneg = np->autoneg; - ecmd->advertising = ADVERTISED_MII; + advertising = ADVERTISED_MII; if (np->autoneg) { - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); if (adv & ADVERTISE_10HALF) - ecmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if (adv & ADVERTISE_10FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (adv & ADVERTISE_100HALF) - ecmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if (adv & ADVERTISE_100FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; if (np->gigabit == PHY_GIGABIT) { adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); if (adv & ADVERTISE_1000FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; } } - ecmd->supported = (SUPPORTED_Autoneg | + supported = (SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_MII); if (np->gigabit == PHY_GIGABIT) - ecmd->supported |= SUPPORTED_1000baseT_Full; + supported |= SUPPORTED_1000baseT_Full; - ecmd->phy_address = np->phyaddr; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.phy_address = np->phyaddr; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); /* ignore maxtxpkt, maxrxpkt for now */ spin_unlock_irq(&np->lock); return 0; } -static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int nv_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct fe_priv *np = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; + u32 advertising; - if (ecmd->port != PORT_MII) - return -EINVAL; - if (ecmd->transceiver != XCVR_EXTERNAL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.port != PORT_MII) return -EINVAL; - if (ecmd->phy_address != np->phyaddr) { + if (cmd->base.phy_address != np->phyaddr) { /* TODO: support switching between multiple phys. Should be * trivial, but not enabled due to lack of test hardware. */ return -EINVAL; } - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 mask; mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | @@ -4337,16 +4345,17 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (np->gigabit == PHY_GIGABIT) mask |= ADVERTISED_1000baseT_Full; - if ((ecmd->advertising & mask) == 0) + if ((advertising & mask) == 0) return -EINVAL; - } else if (ecmd->autoneg == AUTONEG_DISABLE) { + } else if (cmd->base.autoneg == AUTONEG_DISABLE) { /* Note: autonegotiation disable, speed 1000 intentionally * forbidden - no one should need that. 
*/ if (speed != SPEED_10 && speed != SPEED_100) return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + if (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL) return -EINVAL; } else { return -EINVAL; @@ -4376,7 +4385,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) netif_tx_unlock_bh(dev); } - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { int adv, bmcr; np->autoneg = 1; @@ -4384,13 +4393,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) /* advertise only what has been requested */ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - if (ecmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; @@ -4401,7 +4410,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (np->gigabit == PHY_GIGABIT) { adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); adv &= ~ADVERTISE_1000FULL; - if (ecmd->advertising & ADVERTISED_1000baseT_Full) + if (advertising & ADVERTISED_1000baseT_Full) adv |= ADVERTISE_1000FULL; mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); } @@ -4428,13 +4437,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) + if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF) adv |= ADVERTISE_10HALF; - if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) + if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL) adv |= ADVERTISE_10FULL; - if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) + if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF) adv |= ADVERTISE_100HALF; - if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) + if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL) adv |= ADVERTISE_100FULL; np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ @@ -5241,8 +5250,6 @@ static const struct ethtool_ops ops = { .get_link = ethtool_op_get_link, .get_wol = nv_get_wol, .set_wol = nv_set_wol, - .get_settings = nv_get_settings, - .set_settings = nv_set_settings, .get_regs_len = nv_get_regs_len, .get_regs = nv_get_regs, .nway_reset = nv_nway_reset, @@ -5255,6 +5262,8 @@ static const struct ethtool_ops ops = { .get_sset_count = nv_get_sset_count, .self_test = nv_self_test, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = nv_get_link_ksettings, + .set_link_ksettings = nv_set_link_ksettings, }; /* The mgmt unit and driver use a semaphore to access the phy during init */ -- cgit v1.2.3 From 9fe9aa0b7334f65755fa6e9b20cb633a09fe76f7 Mon Sep 17 
00:00:00 2001 From: Ivan Khoronzhuk Date: Wed, 15 Feb 2017 19:45:02 +0200 Subject: net: ethernet: ti: cpsw: correct ale dev to cpsw The ale is a property of cpsw, so change dev to cpsw->dev, aka pdev->dev, to be consistent. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 10f6f7496f50..9f3d9c67e3fe 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -3032,7 +3032,7 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_dma_ret; } - ale_params.dev = &ndev->dev; + ale_params.dev = &pdev->dev; ale_params.ale_ageout = ale_ageout; ale_params.ale_entries = data->ale_entries; ale_params.ale_ports = data->slaves; -- cgit v1.2.3 From ad8e963ca6838071dd2f3db2df66b247b99f4295 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 16 Feb 2017 09:10:49 +0100 Subject: net: oki-semi: pch_gbe: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- .../ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 58 ++++++++++++++-------- 1 file changed, 38 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index b19be7c6c1f4..21093276d2b7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -73,62 +73,80 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = { #define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4) #define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN) /** - * pch_gbe_get_settings - Get device-specific settings + * pch_gbe_get_link_ksettings - Get device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command * Returns: * 0: Successful. * Negative value: Failed. 
*/ -static int pch_gbe_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int pch_gbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); + u32 supported, advertising; int ret; - ret = mii_ethtool_gset(&adapter->mii, ecmd); - ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half); - ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); + ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd); + + ethtool_convert_link_mode_to_legacy_u32(&supported, + ecmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + ecmd->link_modes.advertising); + + supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half); + advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); if (!netif_carrier_ok(adapter->netdev)) - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->base.speed = SPEED_UNKNOWN; return ret; } /** - * pch_gbe_set_settings - Set device-specific settings + * pch_gbe_set_link_ksettings - Set device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command * Returns: * 0: Successful. * Negative value: Failed. */ -static int pch_gbe_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int pch_gbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ecmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; - u32 speed = ethtool_cmd_speed(ecmd); + struct ethtool_link_ksettings copy_ecmd; + u32 speed = ecmd->base.speed; + u32 advertising; int ret; pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + memcpy(©_ecmd, ecmd, sizeof(*ecmd)); + /* when set_settings() is called with a ethtool_cmd previously * filled by get_settings() on a down link, speed is -1: */ if (speed == UINT_MAX) { speed = SPEED_1000; - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = DUPLEX_FULL; + copy_ecmd.base.speed = speed; + copy_ecmd.base.duplex = DUPLEX_FULL; } - ret = mii_ethtool_sset(&adapter->mii, ecmd); + ret = mii_ethtool_set_link_ksettings(&adapter->mii, ©_ecmd); if (ret) { - netdev_err(netdev, "Error: mii_ethtool_sset\n"); + netdev_err(netdev, "Error: mii_ethtool_set_link_ksettings\n"); return ret; } hw->mac.link_speed = speed; - hw->mac.link_duplex = ecmd->duplex; - hw->phy.autoneg_advertised = ecmd->advertising; - hw->mac.autoneg = ecmd->autoneg; + hw->mac.link_duplex = copy_ecmd.base.duplex; + ethtool_convert_link_mode_to_legacy_u32( + &advertising, copy_ecmd.link_modes.advertising); + hw->phy.autoneg_advertised = advertising; + hw->mac.autoneg = copy_ecmd.base.autoneg; /* reset the link */ if (netif_running(adapter->netdev)) { @@ -487,8 +505,6 @@ static int pch_gbe_get_sset_count(struct net_device *netdev, int sset) } static const struct ethtool_ops pch_gbe_ethtool_ops = { - .get_settings = pch_gbe_get_settings, - .set_settings = pch_gbe_set_settings, .get_drvinfo = pch_gbe_get_drvinfo, .get_regs_len = pch_gbe_get_regs_len, .get_regs = pch_gbe_get_regs, @@ -503,6 +519,8 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = { .get_strings = pch_gbe_get_strings, .get_ethtool_stats = pch_gbe_get_ethtool_stats, .get_sset_count = pch_gbe_get_sset_count, + .get_link_ksettings = pch_gbe_get_link_ksettings, + .set_link_ksettings = 
pch_gbe_set_link_ksettings, }; void pch_gbe_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From 749e6720d2ee10d5221d5d7b8cee8ac5d1cd690e Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 16 Feb 2017 10:31:10 +0200 Subject: net/sched: cls_flower: Properly handle classifier flags dumping Dump the classifier flags only if non zero and make sure to check the return status of the handler that puts them into the netlink msg. Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 0826c8ec3a76..850d98294bf6 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1229,7 +1229,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) goto nla_put_failure; - nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags); + if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts)) goto nla_put_failure; -- cgit v1.2.3 From 7a335adad8b06778c0876aa5a5eb8954cd835bf5 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 16 Feb 2017 10:31:11 +0200 Subject: net/sched: cls_matchall: Dump the classifier flags The classifier flags are not dumped to user-space, do that. Signed-off-by: Or Gerlitz Acked-by: Jiri Pirko Acked-by: Yotam Gigi Signed-off-by: David S. Miller --- net/sched/cls_matchall.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index f2141cb55562..35ef1c1be1f3 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -244,6 +244,9 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid)) goto nla_put_failure; + if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags)) + goto nla_put_failure; + if (tcf_exts_dump(skb, &head->exts)) goto nla_put_failure; -- cgit v1.2.3 From e696028acc458aa3d43ad899371a963eb28336d8 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 16 Feb 2017 10:31:12 +0200 Subject: net/sched: Reflect HW offload status Currently there is no way of querying whether a filter is offloaded to HW or not when using "both" policy (where none of skip_sw or skip_hw flags are set by user-space). Add two new flags, "in hw" and "not in hw" such that user space can determine if a filter is actually offloaded to hw or not. The "in hw" UAPI semantics was chosen so it's similar to the "skip hw" flag logic. If none of these two flags are set, this signals running over older kernel. Signed-off-by: Or Gerlitz Reviewed-by: Amir Vadai Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 5 +++++ include/uapi/linux/pkt_cls.h | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index be5c12a5c375..269fd78bb0ae 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -481,6 +481,11 @@ static inline bool tc_flags_valid(u32 flags) return true; } +static inline bool tc_in_hw(u32 flags) +{ + return (flags & TCA_CLS_FLAGS_IN_HW) ? 
true : false; +} + enum tc_fl_command { TC_CLSFLOWER_REPLACE, TC_CLSFLOWER_DESTROY, diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 345551e71410..7a69f2a4ca0c 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -103,8 +103,10 @@ enum { #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1) /* tca flags definitions */ -#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) -#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) +#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */ +#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */ +#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */ +#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */ /* U32 filters */ -- cgit v1.2.3 From 55593960d0d88c6d80b7b3a615dbe09de85f2541 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 16 Feb 2017 10:31:13 +0200 Subject: net/sched: cls_flower: Reflect HW offload status Flower support for the "in hw" offloading flags. Signed-off-by: Or Gerlitz Reviewed-by: Amir Vadai Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 850d98294bf6..9d0c99d2e9fb 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -273,6 +273,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc); + if (!err) + f->flags |= TCA_CLS_FLAGS_IN_HW; if (tc_skip_sw(f->flags)) return err; @@ -912,6 +914,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, goto errout; } + if (!tc_in_hw(fnew->flags)) + fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + if (fold) { if (!tc_skip_sw(fold->flags)) rhashtable_remove_fast(&head->ht, &fold->ht_node, -- cgit v1.2.3 From c7d2b2f5eebe6e76efc11cfd7a600c0748234f3a Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 16 Feb 2017 10:31:14 +0200 Subject: net/sched: cls_matchall: Reflect HW offloading status Matchall support for the "in hw" offloading flags. Signed-off-by: Or Gerlitz Reviewed-by: Amir Vadai Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_matchall.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 35ef1c1be1f3..224eb2c14346 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -56,6 +56,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, struct net_device *dev = tp->q->dev_queue->dev; struct tc_to_netdev offload; struct tc_cls_matchall_offload mall_offload = {0}; + int err; offload.type = TC_SETUP_MATCHALL; offload.cls_mall = &mall_offload; @@ -63,8 +64,12 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, offload.cls_mall->exts = &head->exts; offload.cls_mall->cookie = cookie; - return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, - &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, + &offload); + if (!err) + head->flags |= TCA_CLS_FLAGS_IN_HW; + + return err; } static void mall_destroy_hw_filter(struct tcf_proto *tp, @@ -194,6 +199,9 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, } } + if (!tc_in_hw(new->flags)) + new->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + *arg = (unsigned long) head; rcu_assign_pointer(tp->root, new); if (head) -- cgit v1.2.3 From 24d3dc6d27eae19f422a5e216e25d3a16628d4ff Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 16 Feb 2017 10:31:15 +0200 Subject: net/sched: cls_u32: Reflect HW offload status U32 support for the "in hw" offloading flags. Signed-off-by: Or Gerlitz Reviewed-by: Amir Vadai Signed-off-by: David S. Miller --- net/sched/cls_u32.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index a6ec3e4b57ab..4dbe0c680fe6 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -523,6 +523,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &offload); + + if (!err) + n->flags |= TCA_CLS_FLAGS_IN_HW; + if (tc_skip_sw(flags)) return err; @@ -895,6 +899,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return err; } + if (!tc_in_hw(new->flags)) + new->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + u32_replace_knode(tp, tp_c, new); tcf_unbind_filter(tp, &n->res); call_rcu(&n->rcu, u32_delete_key_rcu); @@ -1014,6 +1021,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, if (err) goto errhw; + if (!tc_in_hw(n->flags)) + n->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + ins = &ht->ht[TC_U32_HASH(handle)]; for (pins = rtnl_dereference(*ins); pins; ins = &pins->next, pins = rtnl_dereference(*ins)) -- cgit v1.2.3 From 5cecb6cc008148b4afc51f7bacfa753e1a957483 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 16 Feb 2017 10:31:16 +0200 Subject: net/sched: cls_bpf: Reflect HW offload status BPF classifier support for the "in hw" offloading flags. Signed-off-by: Or Gerlitz Reviewed-by: Amir Vadai Acked-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- net/sched/cls_bpf.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index d9c97018317d..80f688436dd7 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -148,6 +148,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_bpf_offload bpf_offload = {}; struct tc_to_netdev offload; + int err; offload.type = TC_SETUP_CLSBPF; offload.cls_bpf = &bpf_offload; @@ -159,8 +160,13 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, bpf_offload.exts_integrated = prog->exts_integrated; bpf_offload.gen_flags = prog->gen_flags; - return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->protocol, &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + tp->protocol, &offload); + + if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE)) + prog->gen_flags |= TCA_CLS_FLAGS_IN_HW; + + return err; } static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, @@ -511,6 +517,9 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, return ret; } + if (!tc_in_hw(prog->gen_flags)) + prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; + if (oldprog) { list_replace_rcu(&oldprog->link, &prog->link); tcf_unbind_filter(tp, &oldprog->res); -- cgit v1.2.3 From 4581be42fce5e1d208cbeb8e78df3f1b4673eff7 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Thu, 16 Feb 2017 17:07:39 +0800 Subject: net: mvneta: make mvneta_eth_tool_ops static The mvneta_eth_tool_ops is only used internally in mvneta driver, so make it static. Signed-off-by: Jisheng Zhang Acked-by: Thomas Petazzoni Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fdf71720e707..61dd4462411c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3959,7 +3959,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_do_ioctl = mvneta_ioctl, }; -const struct ethtool_ops mvneta_eth_tool_ops = { +static const struct ethtool_ops mvneta_eth_tool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .set_coalesce = mvneta_ethtool_set_coalesce, -- cgit v1.2.3 From 98687f426bb3e93b9dbbfb614ba331192beae482 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 11 Feb 2017 19:26:45 +0800 Subject: gfs2: Use rhashtable walk interface in glock_hash_walk The function glock_hash_walk walks the rhashtable by hand. This is broken because if it catches the hash table in the middle of a rehash, then it will miss entries. This patch replaces the manual walk by using the rhashtable walk interface. Fixes: 88ffbf3e037e ("GFS2: Use resizable hash table for glocks") Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- fs/gfs2/glock.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 94f50cac91c6..70e94170af85 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1420,26 +1420,32 @@ static struct shrinker glock_shrinker = { * @sdp: the filesystem * @bucket: the bucket * + * Note that the function can be called multiple times on the same + * object. So the user must ensure that the function can cope with + * that. 
*/ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) { struct gfs2_glock *gl; - struct rhash_head *pos; - const struct bucket_table *tbl; - int i; + struct rhashtable_iter iter; - rcu_read_lock(); - tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table); - for (i = 0; i < tbl->size; i++) { - rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) { + rhashtable_walk_enter(&gl_hash_table, &iter); + + do { + gl = ERR_PTR(rhashtable_walk_start(&iter)); + if (gl) + continue; + + while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) if ((gl->gl_name.ln_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) examiner(gl); - } - } - rcu_read_unlock(); - cond_resched(); + + rhashtable_walk_stop(&iter); + } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); + + rhashtable_walk_exit(&iter); } /** -- cgit v1.2.3 From 40f9f439706073b4b0a654b3b99e18296b7990b3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 11 Feb 2017 19:26:46 +0800 Subject: tipc: Fix tipc_sk_reinit race conditions There are two problems with the function tipc_sk_reinit. Firstly it's doing a manual walk over an rhashtable. This is broken as an rhashtable can be resized and if you manually walk over it during a resize then you may miss entries. Secondly it's missing memory barriers as previously the code used spinlocks which provide the barriers implicitly. This patch fixes both problems. Fixes: 07f6c4bc048a ("tipc: convert tipc reference table to...") Signed-off-by: Herbert Xu Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/net.c | 4 ++++ net/tipc/socket.c | 30 +++++++++++++++++++----------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/net/tipc/net.c b/net/tipc/net.c index 28bf4feeb81c..ab8a2d5d1e32 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -110,6 +110,10 @@ int tipc_net_start(struct net *net, u32 addr) char addr_string[16]; tn->own_addr = addr; + + /* Ensure that the new address is visible before we reinit. */ + smp_mb(); + tipc_named_reinit(net); tipc_sk_reinit(net); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 103d1fd058c0..6b09a778cc71 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -430,8 +430,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, INIT_LIST_HEAD(&tsk->cong_links); msg = &tsk->phdr; tn = net_generic(sock_net(sk), tipc_net_id); - tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, - NAMED_H_SIZE, 0); /* Finish initializing socket data structures */ sock->ops = ops; @@ -441,6 +439,13 @@ static int tipc_sk_create(struct net *net, struct socket *sock, pr_warn("Socket create failed; port number exhausted\n"); return -EINVAL; } + + /* Ensure tsk is visible before we read own_addr. 
*/ + smp_mb(); + + tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, + NAMED_H_SIZE, 0); + msg_set_origport(msg, tsk->portid); setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); sk->sk_shutdown = 0; @@ -2234,24 +2239,27 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, void tipc_sk_reinit(struct net *net) { struct tipc_net *tn = net_generic(net, tipc_net_id); - const struct bucket_table *tbl; - struct rhash_head *pos; + struct rhashtable_iter iter; struct tipc_sock *tsk; struct tipc_msg *msg; - int i; - rcu_read_lock(); - tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); - for (i = 0; i < tbl->size; i++) { - rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { + rhashtable_walk_enter(&tn->sk_rht, &iter); + + do { + tsk = ERR_PTR(rhashtable_walk_start(&iter)); + if (tsk) + continue; + + while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { spin_lock_bh(&tsk->sk.sk_lock.slock); msg = &tsk->phdr; msg_set_prevnode(msg, tn->own_addr); msg_set_orignode(msg, tn->own_addr); spin_unlock_bh(&tsk->sk.sk_lock.slock); } - } - rcu_read_unlock(); + + rhashtable_walk_stop(&iter); + } while (tsk == ERR_PTR(-EAGAIN)); } static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) -- cgit v1.2.3 From da20420f83ea0fbcf3d03afda08d971ea1d8a356 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 11 Feb 2017 19:26:47 +0800 Subject: rhashtable: Add nested tables This patch adds code that handles GFP_ATOMIC kmalloc failure on insertion. As we cannot use vmalloc, we solve it by making our hash table nested. That is, we allocate single pages at each level and reach our desired table size by nesting them. When a nested table is created, only a single page is allocated at the top-level. Lower levels are allocated on demand during insertion. Therefore for each insertion to succeed, only two (non-consecutive) pages are needed. After a nested table is created, a rehash will be scheduled in order to switch to a vmalloced table as soon as possible. Also, the rehash code will never rehash into a nested table. If we detect a nested table during a rehash, the rehash will be aborted and a new rehash will be scheduled. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 78 +++++++++---- lib/rhashtable.c | 270 ++++++++++++++++++++++++++++++++++++--------- 2 files changed, 276 insertions(+), 72 deletions(-) diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 5c132d3188be..f2e12a845910 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -61,6 +61,7 @@ struct rhlist_head { /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets + * @nest: Number of bits of first-level nested table. * @rehash: Current bucket being rehashed * @hash_rnd: Random seed to fold into hash * @locks_mask: Mask to apply before accessing locks[] @@ -68,10 +69,12 @@ struct rhlist_head { * @walkers: List of active walkers * @rcu: RCU structure for freeing the table * @future_tbl: Table under construction during rehashing + * @ntbl: Nested table used when out of memory. 
* @buckets: size * hash buckets */ struct bucket_table { unsigned int size; + unsigned int nest; unsigned int rehash; u32 hash_rnd; unsigned int locks_mask; @@ -81,7 +84,7 @@ struct bucket_table { struct bucket_table __rcu *future_tbl; - struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; + struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /** @@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); +struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash); +struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash); + #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht); #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) +static inline struct rhash_head __rcu *const *rht_bucket( + const struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_var( + struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : + &tbl->buckets[hash]; +} + /** * rht_for_each_continue - continue iterating over hash chain * @pos: the &struct rhash_head to use as a loop cursor. @@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht); * @hash: the hash value / bucket index */ #define rht_for_each(pos, tbl, hash) \ - rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) + rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_continue - continue iterating over hash chain @@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht); * @member: name of the &struct rhash_head within the hashable struct. */ #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ + rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** @@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht); * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. */ -#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ - for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ - next = !rht_is_a_nulls(pos) ? \ - rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ - (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ - pos = next, \ - next = !rht_is_a_nulls(pos) ? \ +#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ + for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = next, \ + next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL) /** @@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht); * traversal is guarded by rcu_read_lock(). 
*/ #define rht_for_each_rcu(pos, tbl, hash) \ - rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) + rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain @@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht); * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ -#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ +#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** @@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - const struct bucket_table *tbl; + struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -697,8 +727,12 @@ slow_path: } elasticity = ht->elasticity; - pprev = &tbl->buckets[hash]; - rht_for_each(head, tbl, hash) { + pprev = rht_bucket_insert(ht, tbl, hash); + data = ERR_PTR(-ENOMEM); + if (!pprev) + goto out; + + rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *plist; struct rhlist_head *list; @@ -736,7 +770,7 @@ slow_path: if (unlikely(rht_grow_above_100(ht, tbl))) goto slow_path; - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + head = rht_dereference_bucket(*pprev, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (rhlist) { @@ -746,7 +780,7 @@ slow_path: RCU_INIT_POINTER(list->next, NULL); } - rcu_assign_pointer(tbl->buckets[hash], obj); + rcu_assign_pointer(*pprev, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) @@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one( spin_lock_bh(lock); - pprev = &tbl->buckets[hash]; - rht_for_each(he, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { struct rhlist_head *list; list = container_of(he, struct rhlist_head, rhead); @@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast( spin_lock_bh(lock); - pprev = &tbl->buckets[hash]; - rht_for_each(he, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { if (he != obj_old) { pprev = &he->next; continue; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 32d0ad058380..172454e6b979 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -32,6 +32,11 @@ #define HASH_MIN_SIZE 4U #define BUCKET_LOCKS_PER_CPU 32UL +union nested_table { + union nested_table __rcu *table; + struct rhash_head __rcu *bucket; +}; + static u32 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he) @@ -76,6 +81,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, /* Never allocate more than 0.5 locks per bucket */ size = min_t(unsigned int, size, tbl->size >> 1); + if (tbl->nest) + size = min(size, 1U << tbl->nest); + if (sizeof(spinlock_t) != 0) { tbl->locks = NULL; #ifdef CONFIG_NUMA @@ -99,8 +107,45 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, return 0; } +static void nested_table_free(union nested_table *ntbl, unsigned int size) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + const unsigned int len = 1 << shift; + unsigned int i; + + ntbl = rcu_dereference_raw(ntbl->table); + if (!ntbl) + return; + + if (size > len) { + size >>= shift; + for (i = 0; i < len; i++) + nested_table_free(ntbl + i, 
size); + } + + kfree(ntbl); +} + +static void nested_bucket_table_free(const struct bucket_table *tbl) +{ + unsigned int size = tbl->size >> tbl->nest; + unsigned int len = 1 << tbl->nest; + union nested_table *ntbl; + unsigned int i; + + ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + + for (i = 0; i < len; i++) + nested_table_free(ntbl + i, size); + + kfree(ntbl); +} + static void bucket_table_free(const struct bucket_table *tbl) { + if (tbl->nest) + nested_bucket_table_free(tbl); + if (tbl) kvfree(tbl->locks); @@ -112,6 +157,59 @@ static void bucket_table_free_rcu(struct rcu_head *head) bucket_table_free(container_of(head, struct bucket_table, rcu)); } +static union nested_table *nested_table_alloc(struct rhashtable *ht, + union nested_table __rcu **prev, + unsigned int shifted, + unsigned int nhash) +{ + union nested_table *ntbl; + int i; + + ntbl = rcu_dereference(*prev); + if (ntbl) + return ntbl; + + ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); + + if (ntbl && shifted) { + for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) + INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht, + (i << shifted) | nhash); + } + + rcu_assign_pointer(*prev, ntbl); + + return ntbl; +} + +static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, + size_t nbuckets, + gfp_t gfp) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + struct bucket_table *tbl; + size_t size; + + if (nbuckets < (1 << (shift + 1))) + return NULL; + + size = sizeof(*tbl) + sizeof(tbl->buckets[0]); + + tbl = kzalloc(size, gfp); + if (!tbl) + return NULL; + + if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, + 0, 0)) { + kfree(tbl); + return NULL; + } + + tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; + + return tbl; +} + static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) @@ -126,10 +224,17 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); if (tbl == NULL && gfp == GFP_KERNEL) tbl = vzalloc(size); + + size = nbuckets; + + if (tbl == NULL && gfp != GFP_KERNEL) { + tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); + nbuckets = 0; + } if (tbl == NULL) return NULL; - tbl->size = nbuckets; + tbl->size = size; if (alloc_bucket_locks(ht, tbl, gfp) < 0) { bucket_table_free(tbl); @@ -164,12 +269,17 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl = rhashtable_last_table(ht, rht_dereference_rcu(old_tbl->future_tbl, ht)); - struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash]; - int err = -ENOENT; + struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); + int err = -EAGAIN; struct rhash_head *head, *next, *entry; spinlock_t *new_bucket_lock; unsigned int new_hash; + if (new_tbl->nest) + goto out; + + err = -ENOENT; + rht_for_each(entry, old_tbl, old_hash) { err = 0; next = rht_dereference_bucket(entry->next, old_tbl, old_hash); @@ -202,19 +312,26 @@ out: return err; } -static void rhashtable_rehash_chain(struct rhashtable *ht, +static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); spinlock_t *old_bucket_lock; + int err; old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); spin_lock_bh(old_bucket_lock); - while (!rhashtable_rehash_one(ht, old_hash)) + while (!(err = rhashtable_rehash_one(ht, old_hash))) ; - 
old_tbl->rehash++; + + if (err == -ENOENT) { + old_tbl->rehash++; + err = 0; + } spin_unlock_bh(old_bucket_lock); + + return err; } static int rhashtable_rehash_attach(struct rhashtable *ht, @@ -246,13 +363,17 @@ static int rhashtable_rehash_table(struct rhashtable *ht) struct bucket_table *new_tbl; struct rhashtable_walker *walker; unsigned int old_hash; + int err; new_tbl = rht_dereference(old_tbl->future_tbl, ht); if (!new_tbl) return 0; - for (old_hash = 0; old_hash < old_tbl->size; old_hash++) - rhashtable_rehash_chain(ht, old_hash); + for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { + err = rhashtable_rehash_chain(ht, old_hash); + if (err) + return err; + } /* Publish the new table pointer. */ rcu_assign_pointer(ht->tbl, new_tbl); @@ -271,31 +392,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht) return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; } -/** - * rhashtable_expand - Expand hash table while allowing concurrent lookups - * @ht: the hash table to expand - * - * A secondary bucket array is allocated and the hash entries are migrated. - * - * This function may only be called in a context where it is safe to call - * synchronize_rcu(), e.g. not within a rcu_read_lock() section. - * - * The caller must ensure that no concurrent resizing occurs by holding - * ht->mutex. - * - * It is valid to have concurrent insertions and deletions protected by per - * bucket locks or concurrent RCU protected lookups and traversals. - */ -static int rhashtable_expand(struct rhashtable *ht) +static int rhashtable_rehash_alloc(struct rhashtable *ht, + struct bucket_table *old_tbl, + unsigned int size) { - struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *new_tbl; int err; ASSERT_RHT_MUTEX(ht); - old_tbl = rhashtable_last_table(ht, old_tbl); - - new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL); + new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (new_tbl == NULL) return -ENOMEM; @@ -324,12 +430,9 @@ static int rhashtable_expand(struct rhashtable *ht) */ static int rhashtable_shrink(struct rhashtable *ht) { - struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); unsigned int nelems = atomic_read(&ht->nelems); unsigned int size = 0; - int err; - - ASSERT_RHT_MUTEX(ht); if (nelems) size = roundup_pow_of_two(nelems * 3 / 2); @@ -342,15 +445,7 @@ static int rhashtable_shrink(struct rhashtable *ht) if (rht_dereference(old_tbl->future_tbl, ht)) return -EEXIST; - new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); - if (new_tbl == NULL) - return -ENOMEM; - - err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); - if (err) - bucket_table_free(new_tbl); - - return err; + return rhashtable_rehash_alloc(ht, old_tbl, size); } static void rht_deferred_worker(struct work_struct *work) @@ -366,11 +461,14 @@ static void rht_deferred_worker(struct work_struct *work) tbl = rhashtable_last_table(ht, tbl); if (rht_grow_above_75(ht, tbl)) - rhashtable_expand(ht); + err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) - rhashtable_shrink(ht); + err = rhashtable_shrink(ht); + else if (tbl->nest) + err = rhashtable_rehash_alloc(ht, tbl, tbl->size); - err = rhashtable_rehash_table(ht); + if (!err) + err = rhashtable_rehash_table(ht); mutex_unlock(&ht->mutex); @@ -439,8 +537,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, int elasticity; elasticity = ht->elasticity; 
- pprev = &tbl->buckets[hash]; - rht_for_each(head, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *list; struct rhlist_head *plist; @@ -477,6 +575,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, struct rhash_head *obj, void *data) { + struct rhash_head __rcu **pprev; struct bucket_table *new_tbl; struct rhash_head *head; @@ -499,7 +598,11 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, if (unlikely(rht_grow_above_100(ht, tbl))) return ERR_PTR(-EAGAIN); - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + pprev = rht_bucket_insert(ht, tbl, hash); + if (!pprev) + return ERR_PTR(-ENOMEM); + + head = rht_dereference_bucket(*pprev, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (ht->rhlist) { @@ -509,7 +612,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, RCU_INIT_POINTER(list->next, NULL); } - rcu_assign_pointer(tbl->buckets[hash], obj); + rcu_assign_pointer(*pprev, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) @@ -975,7 +1078,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg) { - const struct bucket_table *tbl; + struct bucket_table *tbl; unsigned int i; cancel_work_sync(&ht->run_work); @@ -986,7 +1089,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; - for (pos = rht_dereference(tbl->buckets[i], ht), + for (pos = rht_dereference(*rht_bucket(tbl, i), ht), next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; !rht_is_a_nulls(pos); @@ -1007,3 +1110,70 @@ void rhashtable_destroy(struct rhashtable *ht) return rhashtable_free_and_destroy(ht, NULL, NULL); } EXPORT_SYMBOL_GPL(rhashtable_destroy); + +struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + static struct rhash_head __rcu *rhnull = + (struct rhash_head __rcu *)NULLS_MARKER(0); + unsigned int index = hash & ((1 << tbl->nest) - 1); + unsigned int size = tbl->size >> tbl->nest; + unsigned int subhash = hash; + union nested_table *ntbl; + + ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); + subhash >>= tbl->nest; + + while (ntbl && size > (1 << shift)) { + index = subhash & ((1 << shift) - 1); + ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); + size >>= shift; + subhash >>= shift; + } + + if (!ntbl) + return &rhnull; + + return &ntbl[subhash].bucket; + +} +EXPORT_SYMBOL_GPL(rht_bucket_nested); + +struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + unsigned int index = hash & ((1 << tbl->nest) - 1); + unsigned int size = tbl->size >> tbl->nest; + union nested_table *ntbl; + unsigned int shifted; + unsigned int nhash; + + ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + hash >>= tbl->nest; + nhash = index; + shifted = tbl->nest; + ntbl = nested_table_alloc(ht, &ntbl[index].table, + size <= (1 << shift) ? 
shifted : 0, nhash); + + while (ntbl && size > (1 << shift)) { + index = hash & ((1 << shift) - 1); + size >>= shift; + hash >>= shift; + nhash |= index << shifted; + shifted += shift; + ntbl = nested_table_alloc(ht, &ntbl[index].table, + size <= (1 << shift) ? shifted : 0, + nhash); + } + + if (!ntbl) + return NULL; + + return &ntbl[hash].bucket; + +} +EXPORT_SYMBOL_GPL(rht_bucket_nested_insert); -- cgit v1.2.3 From 6d6a505a1e4c40cc37f83076c8779881dd60e144 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 16 Feb 2017 13:54:32 +0100 Subject: net: ethoc: Use eth_hw_addr_random() Use eth_hw_addr_random() to set a random dev_addr and update addr_assign_type instead of open-coding it. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/ethoc.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index f18aba05f1c2..23d82748f52b 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1031,7 +1031,6 @@ static int ethoc_probe(struct platform_device *pdev) struct ethoc *priv = NULL; int num_bd; int ret = 0; - bool random_mac = false; struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev); u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0; @@ -1169,16 +1168,11 @@ static int ethoc_probe(struct platform_device *pdev) /* Check the MAC again for validity, if it still isn't choose and * program a random one. */ - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_random_addr(netdev->dev_addr); - random_mac = true; - } + if (!is_valid_ether_addr(netdev->dev_addr)) + eth_hw_addr_random(netdev); ethoc_do_set_mac_address(netdev); - if (random_mac) - netdev->addr_assign_type = NET_ADDR_RANDOM; - /* Allow the platform setup code to adjust MII management bus clock. */ if (!eth_clkfreq) { struct clk *clk = devm_clk_get(&pdev->dev, NULL); -- cgit v1.2.3 From 6850f8b50928463213532d7812b74161d31d3cbd Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 16 Feb 2017 15:11:19 +0100 Subject: net: bgmac: store MAC address directly in netdev->dev_addr After commit 34a5102c3235 ("net: bgmac: allocate struct bgmac just once & don't copy it") the mac_addr member of struct bgmac is no longer necessary to pass the MAC address to bgmac_enet_probe(). Instead it can directly be stored in netdev->dev_addr. Also use eth_hw_addr_random() instead of eth_random_addr() in case a random MAC is nedded. This will make sure netdev->addr_assign_type will be properly set. Signed-off-by: Tobias Klauser Acked-by: Jon Mason Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bgmac-bcma.c | 2 +- drivers/net/ethernet/broadcom/bgmac-platform.c | 2 +- drivers/net/ethernet/broadcom/bgmac.c | 9 ++++----- drivers/net/ethernet/broadcom/bgmac.h | 1 - 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 5ef60d4f12b4..d59cfcc4c4d5 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -144,7 +144,7 @@ static int bgmac_probe(struct bcma_device *core) goto err; } - ether_addr_copy(bgmac->mac_addr, mac); + ether_addr_copy(bgmac->net_dev->dev_addr, mac); /* On BCM4706 we need common core to access PHY */ if (core->id.id == BCMA_CORE_4706_MAC_GBIT && diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 805e6ed6c390..7b1af950f312 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -169,7 +169,7 @@ static int bgmac_probe(struct platform_device *pdev) mac_addr = of_get_mac_address(np); if (mac_addr) - ether_addr_copy(bgmac->mac_addr, mac_addr); + ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr); else dev_warn(&pdev->dev, "MAC address not present in device tree\n"); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 20fe2520da42..415046750bb4 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1477,14 +1477,13 @@ int bgmac_enet_probe(struct bgmac *bgmac) net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); - if (!is_valid_ether_addr(bgmac->mac_addr)) { + if (!is_valid_ether_addr(net_dev->dev_addr)) { dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", - bgmac->mac_addr); - eth_random_addr(bgmac->mac_addr); + net_dev->dev_addr); + eth_hw_addr_random(net_dev); dev_warn(bgmac->dev, "Using random MAC: %pM\n", - bgmac->mac_addr); + net_dev->dev_addr); } - ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr); /* This (reset &) enable is not preset in specs or reference driver but * Broadcom does it in arch PCI code when enabling fake PCI device. diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index ab2db76e4fb8..248727dc62f2 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -474,7 +474,6 @@ struct bgmac { struct device *dev; struct device *dma_dev; - unsigned char mac_addr[ETH_ALEN]; u32 feature_flags; struct net_device *net_dev; -- cgit v1.2.3 From afcb50ba7f745eea32f91d7f63d6aa88f929f9c4 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Thu, 16 Feb 2017 11:29:21 -0800 Subject: bridge: vlan_tunnel: explicitly reset metadata attrs to NULL on failure Fixes: efa5356b0d97 ("bridge: per vlan dst_metadata netlink support") Signed-off-by: Roopa Prabhu Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_vlan_tunnel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c index b2b79a070162..6d2c4eed2dc8 100644 --- a/net/bridge/br_vlan_tunnel.c +++ b/net/bridge/br_vlan_tunnel.c @@ -85,6 +85,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg, return 0; out: dst_release(&vlan->tinfo.tunnel_dst->dst); + vlan->tinfo.tunnel_dst = NULL; + vlan->tinfo.tunnel_id = 0; return err; } -- cgit v1.2.3 From c78f8bdfa11fcceb9723c61212e4bd8f76c87f9e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 16 Feb 2017 22:24:48 +0100 Subject: bpf: mark all registered map/prog types as __ro_after_init All map types and prog types are registered to the BPF core through bpf_register_map_type() and bpf_register_prog_type() during init and remain unchanged thereafter. As by design we don't (and never will) have any pluggable code that can register to that at any later point in time, lets mark all the existing bpf_{map,prog}_type_list objects in the tree as __ro_after_init, so they can be moved to read-only section from then onwards. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/arraymap.c | 10 +++++----- kernel/bpf/hashtab.c | 8 ++++---- kernel/bpf/lpm_trie.c | 2 +- kernel/bpf/stackmap.c | 2 +- kernel/trace/bpf_trace.c | 6 +++--- net/core/filter.c | 18 +++++++++--------- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 3d55d95dcf49..6b6f41f0b211 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -269,7 +269,7 @@ static const struct bpf_map_ops array_ops = { .map_delete_elem = array_map_delete_elem, }; -static struct bpf_map_type_list array_type __read_mostly = { +static struct bpf_map_type_list array_type __ro_after_init = { .ops = &array_ops, .type = BPF_MAP_TYPE_ARRAY, }; @@ -283,7 +283,7 @@ static const struct bpf_map_ops percpu_array_ops = { .map_delete_elem = array_map_delete_elem, }; -static struct bpf_map_type_list percpu_array_type __read_mostly = { +static struct bpf_map_type_list percpu_array_type __ro_after_init = { .ops = &percpu_array_ops, .type = BPF_MAP_TYPE_PERCPU_ARRAY, }; @@ -409,7 +409,7 @@ static const struct bpf_map_ops prog_array_ops = { .map_fd_put_ptr = prog_fd_array_put_ptr, }; -static struct bpf_map_type_list prog_array_type __read_mostly = { +static struct bpf_map_type_list prog_array_type __ro_after_init = { .ops = &prog_array_ops, .type = BPF_MAP_TYPE_PROG_ARRAY, }; @@ -522,7 +522,7 @@ static const struct bpf_map_ops perf_event_array_ops = { .map_release = perf_event_fd_array_release, }; -static struct bpf_map_type_list perf_event_array_type __read_mostly = { +static struct bpf_map_type_list perf_event_array_type __ro_after_init = { .ops = &perf_event_array_ops, .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, }; @@ -564,7 +564,7 @@ static const struct bpf_map_ops cgroup_array_ops = { .map_fd_put_ptr = cgroup_fd_array_put_ptr, }; -static struct bpf_map_type_list cgroup_array_type __read_mostly = { +static struct bpf_map_type_list cgroup_array_type __ro_after_init = { .ops = &cgroup_array_ops, .type = BPF_MAP_TYPE_CGROUP_ARRAY, }; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index a753bbe7df0a..3ea87fb19a94 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1023,7 +1023,7 @@ static const struct bpf_map_ops htab_ops = { .map_delete_elem = htab_map_delete_elem, }; -static struct bpf_map_type_list htab_type __read_mostly = { +static 
struct bpf_map_type_list htab_type __ro_after_init = { .ops = &htab_ops, .type = BPF_MAP_TYPE_HASH, }; @@ -1037,7 +1037,7 @@ static const struct bpf_map_ops htab_lru_ops = { .map_delete_elem = htab_lru_map_delete_elem, }; -static struct bpf_map_type_list htab_lru_type __read_mostly = { +static struct bpf_map_type_list htab_lru_type __ro_after_init = { .ops = &htab_lru_ops, .type = BPF_MAP_TYPE_LRU_HASH, }; @@ -1124,7 +1124,7 @@ static const struct bpf_map_ops htab_percpu_ops = { .map_delete_elem = htab_map_delete_elem, }; -static struct bpf_map_type_list htab_percpu_type __read_mostly = { +static struct bpf_map_type_list htab_percpu_type __ro_after_init = { .ops = &htab_percpu_ops, .type = BPF_MAP_TYPE_PERCPU_HASH, }; @@ -1138,7 +1138,7 @@ static const struct bpf_map_ops htab_lru_percpu_ops = { .map_delete_elem = htab_lru_map_delete_elem, }; -static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = { +static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = { .ops = &htab_lru_percpu_ops, .type = BPF_MAP_TYPE_LRU_PERCPU_HASH, }; diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index e0f6a0bd279b..8bfe0afaee10 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -508,7 +508,7 @@ static const struct bpf_map_ops trie_ops = { .map_delete_elem = trie_delete_elem, }; -static struct bpf_map_type_list trie_type __read_mostly = { +static struct bpf_map_type_list trie_type __ro_after_init = { .ops = &trie_ops, .type = BPF_MAP_TYPE_LPM_TRIE, }; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index be8519148c25..22aa45cd0324 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -273,7 +273,7 @@ static const struct bpf_map_ops stack_map_ops = { .map_delete_elem = stack_map_delete_elem, }; -static struct bpf_map_type_list stack_map_type __read_mostly = { +static struct bpf_map_type_list stack_map_type __ro_after_init = { .ops = &stack_map_ops, .type = BPF_MAP_TYPE_STACK_TRACE, }; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 424daa4586d1..cee9802cf3e0 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -506,7 +506,7 @@ static const struct bpf_verifier_ops kprobe_prog_ops = { .is_valid_access = kprobe_prog_is_valid_access, }; -static struct bpf_prog_type_list kprobe_tl = { +static struct bpf_prog_type_list kprobe_tl __ro_after_init = { .ops = &kprobe_prog_ops, .type = BPF_PROG_TYPE_KPROBE, }; @@ -589,7 +589,7 @@ static const struct bpf_verifier_ops tracepoint_prog_ops = { .is_valid_access = tp_prog_is_valid_access, }; -static struct bpf_prog_type_list tracepoint_tl = { +static struct bpf_prog_type_list tracepoint_tl __ro_after_init = { .ops = &tracepoint_prog_ops, .type = BPF_PROG_TYPE_TRACEPOINT, }; @@ -648,7 +648,7 @@ static const struct bpf_verifier_ops perf_event_prog_ops = { .convert_ctx_access = pe_prog_convert_ctx_access, }; -static struct bpf_prog_type_list perf_event_tl = { +static struct bpf_prog_type_list perf_event_tl __ro_after_init = { .ops = &perf_event_prog_ops, .type = BPF_PROG_TYPE_PERF_EVENT, }; diff --git a/net/core/filter.c b/net/core/filter.c index 0b753cbb2536..e466e0040137 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3296,47 +3296,47 @@ static const struct bpf_verifier_ops cg_sock_ops = { .convert_ctx_access = sock_filter_convert_ctx_access, }; -static struct bpf_prog_type_list sk_filter_type __read_mostly = { +static struct bpf_prog_type_list sk_filter_type __ro_after_init = { .ops = &sk_filter_ops, .type = BPF_PROG_TYPE_SOCKET_FILTER, }; -static 
struct bpf_prog_type_list sched_cls_type __read_mostly = { +static struct bpf_prog_type_list sched_cls_type __ro_after_init = { .ops = &tc_cls_act_ops, .type = BPF_PROG_TYPE_SCHED_CLS, }; -static struct bpf_prog_type_list sched_act_type __read_mostly = { +static struct bpf_prog_type_list sched_act_type __ro_after_init = { .ops = &tc_cls_act_ops, .type = BPF_PROG_TYPE_SCHED_ACT, }; -static struct bpf_prog_type_list xdp_type __read_mostly = { +static struct bpf_prog_type_list xdp_type __ro_after_init = { .ops = &xdp_ops, .type = BPF_PROG_TYPE_XDP, }; -static struct bpf_prog_type_list cg_skb_type __read_mostly = { +static struct bpf_prog_type_list cg_skb_type __ro_after_init = { .ops = &cg_skb_ops, .type = BPF_PROG_TYPE_CGROUP_SKB, }; -static struct bpf_prog_type_list lwt_in_type __read_mostly = { +static struct bpf_prog_type_list lwt_in_type __ro_after_init = { .ops = &lwt_inout_ops, .type = BPF_PROG_TYPE_LWT_IN, }; -static struct bpf_prog_type_list lwt_out_type __read_mostly = { +static struct bpf_prog_type_list lwt_out_type __ro_after_init = { .ops = &lwt_inout_ops, .type = BPF_PROG_TYPE_LWT_OUT, }; -static struct bpf_prog_type_list lwt_xmit_type __read_mostly = { +static struct bpf_prog_type_list lwt_xmit_type __ro_after_init = { .ops = &lwt_xmit_ops, .type = BPF_PROG_TYPE_LWT_XMIT, }; -static struct bpf_prog_type_list cg_sock_type __read_mostly = { +static struct bpf_prog_type_list cg_sock_type __ro_after_init = { .ops = &cg_sock_ops, .type = BPF_PROG_TYPE_CGROUP_SOCK }; -- cgit v1.2.3 From 9383191da4e40360a5d880fbe6bb03911c61621b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 16 Feb 2017 22:24:49 +0100 Subject: bpf: remove stubs for cBPF from arch code Remove the dummy bpf_jit_compile() stubs for eBPF JITs and make that a single __weak function in the core that can be overridden similarly to the eBPF one. Also remove stale pr_err() mentions of bpf_jit_compile. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 5 ----- arch/powerpc/net/bpf_jit_comp64.c | 2 -- arch/s390/net/bpf_jit_comp.c | 8 -------- arch/x86/net/bpf_jit_comp.c | 8 ++------ include/linux/filter.h | 6 +----- kernel/bpf/core.c | 12 +++++++++++- 6 files changed, 14 insertions(+), 27 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index b2fc97a2c56c..c444408d5a8c 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -813,11 +813,6 @@ static inline void bpf_flush_icache(void *start, void *end) flush_icache_range((unsigned long)start, (unsigned long)end); } -void bpf_jit_compile(struct bpf_prog *prog) -{ - /* Nothing to do here. We support Internal BPF. */ -} - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_prog *tmp, *orig_prog = prog; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 73a5cf18fd84..f9ebd02260da 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -961,8 +961,6 @@ common_load: return 0; } -void bpf_jit_compile(struct bpf_prog *fp) { } - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { u32 proglen; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 167b31b186c1..6454efd22e63 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1262,14 +1262,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) return 0; } -/* - * Classic BPF function stub. 
BPF programs will be converted into - * eBPF and then bpf_int_jit_compile() will be called. - */ -void bpf_jit_compile(struct bpf_prog *fp) -{ -} - /* * Compile eBPF program "fp" */ diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index bb660e53cbd6..26123d0ae13a 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1067,13 +1067,13 @@ common_load: ilen = prog - temp; if (ilen > BPF_MAX_INSN_SIZE) { - pr_err("bpf_jit_compile fatal insn size error\n"); + pr_err("bpf_jit: fatal insn size error\n"); return -EFAULT; } if (image) { if (unlikely(proglen + ilen > oldproglen)) { - pr_err("bpf_jit_compile fatal error\n"); + pr_err("bpf_jit: fatal error\n"); return -EFAULT; } memcpy(image + proglen, temp, ilen); @@ -1085,10 +1085,6 @@ common_load: return proglen; } -void bpf_jit_compile(struct bpf_prog *prog) -{ -} - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_binary_header *header = NULL; diff --git a/include/linux/filter.h b/include/linux/filter.h index e4eb2546339a..c7a70e0cc3a0 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -607,6 +607,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); +void bpf_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, @@ -625,7 +626,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_jit_binary_free(struct bpf_binary_header *hdr); -void bpf_jit_compile(struct bpf_prog *fp); void bpf_jit_free(struct bpf_prog *fp); struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); @@ -669,10 +669,6 @@ static inline bool bpf_jit_blinding_enabled(void) return true; } #else -static inline void bpf_jit_compile(struct bpf_prog *fp) -{ -} - static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index fddd76b1b627..2831ba1e71c1 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1154,12 +1154,22 @@ const struct bpf_func_proto bpf_tail_call_proto = { .arg3_type = ARG_ANYTHING, }; -/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */ +/* Stub for JITs that only support cBPF. eBPF programs are interpreted. + * It is encouraged to implement bpf_int_jit_compile() instead, so that + * eBPF and implicitly also cBPF can get JITed! + */ struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) { return prog; } +/* Stub for JITs that support eBPF. All cBPF code gets transformed into + * eBPF by the kernel and is later compiled by bpf_int_jit_compile(). + */ +void __weak bpf_jit_compile(struct bpf_prog *prog) +{ +} + bool __weak bpf_helper_changes_pkt_data(void *func) { return false; -- cgit v1.2.3 From 74451e66d516c55e309e8d89a4a1e7596e46aacd Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 16 Feb 2017 22:24:50 +0100 Subject: bpf: make jited programs visible in traces Long standing issue with JITed programs is that stack traces from function tracing check whether a given address is kernel code through {__,}kernel_text_address(), which checks for code in core kernel, modules and dynamically allocated ftrace trampolines. 
But what is still missing is BPF JITed programs (interpreted programs are not an issue as __bpf_prog_run() will be attributed to them), thus when a stack trace is triggered, the code walking the stack won't see any of the JITed ones. The same for address correlation done from user space via reading /proc/kallsyms. This is read by tools like perf, but the latter is also useful for permanent live tracing with eBPF itself in combination with stack maps when other eBPF types are part of the callchain. See offwaketime example on dumping stack from a map. This work tries to tackle that issue by making the addresses and symbols known to the kernel. The lookup from *kernel_text_address() is implemented through a latched RB tree that can be read under RCU in fast-path that is also shared for symbol/size/offset lookup for a specific given address in kallsyms. The slow-path iteration through all symbols in the seq file done via RCU list, which holds a tiny fraction of all exported ksyms, usually below 0.1 percent. Function symbols are exported as bpf_prog_, in order to aide debugging and attribution. This facility is currently enabled for root-only when bpf_jit_kallsyms is set to 1, and disabled if hardening is active in any mode. The rationale behind this is that still a lot of systems ship with world read permissions on kallsyms thus addresses should not get suddenly exposed for them. If that situation gets much better in future, we always have the option to change the default on this. Likewise, unprivileged programs are not allowed to add entries there either, but that is less of a concern as most such programs types relevant in this context are for root-only anyway. If enabled, call graphs and stack traces will then show a correct attribution; one example is illustrated below, where the trace is now visible in tooling such as perf script --kallsyms=/proc/kallsyms and friends. Before: 7fff8166889d bpf_clone_redirect+0x80007f0020ed (/lib/modules/4.9.0-rc8+/build/vmlinux) f5d80 __sendmsg_nocancel+0xffff006451f1a007 (/usr/lib64/libc-2.18.so) After: 7fff816688b7 bpf_clone_redirect+0x80007f002107 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fffa0575728 bpf_prog_33c45a467c9e061a+0x8000600020fb (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fffa07ef1fc cls_bpf_classify+0x8000600020dc (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff81678b68 tc_classify+0x80007f002078 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff8164d40b __netif_receive_skb_core+0x80007f0025fb (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff8164d718 __netif_receive_skb+0x80007f002018 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff8164e565 process_backlog+0x80007f002095 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff8164dc71 net_rx_action+0x80007f002231 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff81767461 __softirqentry_text_start+0x80007f0020d1 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff817658ac do_softirq_own_stack+0x80007f00201c (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff810a2c20 do_softirq+0x80007f002050 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff810a2cb5 __local_bh_enable_ip+0x80007f002085 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff8168d452 ip_finish_output2+0x80007f002152 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff8168ea3d ip_finish_output+0x80007f00217d (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff8168f2af ip_output+0x80007f00203f (/lib/modules/4.9.0-rc8+/build/vmlinux) [...] 
7fff81005854 do_syscall_64+0x80007f002054 (/lib/modules/4.9.0-rc8+/build/vmlinux) 7fff817649eb return_from_SYSCALL_64+0x80007f002000 (/lib/modules/4.9.0-rc8+/build/vmlinux) f5d80 __sendmsg_nocancel+0xffff01c484812007 (/usr/lib64/libc-2.18.so) Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 12 ++ arch/arm64/net/bpf_jit_comp.c | 15 --- arch/powerpc/net/bpf_jit_comp64.c | 1 + arch/s390/net/bpf_jit_comp.c | 18 --- arch/x86/net/bpf_jit_comp.c | 15 --- include/linux/bpf.h | 4 + include/linux/filter.h | 112 ++++++++++++++++++- kernel/bpf/core.c | 223 ++++++++++++++++++++++++++++++++++++++ kernel/bpf/syscall.c | 2 + kernel/extable.c | 9 +- kernel/kallsyms.c | 61 +++++++++-- net/Kconfig | 3 +- net/core/sysctl_net_core.c | 7 ++ 13 files changed, 419 insertions(+), 63 deletions(-) diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index b80fbd4e5575..2ebabc93014a 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -54,6 +54,18 @@ Values : 1 - enable JIT hardening for unprivileged users only 2 - enable JIT hardening for all users +bpf_jit_kallsyms +---------------- + +When Berkeley Packet Filter Just in Time compiler is enabled, then compiled +images are unknown addresses to the kernel, meaning they neither show up in +traces nor in /proc/kallsyms. This enables export of these addresses, which +can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature +is disabled. +Values : + 0 - disable JIT kallsyms export (default value) + 1 - enable JIT kallsyms export for privileged users only + dev_weight -------------- diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index c444408d5a8c..05d12104d270 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -910,18 +910,3 @@ out: tmp : orig_prog); return prog; } - -void bpf_jit_free(struct bpf_prog *prog) -{ - unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!prog->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(prog); -} diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index f9ebd02260da..c34166ef76fc 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -1064,6 +1064,7 @@ out: return fp; } +/* Overriding bpf_jit_free() as we don't set images read-only. 
*/ void bpf_jit_free(struct bpf_prog *fp) { unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 6454efd22e63..f1d0e62ec1dd 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1339,21 +1339,3 @@ out: tmp : orig_fp); return fp; } - -/* - * Free eBPF program - */ -void bpf_jit_free(struct bpf_prog *fp) -{ - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!fp->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(fp); -} diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 26123d0ae13a..18a62e208826 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1180,18 +1180,3 @@ out: tmp : orig_prog); return prog; } - -void bpf_jit_free(struct bpf_prog *fp) -{ - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!fp->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(fp); -} diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 57d60dc5b600..909fc033173a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -8,10 +8,12 @@ #define _LINUX_BPF_H 1 #include + #include #include #include #include +#include struct perf_event; struct bpf_map; @@ -177,6 +179,8 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + struct latch_tree_node ksym_tnode; + struct list_head ksym_lnode; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; diff --git a/include/linux/filter.h b/include/linux/filter.h index c7a70e0cc3a0..0c1cc9143cb2 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -54,6 +54,12 @@ struct bpf_prog_aux; #define BPF_REG_AX MAX_BPF_REG #define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) +/* As per nm, we expose JITed images as text (code) section for + * kallsyms. That way, tools like perf can find it to match + * addresses. + */ +#define BPF_SYM_ELF_TYPE 't' + /* BPF program can access up to 512 bytes of stack space. 
*/ #define MAX_BPF_STACK 512 @@ -555,6 +561,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { set_memory_rw((unsigned long)fp, fp->pages); } + +static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) +{ + set_memory_rw((unsigned long)hdr, hdr->pages); +} #else static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { @@ -563,8 +574,21 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp) static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { } + +static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) +{ +} #endif /* CONFIG_DEBUG_SET_MODULE_RONX */ +static inline struct bpf_binary_header * +bpf_jit_binary_hdr(const struct bpf_prog *fp) +{ + unsigned long real_start = (unsigned long)fp->bpf_func; + unsigned long addr = real_start & PAGE_MASK; + + return (void *)addr; +} + int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { @@ -617,6 +641,7 @@ void bpf_warn_invalid_xdp_action(u32 act); #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; +extern int bpf_jit_kallsyms; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); @@ -651,6 +676,11 @@ static inline bool bpf_jit_is_ebpf(void) # endif } +static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return fp->jited && bpf_jit_is_ebpf(); +} + static inline bool bpf_jit_blinding_enabled(void) { /* These are the prerequisites, should someone ever have the @@ -668,11 +698,91 @@ static inline bool bpf_jit_blinding_enabled(void) return true; } -#else + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + /* There are a couple of corner cases where kallsyms should + * not be enabled f.e. on hardening. 
+ */ + if (bpf_jit_harden) + return false; + if (!bpf_jit_kallsyms) + return false; + if (bpf_jit_kallsyms == 1) + return true; + + return false; +} + +const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym); +bool is_bpf_text_address(unsigned long addr); +int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + const char *ret = __bpf_address_lookup(addr, size, off, sym); + + if (ret && modname) + *modname = NULL; + return ret; +} + +void bpf_prog_kallsyms_add(struct bpf_prog *fp); +void bpf_prog_kallsyms_del(struct bpf_prog *fp); + +#else /* CONFIG_BPF_JIT */ + +static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return false; +} + static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); } + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + return false; +} + +static inline const char * +__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym) +{ + return NULL; +} + +static inline bool is_bpf_text_address(unsigned long addr) +{ + return false; +} + +static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + return NULL; +} + +static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) +{ +} + +static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) +{ +} #endif /* CONFIG_BPF_JIT */ #define BPF_ANC BIT(15) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 2831ba1e71c1..f45827e205d3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -28,6 +28,9 @@ #include #include #include +#include +#include +#include #include @@ -95,6 +98,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) fp->aux = aux; fp->aux->prog = fp; + INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode); + return fp; } EXPORT_SYMBOL_GPL(bpf_prog_alloc); @@ -290,6 +295,206 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, } #ifdef CONFIG_BPF_JIT +static __always_inline void +bpf_get_prog_addr_region(const struct bpf_prog *prog, + unsigned long *symbol_start, + unsigned long *symbol_end) +{ + const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog); + unsigned long addr = (unsigned long)hdr; + + WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); + + *symbol_start = addr; + *symbol_end = addr + hdr->pages * PAGE_SIZE; +} + +static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) +{ + BUILD_BUG_ON(sizeof("bpf_prog_") + + sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN); + + sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); + sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); + *sym = 0; +} + +static __always_inline unsigned long +bpf_get_prog_addr_start(struct latch_tree_node *n) +{ + unsigned long symbol_start, symbol_end; + const struct bpf_prog_aux *aux; + + aux = container_of(n, struct bpf_prog_aux, ksym_tnode); + bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); + + return symbol_start; +} + +static __always_inline bool bpf_tree_less(struct latch_tree_node *a, + struct latch_tree_node *b) +{ + return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b); +} + +static __always_inline int bpf_tree_comp(void *key, struct 
latch_tree_node *n) +{ + unsigned long val = (unsigned long)key; + unsigned long symbol_start, symbol_end; + const struct bpf_prog_aux *aux; + + aux = container_of(n, struct bpf_prog_aux, ksym_tnode); + bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); + + if (val < symbol_start) + return -1; + if (val >= symbol_end) + return 1; + + return 0; +} + +static const struct latch_tree_ops bpf_tree_ops = { + .less = bpf_tree_less, + .comp = bpf_tree_comp, +}; + +static DEFINE_SPINLOCK(bpf_lock); +static LIST_HEAD(bpf_kallsyms); +static struct latch_tree_root bpf_tree __cacheline_aligned; + +int bpf_jit_kallsyms __read_mostly; + +static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux) +{ + WARN_ON_ONCE(!list_empty(&aux->ksym_lnode)); + list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms); + latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops); +} + +static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux) +{ + if (list_empty(&aux->ksym_lnode)) + return; + + latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops); + list_del_rcu(&aux->ksym_lnode); +} + +static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) +{ + return fp->jited && !bpf_prog_was_classic(fp); +} + +static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) +{ + return list_empty(&fp->aux->ksym_lnode) || + fp->aux->ksym_lnode.prev == LIST_POISON2; +} + +void bpf_prog_kallsyms_add(struct bpf_prog *fp) +{ + unsigned long flags; + + if (!bpf_prog_kallsyms_candidate(fp) || + !capable(CAP_SYS_ADMIN)) + return; + + spin_lock_irqsave(&bpf_lock, flags); + bpf_prog_ksym_node_add(fp->aux); + spin_unlock_irqrestore(&bpf_lock, flags); +} + +void bpf_prog_kallsyms_del(struct bpf_prog *fp) +{ + unsigned long flags; + + if (!bpf_prog_kallsyms_candidate(fp)) + return; + + spin_lock_irqsave(&bpf_lock, flags); + bpf_prog_ksym_node_del(fp->aux); + spin_unlock_irqrestore(&bpf_lock, flags); +} + +static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr) +{ + struct latch_tree_node *n; + + if (!bpf_jit_kallsyms_enabled()) + return NULL; + + n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); + return n ? 
+ container_of(n, struct bpf_prog_aux, ksym_tnode)->prog : + NULL; +} + +const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym) +{ + unsigned long symbol_start, symbol_end; + struct bpf_prog *prog; + char *ret = NULL; + + rcu_read_lock(); + prog = bpf_prog_kallsyms_find(addr); + if (prog) { + bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end); + bpf_get_prog_name(prog, sym); + + ret = sym; + if (size) + *size = symbol_end - symbol_start; + if (off) + *off = addr - symbol_start; + } + rcu_read_unlock(); + + return ret; +} + +bool is_bpf_text_address(unsigned long addr) +{ + bool ret; + + rcu_read_lock(); + ret = bpf_prog_kallsyms_find(addr) != NULL; + rcu_read_unlock(); + + return ret; +} + +int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym) +{ + unsigned long symbol_start, symbol_end; + struct bpf_prog_aux *aux; + unsigned int it = 0; + int ret = -ERANGE; + + if (!bpf_jit_kallsyms_enabled()) + return ret; + + rcu_read_lock(); + list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) { + if (it++ != symnum) + continue; + + bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); + bpf_get_prog_name(aux->prog, sym); + + *value = symbol_start; + *type = BPF_SYM_ELF_TYPE; + + ret = 0; + break; + } + rcu_read_unlock(); + + return ret; +} + struct bpf_binary_header * bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, @@ -326,6 +531,24 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr) module_memfree(hdr); } +/* This symbol is only overridden by archs that have different + * requirements than the usual eBPF JITs, f.e. when they only + * implement cBPF JIT, do not set images read-only, etc. + */ +void __weak bpf_jit_free(struct bpf_prog *fp) +{ + if (fp->jited) { + struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); + + bpf_jit_binary_unlock_ro(hdr); + bpf_jit_binary_free(hdr); + + WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); + } + + bpf_prog_unlock_free(fp); +} + int bpf_jit_harden __read_mostly; static int bpf_jit_blind_insn(const struct bpf_insn *from, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index f74ca17af64a..461eb1e66a0f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -707,6 +707,7 @@ void bpf_prog_put(struct bpf_prog *prog) { if (atomic_dec_and_test(&prog->aux->refcnt)) { trace_bpf_prog_put_rcu(prog); + bpf_prog_kallsyms_del(prog); call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); } } @@ -903,6 +904,7 @@ static int bpf_prog_load(union bpf_attr *attr) /* failed to allocate fd */ goto free_used_maps; + bpf_prog_kallsyms_add(prog); trace_bpf_prog_load(prog, err); return err; diff --git a/kernel/extable.c b/kernel/extable.c index e3beec4a2339..bd82117ad424 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -104,6 +105,8 @@ int __kernel_text_address(unsigned long addr) return 1; if (is_ftrace_trampoline(addr)) return 1; + if (is_bpf_text_address(addr)) + return 1; /* * There might be init symbols in saved stacktraces. 
* Give those symbols a chance to be printed in @@ -123,7 +126,11 @@ int kernel_text_address(unsigned long addr) return 1; if (is_module_text_address(addr)) return 1; - return is_ftrace_trampoline(addr); + if (is_ftrace_trampoline(addr)) + return 1; + if (is_bpf_text_address(addr)) + return 1; + return 0; } /* diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index fafd1a3ef0da..6a3b249a2ae1 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -300,10 +301,11 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { char namebuf[KSYM_NAME_LEN]; + if (is_ksym_addr(addr)) return !!get_symbol_pos(addr, symbolsize, offset); - - return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf); + return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || + !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); } /* @@ -318,6 +320,8 @@ const char *kallsyms_lookup(unsigned long addr, unsigned long *offset, char **modname, char *namebuf) { + const char *ret; + namebuf[KSYM_NAME_LEN - 1] = 0; namebuf[0] = 0; @@ -333,9 +337,13 @@ const char *kallsyms_lookup(unsigned long addr, return namebuf; } - /* See if it's in a module. */ - return module_address_lookup(addr, symbolsize, offset, modname, - namebuf); + /* See if it's in a module or a BPF JITed image. */ + ret = module_address_lookup(addr, symbolsize, offset, + modname, namebuf); + if (!ret) + ret = bpf_address_lookup(addr, symbolsize, + offset, modname, namebuf); + return ret; } int lookup_symbol_name(unsigned long addr, char *symname) @@ -471,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol); /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ struct kallsym_iter { loff_t pos; + loff_t pos_mod_end; unsigned long value; unsigned int nameoff; /* If iterating in core kernel symbols. */ char type; @@ -481,13 +490,27 @@ struct kallsym_iter { static int get_ksymbol_mod(struct kallsym_iter *iter) { - if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, - &iter->type, iter->name, iter->module_name, - &iter->exported) < 0) + int ret = module_get_kallsym(iter->pos - kallsyms_num_syms, + &iter->value, &iter->type, + iter->name, iter->module_name, + &iter->exported); + if (ret < 0) { + iter->pos_mod_end = iter->pos; return 0; + } + return 1; } +static int get_ksymbol_bpf(struct kallsym_iter *iter) +{ + iter->module_name[0] = '\0'; + iter->exported = 0; + return bpf_get_kallsym(iter->pos - iter->pos_mod_end, + &iter->value, &iter->type, + iter->name) < 0 ? 0 : 1; +} + /* Returns space to next name. */ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) { @@ -508,16 +531,30 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) iter->name[0] = '\0'; iter->nameoff = get_symbol_offset(new_pos); iter->pos = new_pos; + if (new_pos == 0) + iter->pos_mod_end = 0; +} + +static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) +{ + iter->pos = pos; + + if (iter->pos_mod_end > 0 && + iter->pos_mod_end < iter->pos) + return get_ksymbol_bpf(iter); + + if (!get_ksymbol_mod(iter)) + return get_ksymbol_bpf(iter); + + return 1; } /* Returns false if pos at or past end of file. */ static int update_iter(struct kallsym_iter *iter, loff_t pos) { /* Module symbols can be accessed randomly. 
*/ - if (pos >= kallsyms_num_syms) { - iter->pos = pos; - return get_ksymbol_mod(iter); - } + if (pos >= kallsyms_num_syms) + return update_iter_mod(iter, pos); /* If we're not on the desired position, reset to new position. */ if (pos != iter->pos) diff --git a/net/Kconfig b/net/Kconfig index f19c0c3b9589..102f781a0131 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -297,7 +297,8 @@ config BPF_JIT Note, admin should enable this feature changing: /proc/sys/net/core/bpf_jit_enable - /proc/sys/net/core/bpf_jit_harden (optional) + /proc/sys/net/core/bpf_jit_harden (optional) + /proc/sys/net/core/bpf_jit_kallsyms (optional) config NET_FLOW_LIMIT bool diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index eaa72eb0399c..4ead336e14ea 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -334,6 +334,13 @@ static struct ctl_table net_core_table[] = { .mode = 0600, .proc_handler = proc_dointvec, }, + { + .procname = "bpf_jit_kallsyms", + .data = &bpf_jit_kallsyms, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec, + }, # endif #endif { -- cgit v1.2.3 From eda7a5e88d95d8cc4315451dcfb74064773cbc02 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Thu, 16 Feb 2017 13:38:04 -0800 Subject: bridge: don't indicate expiry on NTF_EXT_LEARNED fdb entries added_by_external_learn fdb entries are added and expired by external entities like switchdev driver or external controllers. ageing is already disabled for such entries. Hence, don't indicate expiry for such fdb entries. CC: Nikolay Aleksandrov CC: Jiri Pirko CC: Ido Schimmel Signed-off-by: Roopa Prabhu Reviewed-by: Ido Schimmel Tested-by: Ido Schimmel Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_fdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 4ac11574117c..4f598dc2d916 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -65,7 +65,7 @@ static inline unsigned long hold_time(const struct net_bridge *br) static inline int has_expired(const struct net_bridge *br, const struct net_bridge_fdb_entry *fdb) { - return !fdb->is_static && + return !fdb->is_static && !fdb->added_by_external_learn && time_before_eq(fdb->updated + hold_time(br), jiffies); } -- cgit v1.2.3 From 336f8a71aa178a35b4c33130bb98331b7930337a Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 16 Feb 2017 22:46:14 +0100 Subject: net: hamachi: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/packetengines/hamachi.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index baff744b560e..8b026dbf0d8d 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1811,21 +1811,23 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int hamachi_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct hamachi_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); - mii_ethtool_gset(&np->mii_if, ecmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return 0; } -static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int hamachi_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct hamachi_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); - res = mii_ethtool_sset(&np->mii_if, ecmd); + res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return res; } @@ -1845,10 +1847,10 @@ static u32 hamachi_get_link(struct net_device *dev) static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = hamachi_get_drvinfo, - .get_settings = hamachi_get_settings, - .set_settings = hamachi_set_settings, .nway_reset = hamachi_nway_reset, .get_link = hamachi_get_link, + .get_link_ksettings = hamachi_get_link_ksettings, + .set_link_ksettings = hamachi_set_link_ksettings, }; static const struct ethtool_ops ethtool_ops_no_mii = { -- cgit v1.2.3 From 99f18f1d2f447afe575c1ad2641050c77d244760 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Thu, 16 Feb 2017 23:28:01 +0100 Subject: net: qlogic: netxen: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- .../ethernet/qlogic/netxen/netxen_nic_ethtool.c | 123 +++++++++++---------- 1 file changed, 65 insertions(+), 58 deletions(-) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index f9034467736c..3157f97dd782 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -96,69 +96,70 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) } static int -netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +netxen_nic_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); int check_sfp_module = 0; + u32 supported, advertising; /* read which mode */ if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | + advertising = (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = adapter->link_duplex; - ecmd->autoneg = adapter->link_autoneg; + cmd->base.speed = adapter->link_speed; + cmd->base.duplex = adapter->link_duplex; + cmd->base.autoneg = adapter->link_autoneg; } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { u32 val; val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); if (val == NETXEN_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; + supported = SUPPORTED_1000baseT_Full; + advertising = ADVERTISED_1000baseT_Full; } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; } if (netif_running(dev) && adapter->has_link_events) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->autoneg = adapter->link_autoneg; - ecmd->duplex = adapter->link_duplex; + cmd->base.speed = adapter->link_speed; + cmd->base.autoneg = adapter->link_autoneg; + cmd->base.duplex = adapter->link_duplex; goto skip; } - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { u16 pcifn = adapter->ahw.pci_func; val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); - ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * - P3_LINK_SPEED_VAL(pcifn, val)); + cmd->base.speed = P3_LINK_SPEED_MHZ * + P3_LINK_SPEED_VAL(pcifn, val); } else - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_DISABLE; } else return -EIO; skip: - ecmd->phy_address = adapter->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.phy_address = adapter->physical_port; switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: @@ -167,16 +168,16 @@ skip: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4: case 
NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = (adapter->ahw.board_type == + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + cmd->base.autoneg = (adapter->ahw.board_type == NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? (AUTONEG_DISABLE) : (adapter->link_autoneg); break; @@ -185,39 +186,39 @@ skip: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_XG_LOM: case NETXEN_BRDTYPE_P3_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_MII; + advertising |= ADVERTISED_MII; + cmd->base.port = PORT_MII; + cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + supported |= SUPPORTED_TP; check_sfp_module = netif_running(dev) && adapter->has_link_events; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_TP: if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= + cmd->base.autoneg = AUTONEG_DISABLE; + supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; check_sfp_module = netif_running(dev) && adapter->has_link_events; } else { - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= + supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; } break; default: @@ -232,31 +233,37 @@ skip: case LINKEVENT_MODULE_OPTICAL_SRLR: case LINKEVENT_MODULE_OPTICAL_LRM: case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; break; case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; break; default: - ecmd->port = -1; + cmd->base.port = -1; } } if (!netif_running(dev) || !adapter->ahw.linkup) { - ecmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } static int -netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +netxen_nic_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; int ret; if (adapter->ahw.port_type != NETXEN_NIC_GBE) @@ -265,16 +272,16 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (!(adapter->capabilities & 
NX_FW_CAPABILITY_GBE_LINK_CFG)) return -EOPNOTSUPP; - ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, - ecmd->autoneg); + ret = nx_fw_cmd_set_gbe_port(adapter, speed, cmd->base.duplex, + cmd->base.autoneg); if (ret == NX_RCODE_NOT_SUPPORTED) return -EOPNOTSUPP; else if (ret) return -EIO; adapter->link_speed = speed; - adapter->link_duplex = ecmd->duplex; - adapter->link_autoneg = ecmd->autoneg; + adapter->link_duplex = cmd->base.duplex; + adapter->link_autoneg = cmd->base.autoneg; if (!netif_running(dev)) return 0; @@ -931,8 +938,6 @@ netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, } const struct ethtool_ops netxen_nic_ethtool_ops = { - .get_settings = netxen_nic_get_settings, - .set_settings = netxen_nic_set_settings, .get_drvinfo = netxen_nic_get_drvinfo, .get_regs_len = netxen_nic_get_regs_len, .get_regs = netxen_nic_get_regs, @@ -954,4 +959,6 @@ const struct ethtool_ops netxen_nic_ethtool_ops = { .get_dump_flag = netxen_get_dump_flag, .get_dump_data = netxen_get_dump_data, .set_dump = netxen_set_dump, + .get_link_ksettings = netxen_nic_get_link_ksettings, + .set_link_ksettings = netxen_nic_set_link_ksettings, }; -- cgit v1.2.3 From 025331df34f6722f86b467cb13a69326444ab1bc Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 17 Feb 2017 01:56:11 +0100 Subject: rtnl: don't account unused struct ifla_port_vsi in rtnl_port_size When allocating rtnl dump messages, struct ifla_port_vsi is never dumped, so we can save header plus payload in rtnl_port_size(). Infact, attribute IFLA_PORT_VSI_TYPE and struct ifla_port_vsi are not used anywhere in the kernel. We only need to keep the nla policy should applications in user space be filling this out. Same NLA_BINARY issue exists as was fixed in 364d5716a7ad ("rtnetlink: ifla_vf_policy: fix misuses of NLA_BINARY") and others, but then again IFLA_PORT_VSI_TYPE is not used anywhere, so just add a comment that it's unused. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index adfb54b896da..e3286d32eca5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -876,8 +876,6 @@ static size_t rtnl_port_size(const struct net_device *dev, { size_t port_size = nla_total_size(4) /* PORT_VF */ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ - + nla_total_size(sizeof(struct ifla_port_vsi)) - /* PORT_VSI_TYPE */ + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ + nla_total_size(1) /* PROT_VDP_REQUEST */ @@ -1491,14 +1489,19 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { [IFLA_PORT_VF] = { .type = NLA_U32 }, [IFLA_PORT_PROFILE] = { .type = NLA_STRING, .len = PORT_PROFILE_MAX }, - [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, - .len = sizeof(struct ifla_port_vsi)}, [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, .len = PORT_UUID_MAX }, [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, .len = PORT_UUID_MAX }, [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, + + /* Unused, but we need to keep it here since user space could + * fill it. It's also broken with regard to NLA_BINARY use in + * combination with structs. 
+ */ + [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_port_vsi) }, }; static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { -- cgit v1.2.3 From b5124720ed2e5b43c851a32e9acc8d29e3e780ae Mon Sep 17 00:00:00 2001 From: Simon Xiao Date: Fri, 17 Feb 2017 11:36:20 -0800 Subject: netvsc: fix typo on statistics Return the correct tx_errors stats in netvsc. Reviewed-by: Haiyang Zhang Signed-off-by: Simon Xiao Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 425285f36595..2d3cdb026a99 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -920,7 +920,7 @@ static void netvsc_get_stats64(struct net_device *net, } t->tx_dropped = net->stats.tx_dropped; - t->tx_errors = net->stats.tx_dropped; + t->tx_errors = net->stats.tx_errors; t->rx_dropped = net->stats.rx_dropped; t->rx_errors = net->stats.rx_errors; -- cgit v1.2.3 From 88c4845d7dec835ba7ad1379e30a09658b05495d Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 17 Feb 2017 18:16:21 +0000 Subject: rxrpc: Change module filename to rxrpc.ko Change module filename from af-rxrpc.ko to rxrpc.ko so as to be consistent with the other protocol drivers. Also adjust the documentation to reflect this. Further, there is no longer a standalone rxkad module, as it has been merged into the rxrpc core, so get rid of references to that. Reported-by: Marc Dionne Signed-off-by: David Howells Signed-off-by: David S. Miller --- Documentation/filesystems/afs.txt | 34 +--------------------------------- net/rxrpc/Makefile | 12 ++++++------ 2 files changed, 7 insertions(+), 39 deletions(-) diff --git a/Documentation/filesystems/afs.txt b/Documentation/filesystems/afs.txt index ffef91c4e0d6..060da408923b 100644 --- a/Documentation/filesystems/afs.txt +++ b/Documentation/filesystems/afs.txt @@ -64,8 +64,7 @@ USAGE When inserting the driver modules the root cell must be specified along with a list of volume location server IP addresses: - modprobe af_rxrpc - modprobe rxkad + modprobe rxrpc modprobe kafs rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91 The first module is the AF_RXRPC network protocol driver. This provides the @@ -214,34 +213,3 @@ If a file is opened with a particular key and then the file descriptor is passed to a process that doesn't have that key (perhaps over an AF_UNIX socket), then the operations on the file will be made with key that was used to open the file. - - -======== -EXAMPLES -======== - -Here's what I use to test this. Some of the names and IP addresses are local -to my internal DNS. My "root.afs" partition has a mount point within it for -some public volumes volumes. - -insmod /tmp/rxrpc.o -insmod /tmp/rxkad.o -insmod /tmp/kafs.o rootcell=cambridge.redhat.com:172.16.18.91 - -mount -t afs \%root.afs. /afs -mount -t afs \%cambridge.redhat.com:root.cell. /afs/cambridge.redhat.com/ - -echo add grand.central.org 18.9.48.14:128.2.203.61:130.237.48.87 > /proc/fs/afs/cells -mount -t afs "#grand.central.org:root.cell." /afs/grand.central.org/ -mount -t afs "#grand.central.org:root.archive." /afs/grand.central.org/archive -mount -t afs "#grand.central.org:root.contrib." /afs/grand.central.org/contrib -mount -t afs "#grand.central.org:root.doc." /afs/grand.central.org/doc -mount -t afs "#grand.central.org:root.project." 
/afs/grand.central.org/project -mount -t afs "#grand.central.org:root.service." /afs/grand.central.org/service -mount -t afs "#grand.central.org:root.software." /afs/grand.central.org/software -mount -t afs "#grand.central.org:root.user." /afs/grand.central.org/user - -umount /afs -rmmod kafs -rmmod rxkad -rmmod rxrpc diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 8fc6ea347182..b9da4d6b914f 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -2,7 +2,9 @@ # Makefile for Linux kernel RxRPC # -af-rxrpc-y := \ +obj-$(CONFIG_AF_RXRPC) += rxrpc.o + +rxrpc-y := \ af_rxrpc.o \ call_accept.o \ call_event.o \ @@ -26,8 +28,6 @@ af-rxrpc-y := \ skbuff.o \ utils.o -af-rxrpc-$(CONFIG_PROC_FS) += proc.o -af-rxrpc-$(CONFIG_RXKAD) += rxkad.o -af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o - -obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o +rxrpc-$(CONFIG_PROC_FS) += proc.o +rxrpc-$(CONFIG_RXKAD) += rxkad.o +rxrpc-$(CONFIG_SYSCTL) += sysctl.o -- cgit v1.2.3 From 806a837650501f5eee359a197824db8752fa6e6c Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Fri, 17 Feb 2017 14:34:19 +0800 Subject: pkt_sched: Remove useless qdisc_stab_lock The qdisc_stab_lock is used in qdisc_get_stab and qdisc_put_stab. These two functions are invoked in qdisc_create, qdisc_change, and qdisc_destroy which run fully under RTNL. So it already makes sure only one could access the qdisc_stab_list at the same time. Then it is unnecessary to use qdisc_stab_lock now. Signed-off-by: Gao Feng Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/sched/sch_api.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index a13c15e8f087..bcf49cd22786 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -440,7 +440,6 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab) EXPORT_SYMBOL(qdisc_put_rtab); static LIST_HEAD(qdisc_stab_list); -static DEFINE_SPINLOCK(qdisc_stab_lock); static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = { [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) }, @@ -474,20 +473,15 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) if (tsize != s->tsize || (!tab && tsize > 0)) return ERR_PTR(-EINVAL); - spin_lock(&qdisc_stab_lock); - list_for_each_entry(stab, &qdisc_stab_list, list) { if (memcmp(&stab->szopts, s, sizeof(*s))) continue; if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) continue; stab->refcnt++; - spin_unlock(&qdisc_stab_lock); return stab; } - spin_unlock(&qdisc_stab_lock); - stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); if (!stab) return ERR_PTR(-ENOMEM); @@ -497,9 +491,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) if (tsize > 0) memcpy(stab->data, tab, tsize * sizeof(u16)); - spin_lock(&qdisc_stab_lock); list_add_tail(&stab->list, &qdisc_stab_list); - spin_unlock(&qdisc_stab_lock); return stab; } @@ -514,14 +506,10 @@ void qdisc_put_stab(struct qdisc_size_table *tab) if (!tab) return; - spin_lock(&qdisc_stab_lock); - if (--tab->refcnt == 0) { list_del(&tab->list); call_rcu_bh(&tab->rcu, stab_kfree_rcu); } - - spin_unlock(&qdisc_stab_lock); } EXPORT_SYMBOL(qdisc_put_stab); -- cgit v1.2.3 From 3b4735281f67b0aa62bf74c8a1a7758c17f7158d Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Fri, 17 Feb 2017 08:57:54 +0100 Subject: nfp: Use PCI_DEVICE_ID_NETRONOME_NFP* defines Use PCI_DEVICE_ID_NETRONOME_NFP*, defined in linux/pci_ids.h, rather than replicating the same values in the NFP driver. 
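For reference, the pci_ids.h replacements are assumed to carry the same device ID
values as the removed driver-local defines (0x4000 and 0x6000); the vendor ID value
shown here is likewise an assumption taken from include/linux/pci_ids.h, not part of
this patch:

	/* assumed definitions in include/linux/pci_ids.h */
	#define PCI_VENDOR_ID_NETRONOME			0x19ee
	#define PCI_DEVICE_ID_NETRONOME_NFP4000		0x4000
	#define PCI_DEVICE_ID_NETRONOME_NFP6000		0x6000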
Signed-off-by: Simon Horman Acked-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 4 ++-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 4 ++-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 5 ----- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 6e1bb3a4a611..db52b6a53c6f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -58,11 +58,11 @@ static const char nfp_driver_name[] = "nfp"; const char nfp_driver_version[] = VERMAGIC_STRING; static const struct pci_device_id nfp_pci_device_ids[] = { - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP4000, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 165e085c2a24..15cc3e77cf6a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -640,8 +640,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000; } - if (nfp->pdev->device == PCI_DEVICE_NFP4000 || - nfp->pdev->device == PCI_DEVICE_NFP6000) { + if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || + nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) { nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); expl_groups = 4; } else { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 74e6f9f59bdc..edecc0a27485 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -58,11 +58,6 @@ #define PCI_64BIT_BAR_COUNT 3 -/* NFP hardware vendor/device ids. - */ -#define PCI_DEVICE_NFP4000 0x4000 -#define PCI_DEVICE_NFP6000 0x6000 - #define NFP_CPP_NUM_TARGETS 16 struct device; -- cgit v1.2.3 From d2c58294f5416467ae0c5d00675bce3cd19595dd Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Fri, 17 Feb 2017 04:16:22 -0500 Subject: rds:Remove unnecessary ib_ring unalloc In the function rds_ib_xmit_atomic, ib_ring is not allocated successfully. As such, it is not necessary to unalloc it. Cc: Joe Jin Cc: Junxiao Bi Signed-off-by: Zhu Yanjun Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/ib_send.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 5e72de10c484..6ab39dbcca01 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -770,7 +770,6 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos); if (work_alloc != 1) { - rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_ring_full); ret = -ENOMEM; goto out; -- cgit v1.2.3 From 74179d44b6e199adaec8c7d842dc61ebefa314c5 Mon Sep 17 00:00:00 2001 From: Valentin Longchamp Date: Fri, 17 Feb 2017 11:31:22 +0100 Subject: net/wan: add MODULE_LICENSE for fsl_ucc_hdlc It is required to build it as a module. Signed-off-by: Valentin Longchamp Signed-off-by: David S. 
Miller --- drivers/net/wan/fsl_ucc_hdlc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index d869533d2e79..a5045b5279d7 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1175,3 +1175,4 @@ static struct platform_driver ucc_hdlc_driver = { }; module_platform_driver(ucc_hdlc_driver); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From fbdf0e28d061708cf18ba0f8e0db5360dc9a15b9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 17 Feb 2017 16:08:30 +0100 Subject: vmxnet3: prevent building with 64K pages I got a warning about broken code on ARM64 with 64K pages: drivers/net/vmxnet3/vmxnet3_drv.c: In function 'vmxnet3_rq_init': drivers/net/vmxnet3/vmxnet3_drv.c:1679:29: error: large integer implicitly truncated to unsigned type [-Werror=overflow] rq->buf_info[0][i].len = PAGE_SIZE; 'len' here is a 16-bit integer, so this clearly won't work. I don't think this driver is used much on anything other than x86, so there is no need to fix this properly and we can work around it with a Kconfig dependency to forbid known-broken configurations. qemu in theory supports it on other architectures too, but presumably only for compatibility with x86 guests that also run on vmware. CONFIG_PAGE_SIZE_64KB is used on hexagon, mips, sh and tile, the other symbols are architecture-specific names for the same thing. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/Kconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 823bc2fd201f..100fbdc9b95c 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -460,6 +460,9 @@ config XEN_NETDEV_BACKEND config VMXNET3 tristate "VMware VMXNET3 ethernet driver" depends on PCI && INET + depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \ + IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \ + PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES) help This driver supports VMware's vmxnet3 virtual ethernet NIC. To compile this driver as a module, choose M here: the -- cgit v1.2.3 From 0ccb998bf46d8dc9799b60ea0090fb08c37e96ad Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Fri, 17 Feb 2017 15:49:13 +0000 Subject: sfc: fix filter_id misinterpretation in edge case On EF10, hardware filter IDs are 13 bits, but in some places we store 32-bit "full filter IDs" in which higher order bits encode the filter match-priority. This could cause a filter to have a full filter ID of 0xffff, which is also the value EFX_EF10_FILTER_ID_INVALID which we use in 16-bit "short" filter IDs (without match-priority bits). This would occur if the hardware filter ID was 0x1fff and the match-priority was 7. Unfortunately, some code that checks for EFX_EF10_FILTER_ID_INVALID can be called on full filter IDs, and will WARN_ON if this ever happens. So, since we have plenty of spare bits in the full filter ID, this patch shifts the priority bits left one bit when constructing the full filter IDs, ensuring that the 0x2000 bit of a full filter ID will always be 0 and thus no full filter ID can ever equal EFX_EF10_FILTER_ID_INVALID. This patch also replaces open-coded full<->short filter ID conversions with calls to functions, thus keeping the definition of the full filter ID format in one place. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 48 +++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b29a819f721d..7da96998488a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -134,6 +134,22 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid); static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); +static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id) +{ + WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID); + return filter_id & (HUNT_FILTER_TBL_ROWS - 1); +} + +static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id) +{ + return filter_id / (HUNT_FILTER_TBL_ROWS * 2); +} + +static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx) +{ + return pri * HUNT_FILTER_TBL_ROWS * 2 + idx; +} + static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) { efx_dword_t reg; @@ -4194,7 +4210,7 @@ found: /* If successful, return the inserted filter ID */ if (rc == 0) - rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; + rc = efx_ef10_make_filter_id(match_pri, ins_index); wake_up_all(&table->waitq); out_unlock: @@ -4217,7 +4233,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, unsigned int priority_mask, u32 filter_id, bool by_index) { - unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); struct efx_ef10_filter_table *table = efx->filter_state; MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_HANDLE_OFST + @@ -4244,7 +4260,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, if (!spec || (!by_index && efx_ef10_filter_pri(table, spec) != - filter_id / HUNT_FILTER_TBL_ROWS)) { + efx_ef10_filter_get_unsafe_pri(filter_id))) { rc = -ENOENT; goto out_unlock; } @@ -4319,11 +4335,6 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, filter_id, false); } -static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) -{ - return filter_id % HUNT_FILTER_TBL_ROWS; -} - static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id) @@ -4337,7 +4348,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec) { - unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); struct efx_ef10_filter_table *table = efx->filter_state; const struct efx_filter_spec *saved_spec; int rc; @@ -4346,7 +4357,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); if (saved_spec && saved_spec->priority == priority && efx_ef10_filter_pri(table, saved_spec) == - filter_id / HUNT_FILTER_TBL_ROWS) { + efx_ef10_filter_get_unsafe_pri(filter_id)) { *spec = *saved_spec; rc = 0; } else { @@ -4398,7 +4409,7 @@ static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - return table->rx_match_count * HUNT_FILTER_TBL_ROWS; + return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2; } static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, @@ -4418,8 +4429,9 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, count = -EMSGSIZE; break; } - buf[count++] = (efx_ef10_filter_pri(table, spec) * - 
HUNT_FILTER_TBL_ROWS + + buf[count++] = + efx_ef10_make_filter_id( + efx_ef10_filter_pri(table, spec), filter_idx); } } @@ -4971,7 +4983,7 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) unsigned int filter_idx; if (*id != EFX_EF10_FILTER_ID_INVALID) { - filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id); + filter_idx = efx_ef10_filter_get_unsafe_id(*id); if (!table->entry[filter_idx].spec) netif_dbg(efx, drv, efx->net_dev, "marked null spec old %04x:%04x\n", *id, @@ -5106,7 +5118,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, rc = EFX_EF10_FILTER_ID_INVALID; } } - ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc); + ids[i] = efx_ef10_filter_get_unsafe_id(rc); } if (multicast && rollback) { @@ -5130,7 +5142,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, return rc; } else { vlan->default_filters[EFX_EF10_BCAST] = - efx_ef10_filter_get_unsafe_id(efx, rc); + efx_ef10_filter_get_unsafe_id(rc); } } @@ -5225,7 +5237,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, id = &vlan->default_filters[map[encap_type]]; EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); - *id = efx_ef10_filter_get_unsafe_id(efx, rc); + *id = efx_ef10_filter_get_unsafe_id(rc); if (!nic_data->workaround_26807 && !encap_type) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, @@ -5250,7 +5262,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, vlan->default_filters[EFX_EF10_BCAST] != EFX_EF10_FILTER_ID_INVALID); vlan->default_filters[EFX_EF10_BCAST] = - efx_ef10_filter_get_unsafe_id(efx, rc); + efx_ef10_filter_get_unsafe_id(rc); } } rc = 0; -- cgit v1.2.3 From 105eac6c35a168b8b6d8e594830a1da5585b260d Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Fri, 17 Feb 2017 15:50:12 +0000 Subject: sfc: forget filters from sw table if hw replies ENOENT on removing them If the hw doesn't think they exist, we should defer to its authority. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 7da96998488a..ec976ffb273f 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4309,13 +4309,18 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, table->entry[filter_idx].handle); - rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, - inbuf, sizeof(inbuf), NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), NULL, 0, NULL); spin_lock_bh(&efx->filter_lock); - if (rc == 0) { + if ((rc == 0) || (rc == -ENOENT)) { + /* Filter removed OK or didn't actually exist */ kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } else { + efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, + MC_CMD_FILTER_OP_IN_LEN, + NULL, 0, rc); } } -- cgit v1.2.3 From 9c568fd8844ec3986eb19b0b5d97536243d10d46 Mon Sep 17 00:00:00 2001 From: Peter Dunning Date: Fri, 17 Feb 2017 15:50:43 +0000 Subject: sfc: do not device_attach if a reset is pending efx_start_all can return without initialising queues as a reset is pending. This means that when netif_device_attach is called, the kernel can start sending traffic without having an initialised TX queue to send to. This patch avoids this by not calling netif_device_attach if there is a pending reset. 
Fixes: e283546c0465 ("sfc:On MCDI timeout, issue an FLR (and mark MCDI to fail-fast)") Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 8 ++++---- drivers/net/ethernet/sfc/ef10_sriov.c | 4 ++-- drivers/net/ethernet/sfc/efx.c | 10 ++++++---- drivers/net/ethernet/sfc/efx.h | 6 ++++++ drivers/net/ethernet/sfc/selftest.c | 2 +- 5 files changed, 19 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index ec976ffb273f..92e1c6d8b293 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5389,7 +5389,7 @@ restore_filters: if (rc2) goto reset_nic; - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return rc; @@ -5675,7 +5675,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) if (was_enabled) efx_net_open(efx->net_dev); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); #ifdef CONFIG_SFC_SRIOV if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { @@ -6127,7 +6127,7 @@ static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) if (!(nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return 0; } @@ -6199,7 +6199,7 @@ static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) * trigger a re-attach. Since there won't be an MC reset, we * have to do the attach ourselves. */ - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index ed4b14283461..b7e4345c990d 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -549,7 +549,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) vf->efx->type->filter_table_probe(vf->efx); up_write(&vf->efx->filter_sem); efx_net_open(vf->efx->net_dev); - netif_device_attach(vf->efx->net_dev); + efx_device_attach_if_not_resetting(vf->efx); } return 0; @@ -667,7 +667,7 @@ restore_filters: if (rc2) goto reset_nic; - netif_device_attach(vf->efx->net_dev); + efx_device_attach_if_not_resetting(vf->efx); } return rc; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 8c4c273643dc..334bcc6df6b2 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -876,7 +876,7 @@ out: efx_schedule_reset(efx, RESET_TYPE_DISABLE); } else { efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; @@ -2182,6 +2182,8 @@ int efx_net_open(struct net_device *net_dev) efx_link_status_changed(efx); efx_start_all(efx); + if (efx->state == STATE_DISABLED || efx->reset_pending) + netif_device_detach(efx->net_dev); efx_selftest_async_start(efx); return 0; } @@ -2248,7 +2250,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) mutex_unlock(&efx->mac_lock); efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return 0; } @@ -2744,7 +2746,7 @@ out: efx->state = STATE_DISABLED; } else { netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; } @@ -3417,7 +3419,7 @@ static int efx_pm_thaw(struct device *dev) efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); 
efx->state = STATE_READY; diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 342ae16e1f2d..ee14662415c5 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -276,6 +276,12 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) netif_tx_unlock_bh(dev); } +static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx) +{ + if ((efx->state != STATE_DISABLED) && !efx->reset_pending) + netif_device_attach(efx->net_dev); +} + static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) { if (WARN_ON(down_read_trylock(sem))) { diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index cd38b44ae23a..dab286a337a6 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -768,7 +768,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, __efx_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return rc_test; } -- cgit v1.2.3 From a4ecb15a2432880353d0de4a35204ab8b65b8751 Mon Sep 17 00:00:00 2001 From: "Cui, Cheng" Date: Fri, 17 Feb 2017 17:04:55 +0000 Subject: tcp: accommodate sequence number to a peer's shrunk receive window caused by precision loss in window scaling Prevent sending out a left-shifted sequence number from a Linux sender in response to a peer's shrunk receive-window caused by losing least significant bits in window-scaling. Cc: "David S. Miller" Cc: Alexey Kuznetsov Cc: James Morris Cc: Hideaki YOSHIFUJI Cc: Patrick McHardy Signed-off-by: Cheng Cui Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 61f272a99a49..22548b5f05cb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -83,7 +83,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tcp_skb_pcount(skb)); } -/* SND.NXT, if window was not shrunk. +/* SND.NXT, if window was not shrunk or the amount of shrunk was less than one + * window scaling factor due to loss of precision. * If window has been shrunk, what should we make? It is not clear at all. * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( * Anything in between SND.UNA...SND.UNA+SND.WND also can be already @@ -93,7 +94,9 @@ static inline __u32 tcp_acceptable_seq(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); - if (!before(tcp_wnd_end(tp), tp->snd_nxt)) + if (!before(tcp_wnd_end(tp), tp->snd_nxt) || + (tp->rx_opt.wscale_ok && + ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) return tp->snd_nxt; else return tcp_wnd_end(tp); -- cgit v1.2.3 From 4e33e34625103593a71d2bae471ce49cef62ef06 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 17 Feb 2017 09:11:42 -0800 Subject: tcp: use page_ref_inc() in tcp_sendmsg() sk_page_frag_refill() allocates either a compound page or an order-0 page. We can use page_ref_inc() which is slightly faster than get_page() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d44a6989e76d..da385ae997a3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1288,7 +1288,7 @@ new_segment: } else { skb_fill_page_desc(skb, i, pfrag->page, pfrag->offset, copy); - get_page(pfrag->page); + page_ref_inc(pfrag->page); } pfrag->offset += copy; } -- cgit v1.2.3 From e059a465cc79c510003a23b72e39d4a85bdfa00f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 17 Feb 2017 19:58:10 +0000 Subject: Bluetooth: hci_qca: fix spelling mistake: "Spurrious" -> "Spurious" trivial fix to spelling mistake in error message Signed-off-by: Colin Ian King Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 05c230719a47..f242dfd0c2e2 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -335,7 +335,7 @@ static void hci_ibs_tx_idle_timeout(unsigned long arg) /* Fall through */ default: - BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state); + BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); break; } @@ -373,7 +373,7 @@ static void hci_ibs_wake_retrans_timeout(unsigned long arg) /* Fall through */ default: - BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state); + BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); break; } -- cgit v1.2.3 From 8f91566f99fab8f7a0d2728a1e3defb13675837a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 18 Feb 2017 22:52:55 +0000 Subject: btmrvl: fix spelling mistake: "actived" -> "activated" trivial fix to spelling mistake in error message Signed-off-by: Colin Ian King Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 23711fe9e0a5..08e01f002bad 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1624,7 +1624,7 @@ static int btmrvl_sdio_suspend(struct device *dev) if (priv->adapter->hs_state != HS_ACTIVATED) { if (btmrvl_enable_hs(priv)) { - BT_ERR("HS not actived, suspend failed!"); + BT_ERR("HS not activated, suspend failed!"); priv->adapter->is_suspending = false; return -EBUSY; } -- cgit v1.2.3 From a410c821c0cf50bc0b73a91435852cd04b2c7acd Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Mon, 12 Dec 2016 15:44:07 -0800 Subject: i40e: fix disable overflow promiscuous mode There exists a bug in which the driver is unable to exit overflow promiscuous mode after having added "too many" mac filters. It is expected that after triggering overflow promiscuous, removing the failed/extra filters should then disable overflow promiscuous mode. The bug exists because we were intentionally skipping the sync_vsi_filter path in cases where we were removing failed filters since they shouldn't have been added to the firmware in the first place, however we still need to go through the sync_vsi_filter code path to determine whether or not it is ok to exit overflow promiscuous mode. This patch fixes the bug by making sure we go through the sync_vsi_filter path in cases of failed filters. 
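To make the control flow concrete, the following standalone C model (illustrative only; the struct, flag names and exit condition are simplified stand-ins, not the i40e code) shows how a delete that skips the sync path leaves the VSI stuck in overflow promiscuous mode, while one that marks the filter list as changed lets the next sync turn it off. The real driver expresses the same idea by always setting I40E_VSI_FLAG_FILTER_CHANGED and I40E_FLAG_FILTER_SYNC in __i40e_del_filter, as the diff below shows.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model, not driver code: a VSI that entered "overflow
 * promiscuous" because too many filters were requested.
 */
struct vsi {
        int failed_filters;     /* filters that never reached the firmware */
        bool filter_changed;    /* analogous to the FILTER_CHANGED flag */
        bool overflow_promisc;
};

/* The sync path is the only place that re-evaluates promiscuous mode. */
static void sync_filters(struct vsi *v)
{
        if (!v->filter_changed)
                return;                 /* skipped: never re-evaluated */
        v->filter_changed = false;
        if (v->overflow_promisc && v->failed_filters == 0)
                v->overflow_promisc = false;
}

static void del_failed_filter(struct vsi *v, bool mark_changed)
{
        v->failed_filters--;            /* nothing to tell the firmware... */
        if (mark_changed)
                v->filter_changed = true; /* ...but sync must still run */
}

int main(void)
{
        struct vsi v = { .failed_filters = 1, .overflow_promisc = true };

        del_failed_filter(&v, false);   /* old behaviour: skip the sync path */
        sync_filters(&v);
        printf("old: still promiscuous? %d\n", v.overflow_promisc);     /* 1 */

        v.failed_filters = 1;
        v.overflow_promisc = true;
        del_failed_filter(&v, true);    /* patched behaviour */
        sync_filters(&v);
        printf("patched: still promiscuous? %d\n", v.overflow_promisc); /* 0 */
        return 0;
}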
Change-ID: I634d249ca3e5fa50729553137c295e73e7722143 Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e83a8ca5dd65..fb8a52dd94cd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1447,18 +1447,20 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) if (!f) return; + /* If the filter was never added to firmware then we can just delete it + * directly and we don't want to set the status to remove or else an + * admin queue command will unnecessarily fire. + */ if ((f->state == I40E_FILTER_FAILED) || (f->state == I40E_FILTER_NEW)) { - /* this one never got added by the FW. Just remove it, - * no need to sync anything. - */ hash_del(&f->hlist); kfree(f); } else { f->state = I40E_FILTER_REMOVE; - vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } + + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } /** -- cgit v1.2.3 From 1d68005db4e72082f3e537d6c0cf831a71a7e2ee Mon Sep 17 00:00:00 2001 From: Joshua Hay Date: Mon, 12 Dec 2016 15:44:08 -0800 Subject: i40e: enable mc magic pkt wakeup during power down This patch adds a call to the mac_address_write admin q function during power down to update the PRTPM_SAH/SAL registers with the MC_MAG_EN bit thus enabling multicast magic packet wakeup. A FW workaround is needed to write the multicast magic wake up enable bit in the PRTPM_SAH register. The FW expects the mac address write admin q cmd to be called first with one of the WRITE_TYPE_LAA flags and then with the multicast relevant flags. *Note: This solution only works for X722 devices currently. A PFR will clear the previously mentioned bit by default, but X722 has support for a WOL_PRESERVE_ON_PFR flag which prevents the bit from being cleared. Once other devices support this flag, this solution should work as well. 
Change-ID: I51bd5b8535bd9051c2676e27c999c1657f786827 Signed-off-by: Joshua Hay Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2 + drivers/net/ethernet/intel/i40e/i40e_main.c | 74 ++++++++++++++++++++--- 3 files changed, 67 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7a23d3e47c6f..f4c4ee3edde1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -348,6 +348,7 @@ struct i40e_pf { #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) #define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) #define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) +#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b2101a51534c..451f48b7540a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -538,6 +538,8 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { __le16 command_flags; +#define I40E_AQC_MC_MAG_EN 0x0100 +#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index fb8a52dd94cd..e9335af4cc28 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8815,16 +8815,17 @@ static int i40e_sw_init(struct i40e_pf *pf) } #endif /* CONFIG_PCI_IOV */ if (pf->hw.mac.type == I40E_MAC_X722) { - pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | - I40E_FLAG_128_QP_RSS_CAPABLE | - I40E_FLAG_HW_ATR_EVICT_CAPABLE | - I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | - I40E_FLAG_WB_ON_ITR_CAPABLE | - I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | - I40E_FLAG_NO_PCI_LINK_CHECK | - I40E_FLAG_USE_SET_LLDP_MIB | - I40E_FLAG_GENEVE_OFFLOAD_CAPABLE | - I40E_FLAG_PTP_L4_CAPABLE; + pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE + | I40E_FLAG_128_QP_RSS_CAPABLE + | I40E_FLAG_HW_ATR_EVICT_CAPABLE + | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE + | I40E_FLAG_WB_ON_ITR_CAPABLE + | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE + | I40E_FLAG_NO_PCI_LINK_CHECK + | I40E_FLAG_USE_SET_LLDP_MIB + | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE + | I40E_FLAG_PTP_L4_CAPABLE + | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE; } else if ((pf->hw.aq.api_maj_ver > 1) || ((pf->hw.aq.api_maj_ver == 1) && (pf->hw.aq.api_min_ver > 4))) { @@ -11740,6 +11741,53 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) rtnl_unlock(); } +/** + * i40e_enable_mc_magic_wake - enable multicast magic packet wake up + * using the mac_address_write admin q function + * @pf: pointer to i40e_pf struct + **/ +static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + u8 mac_addr[6]; + u16 flags = 0; + + /* Get current MAC address in case it's an LAA */ + if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { + ether_addr_copy(mac_addr, + pf->vsi[pf->lan_vsi]->netdev->dev_addr); + } else { + dev_err(&pf->pdev->dev, + "Failed to retrieve MAC address; using default\n"); + ether_addr_copy(mac_addr, hw->mac.addr); + } + + /* The FW expects the mac address write cmd to first be called 
with + * one of these flags before calling it again with the multicast + * enable flags. + */ + flags = I40E_AQC_WRITE_TYPE_LAA_WOL; + + if (hw->func_caps.flex10_enable && hw->partition_id != 1) + flags = I40E_AQC_WRITE_TYPE_LAA_ONLY; + + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up"); + return; + } + + flags = I40E_AQC_MC_MAG_EN + | I40E_AQC_WOL_PRESERVE_ON_PFR + | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG; + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) + dev_err(&pf->pdev->dev, + "Failed to enable Multicast Magic Packet wake up\n"); +} + /** * i40e_shutdown - PCI callback for shutting down * @pdev: PCI device information struct @@ -11762,6 +11810,9 @@ static void i40e_shutdown(struct pci_dev *pdev) cancel_work_sync(&pf->service_task); i40e_fdir_teardown(pf); + if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + i40e_enable_mc_magic_wake(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); @@ -11793,6 +11844,9 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); + if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + i40e_enable_mc_magic_wake(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); -- cgit v1.2.3 From 03aa268b146127361c206fe3942add2adf6ce1d7 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 12 Dec 2016 15:44:09 -0800 Subject: i40e: remove unnecessary call to i40e_update_link_info This call is made just prior to running i40e_link_event. In i40e_link_event, we set hw->phy.get_link_info to true just prior to calling i40e_get_link_status, which conveniently runs i40e_update_link_info for us. Thus, we are running i40e_update_link_info twice, which seems like something we don't need to do... Change-ID: I36467a570f44b7546d218c99e134ff97c2709315 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e9335af4cc28..a6dca5822cff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10761,7 +10761,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw); i40e_link_event(pf); /* Initialize user-specific link properties */ -- cgit v1.2.3 From 8a68badd12a8b708a02d54cd5aac4d07851a6d5a Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Mon, 12 Dec 2016 15:44:10 -0800 Subject: i40evf: free rings in remove function When the i40evf_remove() calls netdev close, the device doesn't actually close - it schedules the work for the watchdog to perform. Since we're stopping the watchdog, this work doesn't get done. However, we're resetting the part, so we can free resources after the reset request has gone through. This plugs a memory leak. 
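As a rough standalone C sketch of the leak (the single-slot work queue and the names here are invented for illustration; the real driver defers the close work to its watchdog task):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative model, not the i40evf code: "close" does not clean up
 * directly, it only schedules the cleanup for a watchdog to perform.
 */
struct adapter {
        void *rings;                    /* stands in for Tx/Rx ring memory */
        void (*pending_work)(struct adapter *);
};

static void free_rings(struct adapter *a)
{
        free(a->rings);
        a->rings = NULL;
}

static void netdev_close(struct adapter *a)
{
        a->pending_work = free_rings;   /* cleanup deferred to the watchdog */
}

static void remove_device(struct adapter *a, int explicit_free)
{
        netdev_close(a);
        a->pending_work = NULL;         /* watchdog stopped: work never runs */
        if (explicit_free)
                free_rings(a);          /* what the patch adds in remove() */
        printf("rings %s\n", a->rings ? "leaked" : "freed");
}

int main(void)
{
        struct adapter leaky = { .rings = malloc(4096) };
        struct adapter fixed = { .rings = malloc(4096) };

        remove_device(&leaky, 0);       /* old: rings leaked */
        remove_device(&fixed, 1);       /* patched: rings freed */
        free(leaky.rings);              /* tidy up the demo's own leak */
        return 0;
}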
Change-ID: Id5335dcaf76ce00d2a4c3d26e9faf711d7f051cf Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 920c1cb06a92..5673dbd2cf7d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2871,7 +2871,8 @@ static void i40evf_remove(struct pci_dev *pdev) i40evf_request_reset(adapter); msleep(50); } - + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); i40evf_misc_irq_disable(adapter); i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); -- cgit v1.2.3 From 51f38262669b88b6f5ec52cb93dc72c58d85dc1f Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Mon, 12 Dec 2016 15:44:11 -0800 Subject: i40evf: add comment Add a comment to reduce confusion. Change-ID: I3d5819c0f3f5174680442ae54398a073d4a61f4f Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5673dbd2cf7d..f35dcaac5bb7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2153,6 +2153,11 @@ static int i40evf_close(struct net_device *netdev) adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); + /* We explicitly don't free resources here because the hardware is + * still active and can DMA into memory. Resources are cleared in + * i40evf_virtchnl_completion() after we get confirmation from the PF + * driver that the rings have been stopped. + */ return 0; } -- cgit v1.2.3 From 3c234c4709529298498b597bcff35f56ac866a99 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Mon, 12 Dec 2016 15:44:12 -0800 Subject: i40e: Fix Adaptive ITR enabling This patch fixes a bug introduced with the addition of the per queue ITR feature support in ethtool. With that addition, there were functions added which converted the ITR settings to binary values. The IS_ENABLED macros that run on those values check whether a bit is set or not, and with the value being binary, the bit check always returned ITR disabled, which prevents any updating of the ITR rate. This patch fixes the problem by changing the functions to return the current ITR value instead and renaming them to better reflect their function. These functions now provide a value which will be accurately assessed and allow the ITR to be updated as intended.
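The following standalone C example illustrates the bug; the 0x8000 flag bit is only an assumed stand-in for the driver's real I40E_ITR_DYNAMIC encoding, but the effect of the !! conversion is the same.

#include <stdio.h>

/* Illustrative only: a "dynamic ITR" flag kept in a high bit of the
 * per-queue setting. The exact encoding in the i40e driver may differ;
 * the point is that the flag is a bit, not the value 1.
 */
#define ITR_DYNAMIC_FLAG   0x8000u
#define ITR_IS_DYNAMIC(s)  (!!((s) & ITR_DYNAMIC_FLAG))

int main(void)
{
        unsigned int setting = ITR_DYNAMIC_FLAG | 0x003e; /* flag + interval */

        /* Old helpers returned !!(setting): a plain 0/1 truth value. */
        unsigned int old_ret = !!setting;
        /* Patched helpers return the setting itself. */
        unsigned int new_ret = setting;

        printf("old: IS_DYNAMIC=%d\n", ITR_IS_DYNAMIC(old_ret)); /* flag lost */
        printf("new: IS_DYNAMIC=%d\n", ITR_IS_DYNAMIC(new_ret));
        return 0;
}

Compiled and run, the first line prints 0 (adaptive ITR wrongly reported as off) and the second prints 1.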
Change-ID: I14f1d088d052e27f652aaa3113e186415ddea1fc Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12 ++++++------ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 09f09ea7a5e5..4dc993bb16bf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1864,14 +1864,14 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) /* a small macro to shorten up some long lines */ #define INTREG I40E_PFINT_DYN_CTLN -static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) { - return !!(vsi->rx_rings[idx]->rx_itr_setting); + return vsi->rx_rings[idx]->rx_itr_setting; } -static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) { - return !!(vsi->tx_rings[idx]->tx_itr_setting); + return vsi->tx_rings[idx]->tx_itr_setting; } /** @@ -1897,8 +1897,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - rx_itr_setting = get_rx_itr_enabled(vsi, idx); - tx_itr_setting = get_tx_itr_enabled(vsi, idx); + rx_itr_setting = get_rx_itr(vsi, idx); + tx_itr_setting = get_tx_itr(vsi, idx); if (q_vector->itr_countdown > 0 || (!ITR_IS_DYNAMIC(rx_itr_setting) && diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b758846d4dc5..7cd28ef6e714 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1324,18 +1324,18 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) /* a small macro to shorten up some long lines */ #define INTREG I40E_VFINT_DYN_CTLN1 -static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) { struct i40evf_adapter *adapter = vsi->back; - return !!(adapter->rx_rings[idx].rx_itr_setting); + return adapter->rx_rings[idx].rx_itr_setting; } -static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) { struct i40evf_adapter *adapter = vsi->back; - return !!(adapter->tx_rings[idx].tx_itr_setting); + return adapter->tx_rings[idx].tx_itr_setting; } /** @@ -1361,8 +1361,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - rx_itr_setting = get_rx_itr_enabled(vsi, idx); - tx_itr_setting = get_tx_itr_enabled(vsi, idx); + rx_itr_setting = get_rx_itr(vsi, idx); + tx_itr_setting = get_tx_itr(vsi, idx); if (q_vector->itr_countdown > 0 || (!ITR_IS_DYNAMIC(rx_itr_setting) && -- cgit v1.2.3 From 773d4023badee2e196f4125a94fe9d5a739720a0 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Mon, 12 Dec 2016 15:44:13 -0800 Subject: i40e: refactor AQ CMD buffer debug printing This patch refactors the '%*ph' printk format specifier to instead use the print_hex_dump function, as recommended by the '%*ph' documentation. This produces better/more standardized output. 
Change-ID: Id56700b4e8abc40ff8c04bc8379e7df04cb4d6fd Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 19 ++++++++++++------- drivers/net/ethernet/intel/i40evf/i40e_common.c | 19 ++++++++++++------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index fc73e4ef27ac..ece57d6a6e23 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -300,7 +300,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len; u8 *buf = (u8 *)buffer; - u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; @@ -328,12 +327,18 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ - for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); - /* write whatever's left over without overrunning the buffer */ - if (i < len) - i40e_debug(hw, mask, "\t0x%04X %*ph\n", - i, len - i, buf + i); + if (hw->debug_mask & mask) { + char prefix[20]; + + snprintf(prefix, 20, + "i40e %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } } } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index b5a59dd72a0c..89dfdbca13db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -304,7 +304,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u8 *buf = (u8 *)buffer; - u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; @@ -332,12 +331,18 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ - for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); - /* write whatever's left over without overrunning the buffer */ - if (i < len) - i40e_debug(hw, mask, "\t0x%04X %*ph\n", - i, len - i, buf + i); + if (hw->debug_mask & mask) { + char prefix[20]; + + snprintf(prefix, 20, + "i40evf %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } } } -- cgit v1.2.3 From 2ae0bf501425cbddc8b992a2b65fc04d911cae8c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 12 Dec 2016 15:44:14 -0800 Subject: i40e: convert to cpu from le16 to generate switch_id correctly On Big Endian platforms we would incorrectly calculate the wrong switch id since we did not properly convert the le16 value into CPU format. Caught by sparse. 
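A standalone C sketch of the failure mode follows; the 0x0FFF mask is an illustrative assumption rather than the exact I40E_AQ_VSI_SW_ID_MASK value, and the byte layout is written out explicitly so the example runs the same on any host.

#include <stdio.h>
#include <stdint.h>

#define SW_ID_MASK 0x0FFFu      /* illustrative mask, not the real define */

/* le16_to_cpu-style decode: always treats the bytes as little-endian */
static uint16_t load_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

/* What a raw, unconverted 16-bit load effectively does on a big-endian host */
static uint16_t load_native_be(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        /* switch_id 0x0234 as it sits in the little-endian AQ buffer */
        uint8_t buf[2] = { 0x34, 0x02 };

        printf("converted:      0x%04x\n", load_le16(buf) & SW_ID_MASK);
        printf("raw read on BE: 0x%04x\n", load_native_be(buf) & SW_ID_MASK);
        return 0;
}

The first line prints 0x0234, the second 0x0402, which is the kind of wrong switch_id a big-endian host would compute without the le16_to_cpu conversion.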
Change-ID: I69a2f9fa064a0a91691f7d0e6fcc206adceb8e36 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index f1f41f12902f..267ad2588255 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -974,7 +974,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, struct i40e_dcbx_config *r_cfg = &pf->hw.remote_dcbx_config; int i, ret; - u32 switch_id; + u16 switch_id; bw_data = kzalloc(sizeof( struct i40e_aqc_query_port_ets_config_resp), @@ -986,7 +986,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, vsi = pf->vsi[pf->lan_vsi]; switch_id = - vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK; + le16_to_cpu(vsi->info.switch_id) & + I40E_AQ_VSI_SW_ID_MASK; ret = i40e_aq_query_port_ets_config(&pf->hw, switch_id, -- cgit v1.2.3 From 5cb259016b4258d7ac53588a01d17da1ceda84b7 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 12 Dec 2016 15:44:15 -0800 Subject: i40e: properly convert le16 value to CPU format This ensures that the pvid which is stored in __le16 format is converted to the CPU format. This will fix comparison issues on Big Endian platforms. Change-ID: I92c80d1315dc2a0f9f095d5a0c48d461beb052ed Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a6dca5822cff..df78271bdce5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1254,6 +1254,7 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, struct hlist_head *tmp_del_list, int vlan_filters) { + s16 pvid = le16_to_cpu(vsi->info.pvid); struct i40e_mac_filter *f, *add_head; struct i40e_new_mac_filter *new; struct hlist_node *h; @@ -1275,8 +1276,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, /* Update the filters about to be added in place */ hlist_for_each_entry(new, tmp_add_list, hlist) { - if (vsi->info.pvid && new->f->vlan != vsi->info.pvid) - new->f->vlan = vsi->info.pvid; + if (pvid && new->f->vlan != pvid) + new->f->vlan = pvid; else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) new->f->vlan = 0; else if (!vlan_filters && new->f->vlan == 0) @@ -1290,12 +1291,12 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, * order to avoid duplicating code for adding the new filter * then deleting the old filter. */ - if ((vsi->info.pvid && f->vlan != vsi->info.pvid) || + if ((pvid && f->vlan != pvid) || (vlan_filters && f->vlan == I40E_VLAN_ANY) || (!vlan_filters && f->vlan == 0)) { /* Determine the new vlan we will be adding */ - if (vsi->info.pvid) - new_vlan = vsi->info.pvid; + if (pvid) + new_vlan = pvid; else if (vlan_filters) new_vlan = 0; else -- cgit v1.2.3 From ae13670824d5adadc95a881ebfaa6fa8735218f0 Mon Sep 17 00:00:00 2001 From: Harshitha Ramamurthy Date: Mon, 12 Dec 2016 15:44:16 -0800 Subject: i40e: Error handling for link event There exists an intermittent bug which causes the 'Link Detected' field reported by the 'ethtool ' command to be 'Yes' when in fact, there is no link. 
This patch fixes the problem by enabling temporary link polling when i40e_get_link_status returns an error. This causes the driver to remember that an admin queue command failed and polls, until the function returns with a success. Change-Id: I64c69b008db4017b8729f3fc27b8f65c8fe2eaa0 Signed-off-by: Harshitha Ramamurthy Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f4c4ee3edde1..82d8040fa418 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -349,6 +349,7 @@ struct i40e_pf { #define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) #define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) #define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54) +#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index df78271bdce5..199ef34e00f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6353,7 +6353,16 @@ static void i40e_link_event(struct i40e_pf *pf) old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); status = i40e_get_link_status(&pf->hw, &new_link); - if (status) { + + /* On success, disable temp link polling */ + if (status == I40E_SUCCESS) { + if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING) + pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING; + } else { + /* Enable link polling temporarily until i40e_get_link_status + * returns I40E_SUCCESS + */ + pf->flags |= I40E_FLAG_TEMP_LINK_POLLING; dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", status); return; @@ -6405,7 +6414,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) + if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || + (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack -- cgit v1.2.3 From b9c015d421946fe3675fcb9cbacd0a37f1d3263c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 12 Dec 2016 15:44:17 -0800 Subject: i40e: mark the value passed to csum_replace_by_diff as __wsum Fix, or rather, avoid a sparse warning caused by the fact that csum_replace_by_diff expects to receive a __wsum value. Since the calculation appears to work, simply typecast the passed paylen value to __wsum to avoid the warning. This seems pretty fishy since __wsum was obviously annotated as a separate type on purpose, so this throws the entire calculation into question. Since it currently appears to behave as expected, the typecast is probably safe. 
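For readers unfamiliar with sparse's bitwise types, here is a hedged standalone sketch; the typedefs and attribute handling mimic the kernel's conventions but are local stand-ins, not the kernel's own definitions.

#include <stdio.h>

/* Under sparse (__CHECKER__), bitwise typedefs become distinct types and
 * __force marks an intentional conversion; to the C compiler they are
 * ordinary integers and the macros expand to nothing.
 */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise my_wsum;   /* stand-in for __wsum */
typedef unsigned int __bitwise my_be32;   /* stand-in for __be32 */

static void replace_by_diff(my_wsum *sum, my_wsum diff)
{
        /* the real csum_replace_by_diff() does ones'-complement folding here */
        (void)sum;
        (void)diff;
}

int main(void)
{
        my_wsum check = (__force my_wsum)0;
        my_be32 paylen_be = (__force my_be32)0x12345678;

        /* Without the __force cast, sparse warns that a my_be32 is being
         * passed where a my_wsum is expected, even though both are 32-bit
         * integers to the compiler.
         */
        replace_by_diff(&check, (__force my_wsum)paylen_be);
        printf("builds cleanly; sparse accepts the conversion only via __force\n");
        return 0;
}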
Change-ID: I4fdc5cddd589abc16098176e8a61127e761488f4 Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 +++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4dc993bb16bf..97d46058d71d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2335,7 +2335,8 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.udp->check, htonl(paylen)); + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ @@ -2356,7 +2357,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 7cd28ef6e714..c91fcf43ccbc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1629,7 +1629,8 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.udp->check, htonl(paylen)); + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ @@ -1650,7 +1651,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; -- cgit v1.2.3 From ee847d935156b041679ad82c269796dae1afaa6b Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 21 Dec 2016 18:04:47 -0500 Subject: i40e: remove duplicate device id from PCI table Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 199ef34e00f8..b0215c1159fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -77,7 +77,6 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, -- cgit v1.2.3 From 83a0c6e589017ce737720b9e05e2fa3fe4f7b860 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Thu, 12 Jan 2017 17:04:14 -0800 Subject: i40e: Invoke softirqs after napi_reschedule The following message is logged from time to time when using i40e: NOHZ: local_softirq_pending 08 i40e may schedule napi from a 
workqueue. Afterwards, softirqs are not run in a deterministic time frame. The problem is the same as what was described in commit ec13ee80145c ("virtio_net: invoke softirqs after __napi_schedule") and this patch applies the same fix to i40e. Signed-off-by: Benjamin Poirier Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b0215c1159fe..e8a8351c8ea9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4685,8 +4685,10 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) */ if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { + local_bh_disable(); if (napi_reschedule(&tx_ring->q_vector->napi)) tx_ring->tx_stats.tx_lost_interrupt++; + local_bh_enable(); } } -- cgit v1.2.3 From 1e128c81290a419ab9ec8b09fe989f1c6c15a0f4 Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Wed, 15 Feb 2017 06:28:22 -0800 Subject: qed: Add support for hardware offloaded FCoE. This adds the backbone required for the various HW initalizations which are necessary for the FCoE driver (qedf) for QLogic FastLinQ 4xxxx line of adapters - FW notification, resource initializations, etc. Signed-off-by: Arun Easi Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/Kconfig | 3 + drivers/net/ethernet/qlogic/qed/Makefile | 1 + drivers/net/ethernet/qlogic/qed/qed.h | 11 + drivers/net/ethernet/qlogic/qed/qed_cxt.c | 98 +- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 3 + drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 13 +- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 5 +- drivers/net/ethernet/qlogic/qed/qed_dev.c | 205 ++++- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 42 + drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 1014 +++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 87 ++ drivers/net/ethernet/qlogic/qed/qed_hsi.h | 781 +++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_hw.c | 3 + drivers/net/ethernet/qlogic/qed/qed_ll2.c | 25 + drivers/net/ethernet/qlogic/qed/qed_ll2.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 7 + drivers/net/ethernet/qlogic/qed/qed_mcp.c | 3 + drivers/net/ethernet/qlogic/qed/qed_mcp.h | 1 + drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 8 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 4 + drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 3 + include/linux/qed/common_hsi.h | 10 +- include/linux/qed/fcoe_common.h | 715 +++++++++++++++ include/linux/qed/qed_fcoe_if.h | 145 +++ include/linux/qed/qed_if.h | 41 +- 25 files changed, 3211 insertions(+), 19 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_fcoe.c create mode 100644 drivers/net/ethernet/qlogic/qed/qed_fcoe.h create mode 100644 include/linux/qed/fcoe_common.h create mode 100644 include/linux/qed/qed_fcoe_if.h diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index aaa1e8517348..c2e24afbaeb2 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -114,4 +114,7 @@ config QED_RDMA config QED_ISCSI bool +config QED_FCOE + bool + endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 1a7300f72cab..974929dcc74e 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ 
b/drivers/net/ethernet/qlogic/qed/Makefile @@ -7,3 +7,4 @@ qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o +qed-$(CONFIG_QED_FCOE) += qed_fcoe.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 6557f94b92ed..61a9cd5be497 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -60,6 +60,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_WFQ_UNIT 100 #define ISCSI_BDQ_ID(_port_id) (_port_id) +#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2) #define QED_WID_SIZE (1024) #define QED_PF_DEMS_SIZE (4) @@ -167,6 +168,7 @@ struct qed_tunn_update_params { */ enum qed_pci_personality { QED_PCI_ETH, + QED_PCI_FCOE, QED_PCI_ISCSI, QED_PCI_ETH_ROCE, QED_PCI_DEFAULT /* default in shmem */ @@ -204,6 +206,7 @@ enum QED_FEATURE { QED_VF, QED_RDMA_CNQ, QED_VF_L2_QUE, + QED_FCOE_CQ, QED_MAX_FEATURES, }; @@ -221,6 +224,7 @@ enum QED_PORT_MODE { enum qed_dev_cap { QED_DEV_CAP_ETH, + QED_DEV_CAP_FCOE, QED_DEV_CAP_ISCSI, QED_DEV_CAP_ROCE, }; @@ -255,6 +259,10 @@ struct qed_hw_info { u32 part_num[4]; unsigned char hw_mac_addr[ETH_ALEN]; + u64 node_wwn; + u64 port_wwn; + + u16 num_fcoe_conns; struct qed_igu_info *p_igu_info; @@ -410,6 +418,7 @@ struct qed_hwfn { struct qed_ooo_info *p_ooo_info; struct qed_rdma_info *p_rdma_info; struct qed_iscsi_info *p_iscsi_info; + struct qed_fcoe_info *p_fcoe_info; struct qed_pf_params pf_params; bool b_rdma_enabled_in_prs; @@ -620,11 +629,13 @@ struct qed_dev { u8 protocol; #define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH) +#define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE) /* Callbacks to protocol driver */ union { struct qed_common_cb_ops *common; struct qed_eth_cb_ops *eth; + struct qed_fcoe_cb_ops *fcoe; struct qed_iscsi_cb_ops *iscsi; } protocol_ops; void *ops_cookie; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index dcb8fc185df7..d42d03df751a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -90,12 +90,14 @@ union conn_context { struct core_conn_context core_ctx; struct eth_conn_context eth_ctx; struct iscsi_conn_context iscsi_ctx; + struct fcoe_conn_context fcoe_ctx; struct roce_conn_context roce_ctx; }; -/* TYPE-0 task context - iSCSI */ +/* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { struct iscsi_task_context iscsi_ctx; + struct fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ @@ -240,15 +242,22 @@ struct qed_cxt_mngr { static bool src_proto(enum protocol_type type) { return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_FCOE || type == PROTOCOLID_ROCE; } static bool tm_cid_proto(enum protocol_type type) { return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_FCOE || type == PROTOCOLID_ROCE; } +static bool tm_tid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_FCOE; +} + /* counts the iids for the CDU/CDUC ILT client configuration */ struct qed_cdu_iids { u32 pf_cids; @@ -307,6 +316,22 @@ static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr, iids->pf_cids += p_cfg->cid_count; iids->per_vf_cids += p_cfg->cids_per_vf; } + + if (tm_tid_proto(i)) { + struct qed_tid_seg *segs = p_cfg->tid_seg; + + /* for each segment there is at most one + * protocol for which count is not 0. 
+ */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->pf_tids[j] += segs[j].count; + + /* The last array elelment is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. + */ + iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } } iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN); @@ -1694,9 +1719,42 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) /* @@@TBD how to enable the scan for the VFs */ } +static void qed_prs_init_common(struct qed_hwfn *p_hwfn) +{ + if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) && + p_hwfn->pf_params.fcoe_pf_params.is_target) + STORE_RT_REG(p_hwfn, + PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0); +} + +static void qed_prs_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_conn_type_cfg *p_fcoe; + struct qed_tid_seg *p_tid; + + p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE]; + + /* If FCoE is active set the MAX OX_ID (tid) in the Parser */ + if (!p_fcoe->cid_count) + return; + + p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG]; + if (p_hwfn->pf_params.fcoe_pf_params.is_target) { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET, + p_tid->count); + } else { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET, + p_tid->count); + } +} + void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) { qed_cdu_init_common(p_hwfn); + qed_prs_init_common(p_hwfn); } void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) @@ -1708,6 +1766,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) qed_ilt_init_pf(p_hwfn); qed_src_init_pf(p_hwfn); qed_tm_init_pf(p_hwfn); + qed_prs_init_pf(p_hwfn); } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, @@ -1885,6 +1944,27 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) p_params->num_cons, 1); break; } + case QED_PCI_FCOE: + { + struct qed_fcoe_pf_params *p_params; + + p_params = &p_hwfn->pf_params.fcoe_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_FCOE, + p_params->num_cons, + 0); + + qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE, + QED_CXT_FCOE_TID_SEG, 0, + p_params->num_tasks, true); + } else { + DP_INFO(p_hwfn->cdev, + "Fcoe personality used without setting params!\n"); + } + break; + } case QED_PCI_ISCSI: { struct qed_iscsi_pf_params *p_params; @@ -1927,6 +2007,10 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, /* Verify the personality */ switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; case QED_PCI_ISCSI: proto = PROTOCOLID_ISCSI; seg = QED_CXT_ISCSI_TID_SEG; @@ -2215,15 +2299,19 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli; - struct qed_ilt_cli_blk *p_seg; struct qed_tid_seg *p_seg_info; - u32 proto, seg; - u32 total_lines; - u32 tid_size, ilt_idx; + struct qed_ilt_cli_blk *p_seg; u32 num_tids_per_block; + u32 tid_size, ilt_idx; + u32 total_lines; + u32 proto, seg; /* Verify the personality */ switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; case QED_PCI_ISCSI: proto = PROTOCOLID_ISCSI; seg = QED_CXT_ISCSI_TID_SEG; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 98f4973cac9d..8b010324268a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -91,6 +91,7 
@@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, #define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI #define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE +#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE enum qed_cxt_elem_type { QED_ELEM_CXT, QED_ELEM_SRQ, @@ -204,4 +205,6 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 #define QED_CTX_FL_MEM 1 +int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, + u32 tid, u8 ctx_type, void **task_ctx); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index dc0d2c9ad6b5..5bd36a4a8fcd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -432,7 +432,6 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, return rc; } -#ifdef CONFIG_DCB static void qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn, struct qed_dcbx_app_prio *p_prio, @@ -749,7 +748,6 @@ qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return 0; } -#endif static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -864,6 +862,15 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, return rc; } +void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) +{ + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + void *cookie = hwfn->cdev->ops_cookie; + + if (cookie && op->dcbx_aen) + op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type); +} + /* Read updated MIB. * Reconfigure QM and invoke PF update ramrod command if operational MIB * change is detected. @@ -890,6 +897,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, qed_sp_pf_update(p_hwfn); } } + qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type); + qed_dcbx_aen(p_hwfn, type); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index d70300fda020..0fabe97f998d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -57,7 +57,6 @@ struct qed_dcbx_app_data { u8 tc; /* Traffic Class */ }; -#ifdef CONFIG_DCB #define QED_DCBX_VERSION_DISABLED 0 #define QED_DCBX_VERSION_IEEE 1 #define QED_DCBX_VERSION_CEE 2 @@ -73,7 +72,6 @@ struct qed_dcbx_set { struct qed_dcbx_admin_params config; u32 ver_num; }; -#endif struct qed_dcbx_results { bool dcbx_enabled; @@ -97,9 +95,8 @@ struct qed_dcbx_info { struct qed_dcbx_results results; struct dcbx_mib operational; struct dcbx_mib remote; -#ifdef CONFIG_DCB struct qed_dcbx_set set; -#endif + struct qed_dcbx_get get; u8 dcbx_cap; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 33e720143b8d..5ee7f040c50a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -49,6 +49,7 @@ #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_dev_api.h" +#include "qed_fcoe.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" @@ -172,6 +173,9 @@ void qed_resc_free(struct qed_dev *cdev) #ifdef CONFIG_QED_LL2 qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); @@ -433,6 +437,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) int qed_resc_alloc(struct qed_dev *cdev) { struct qed_iscsi_info 
*p_iscsi_info; + struct qed_fcoe_info *p_fcoe_info; struct qed_ooo_info *p_ooo_info; #ifdef CONFIG_QED_LL2 struct qed_ll2_info *p_ll2_info; @@ -539,6 +544,14 @@ int qed_resc_alloc(struct qed_dev *cdev) p_hwfn->p_ll2_info = p_ll2_info; } #endif + + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + p_fcoe_info = qed_fcoe_alloc(p_hwfn); + if (!p_fcoe_info) + goto alloc_no_mem; + p_hwfn->p_fcoe_info = p_fcoe_info; + } + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { p_iscsi_info = qed_iscsi_alloc(p_hwfn); if (!p_iscsi_info) @@ -602,6 +615,9 @@ void qed_resc_setup(struct qed_dev *cdev) if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); @@ -994,7 +1010,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Protocl Configuration */ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); - STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, + (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); /* Cleanup chip from previous driver if such remains exist */ @@ -1026,8 +1043,16 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* send function start command */ rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, allow_npar_tx_switch); - if (rc) + if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); + return rc; + } + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); + qed_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); + } } return rc; } @@ -1787,8 +1812,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -1934,6 +1959,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) __set_bit(QED_DEV_CAP_ETH, &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) + __set_bit(QED_DEV_CAP_FCOE, + &p_hwfn->hw_info.device_capabilities); if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) __set_bit(QED_DEV_CAP_ISCSI, &p_hwfn->hw_info.device_capabilities); @@ -2671,6 +2699,177 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); } +int +qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, enum qed_llh_port_filter_type_t type) +{ + u32 high = 0, low = 0, en; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return 0; + + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + high = source_port_or_eth_type; + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + case QED_LLH_FILTER_UDP_SRC_PORT: + low = source_port_or_eth_type << 16; + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + case QED_LLH_FILTER_UDP_DEST_PORT: + low = dest_port; 
+ break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + low = (source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(p_hwfn, + "Non valid LLH protocol filter type %d\n", type); + return -EINVAL; + } + /* Find a free entry and utilize it */ + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + en = qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); + if (en) + continue; + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), low); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), high); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 1 << type); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); + break; + } + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { + DP_NOTICE(p_hwfn, + "Failed to find an empty LLH filter to utilize\n"); + return -EINVAL; + } + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "ETH type %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP src port %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_UDP_SRC_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP src port %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP dst port %x is added at %d\n", dest_port, i); + break; + case QED_LLH_FILTER_UDP_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP dst port %x is added at %d\n", dest_port, i); + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP src/dst ports %x/%x are added at %d\n", + source_port_or_eth_type, dest_port, i); + break; + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP src/dst ports %x/%x are added at %d\n", + source_port_or_eth_type, dest_port, i); + break; + } + return 0; +} + +void +qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type) +{ + u32 high = 0, low = 0; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return; + + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + high = source_port_or_eth_type; + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + case QED_LLH_FILTER_UDP_SRC_PORT: + low = source_port_or_eth_type << 16; + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + case QED_LLH_FILTER_UDP_DEST_PORT: + low = dest_port; + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + low = (source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(p_hwfn, + "Non valid LLH protocol filter type %d\n", type); + return; + } + + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (!qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32))) + continue; + if (!qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32))) + continue; + if (!(qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32)) & BIT(type))) + continue; + if (qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32)) != low) + continue; + if (qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32)) 
!= high) + continue; + + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), 0); + break; + } + + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) + DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); +} + static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr, void *p_eth_qzone, size_t eth_qzone_size, u8 timeset) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 5d37ba24da40..6812003411cd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -353,6 +353,48 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *p_filter); +enum qed_llh_port_filter_type_t { + QED_LLH_FILTER_ETHERTYPE, + QED_LLH_FILTER_TCP_SRC_PORT, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_PORT, + QED_LLH_FILTER_UDP_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT +}; + +/** + * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + * @param type - type of filters and comparing + */ +int +qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type); + +/** + * @brief qed_llh_remove_protocol_filter - remove a protocol filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + * @param type - type of filters and comparing + */ +void +qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type); + /** * *@brief Cleanup of previous driver remains prior to load * diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c new file mode 100644 index 000000000000..cbc81412174f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -0,0 +1,1014 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define __PREVENT_DUMP_MEM_ARR__ +#define __PREVENT_PXP_GLOBAL_WIN__ +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_fcoe.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include + +struct qed_fcoe_conn { + struct list_head list_entry; + bool free_on_delete; + + u16 conn_id; + u32 icid; + u32 fw_cid; + u8 layer_code; + + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + dma_addr_t xferq_pbl_addr; + void *xferq_pbl_addr_virt_addr; + dma_addr_t xferq_addr[4]; + void *xferq_addr_virt_addr[4]; + dma_addr_t confq_pbl_addr; + void *confq_pbl_addr_virt_addr; + dma_addr_t confq_addr[2]; + void *confq_addr_virt_addr[2]; + + dma_addr_t terminate_params; + + u16 dst_mac_addr_lo; + u16 dst_mac_addr_mid; + u16 dst_mac_addr_hi; + u16 src_mac_addr_lo; + u16 src_mac_addr_mid; + u16 src_mac_addr_hi; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + u16 physical_q0; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +static int +qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_fcoe_pf_params *fcoe_pf_params = NULL; + struct fcoe_init_ramrod_params *p_ramrod = NULL; + struct fcoe_init_func_ramrod_data *p_data; + struct fcoe_conn_context *p_cxt = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + struct qed_cxt_info cxt_info; + u32 dummy_cid; + int rc = 0; + u16 tmp; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_init; + p_data = &p_ramrod->init_ramrod_data; + fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; + + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); + tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); + p_data->sq_num_pages_in_pbl = tmp; + + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); + if (rc) + return rc; + + cxt_info.iid = dummy_cid; + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); + if (rc) { + DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", + dummy_cid); + return rc; + } + p_cxt = cxt_info.p_cxt; + SET_FIELD(p_cxt->tstorm_ag_context.flags3, + TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + + fcoe_pf_params->dummy_icid = (u16)dummy_cid; + + tmp = 
cpu_to_le16(fcoe_pf_params->num_tasks); + p_data->func_params.num_tasks = tmp; + p_data->func_params.log_page_size = fcoe_pf_params->log_page_size; + p_data->func_params.debug_mode = fcoe_pf_params->debug_mode; + + DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr, + fcoe_pf_params->glbl_q_params_addr); + + tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries); + p_data->q_params.cq_num_entries = tmp; + + tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries); + p_data->q_params.cmdq_num_entries = tmp; + + tmp = fcoe_pf_params->num_cqs; + p_data->q_params.num_queues = (u8)tmp; + + tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_data->q_params.queue_relative_offset = (u8)tmp; + + for (i = 0; i < fcoe_pf_params->num_cqs; i++) { + tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id); + p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; + } + + p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi; + p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi; + + p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id); + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ]; + tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]; + p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]; + p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; + tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; + p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; + p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->rq_buffer_size; + p_data->q_params.rq_buffer_size = cpu_to_le16(tmp); + + if (fcoe_pf_params->is_target) { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); + } else { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + } + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + return rc; +} + +static int +qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL; + struct fcoe_conn_offload_ramrod_data *p_data; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 pq_id = 0, tmp; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_ofld; + p_data = &p_ramrod->offload_ramrod_data; + + /* Transmission PQ is the first of the PF */ + pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL); + 
p_conn->physical_q0 = cpu_to_le16(pq_id); + p_data->physical_q0 = cpu_to_le16(pq_id); + + p_data->conn_id = cpu_to_le16(p_conn->conn_id); + DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr); + DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr); + DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr); + DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr); + DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]); + DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]); + + DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr); + DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]); + DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]); + + p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo); + p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid); + p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi); + p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo); + p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid); + p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi); + + tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len); + p_data->tx_max_fc_pay_len = tmp; + tmp = cpu_to_le16(p_conn->e_d_tov_timer_val); + p_data->e_d_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rec_tov_timer_val); + p_data->rec_rr_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len); + p_data->rx_max_fc_pay_len = tmp; + + p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag); + p_data->s_id.addr_hi = p_conn->s_id.addr_hi; + p_data->s_id.addr_mid = p_conn->s_id.addr_mid; + p_data->s_id.addr_lo = p_conn->s_id.addr_lo; + p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3; + p_data->d_id.addr_hi = p_conn->d_id.addr_hi; + p_data->d_id.addr_mid = p_conn->d_id.addr_mid; + p_data->d_id.addr_lo = p_conn->d_id.addr_lo; + p_data->flags = p_conn->flags; + p_data->def_q_idx = p_conn->def_q_idx; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_terminate; + DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr, + p_conn->terminate_params); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u32 active_segs = 0; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_FCOE, 
&init_data); + if (rc) + return rc; + + active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK); + active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + void *p_addr; + u32 i; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + if (!list_empty(&p_hwfn->p_fcoe_info->free_list)) + p_conn = + list_first_entry(&p_hwfn->p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + *p_out_conn = p_conn; + return 0; + } + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_pbl_xferq; + p_conn->xferq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->xferq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->xferq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i]; + } + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->confq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_confq; + p_conn->confq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->confq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i]; + } + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + return 0; + +nomem_confq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) + if (p_conn->confq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); +nomem_xferq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_pbl_addr_virt_addr, + p_conn->xferq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) + if (p_conn->xferq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); +nomem_pbl_xferq: + kfree(p_conn); + return -ENOMEM; +} + +static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + u32 i; + + if (!p_conn) + return; + + if (p_conn->confq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + if (!p_conn->confq_addr_virt_addr[i]) + continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); + } + + if (p_conn->xferq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + 
p_conn->xferq_pbl_addr_virt_addr, + p_conn->xferq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + if (!p_conn->xferq_addr_virt_addr[i]) + continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); + } + kfree(p_conn); +} + +static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); +} + +static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + + TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); +} + +struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_fcoe_info *p_fcoe_info; + + /* Allocate LL2's set struct */ + p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL); + if (!p_fcoe_info) { + DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n"); + return NULL; + } + INIT_LIST_HEAD(&p_fcoe_info->free_list); + return p_fcoe_info; +} + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +{ + struct fcoe_task_context *p_task_ctx = NULL; + int rc; + u32 i; + + spin_lock_init(&p_fcoe_info->lock); + for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { + rc = qed_cxt_get_task_ctx(p_hwfn, i, + QED_CTX_WORKING_MEM, + (void **)&p_task_ctx); + if (rc) + continue; + + memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); + SET_FIELD(p_task_ctx->timer_context.logical_client_0, + TIMERS_CONTEXT_VALIDLC0, 1); + SET_FIELD(p_task_ctx->timer_context.logical_client_1, + TIMERS_CONTEXT_VALIDLC1, 1); + SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + } +} + +void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +{ + struct qed_fcoe_conn *p_conn = NULL; + + if (!p_fcoe_info) + return; + + while (!list_empty(&p_fcoe_info->free_list)) { + p_conn = list_first_entry(&p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (!p_conn) + break; + list_del(&p_conn->list_entry); + qed_fcoe_free_connection(p_hwfn, p_conn); + } + + kfree(p_fcoe_info); +} + +static int +qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_in_conn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + if (rc) + return rc; + + /* Use input connection [if provided] or allocate a new one */ + if (p_in_conn) { + p_conn = p_in_conn; + } else { + rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn); + if (rc) { + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + return rc; + } + } + + p_conn->icid = icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + *p_out_conn = p_conn; + + return rc; +} + +static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); 
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); +} + +static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_rx_stat tstats; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); + + p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt); + p_stats->fcoe_rx_data_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt); + p_stats->fcoe_rx_xfer_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt); + p_stats->fcoe_rx_other_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt); + + p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt); + p_stats->fcoe_silent_drop_pkt_rq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt); + p_stats->fcoe_silent_drop_pkt_crc_error_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt); + p_stats->fcoe_silent_drop_pkt_task_invalid_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt); + p_stats->fcoe_silent_drop_total_pkt_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt); +} + +static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_tx_stat pstats; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt); + p_stats->fcoe_tx_data_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt); + p_stats->fcoe_tx_xfer_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt); + p_stats->fcoe_tx_other_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt); +} + +static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, + struct qed_fcoe_stats *p_stats) +{ + struct qed_ptt *p_ptt; + + memset(p_stats, 0, sizeof(*p_stats)); + + p_ptt = qed_ptt_acquire(p_hwfn); + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EINVAL; + } + + _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats); + _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +struct qed_hash_fcoe_con { + struct hlist_node node; + struct qed_fcoe_conn *con; +}; + +static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + + info->primary_dbq_rq_addr = + qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); + info->secondary_bdq_rq_addr = + qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + + return rc; +} + +static void qed_register_fcoe_ops(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.fcoe = ops; + cdev->ops_cookie = cookie; +} + +static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_fcoe_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || 
(hash_con->con->icid != handle)) + return NULL; + + return hash_con; +} + +static int qed_fcoe_stop(struct qed_dev *cdev) +{ + int rc; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "fcoe already stopped\n"); + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop fcoe - not all connections were returned\n"); + return -EINVAL; + } + + /* Stop the fcoe */ + rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + + return rc; +} + +static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks) +{ + int rc; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "fcoe already started;\n"); + return 0; + } + + rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_NOTICE(cdev, "Failed to start fcoe\n"); + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (tasks) { + struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info), + GFP_ATOMIC); + + if (!tid_info) { + DP_NOTICE(cdev, + "Failed to allocate tasks information\n"); + qed_fcoe_stop(cdev); + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_fcoe_stop(cdev); + kfree(tid_info); + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_FCOE * sizeof(u8 *)); + + kfree(tid_info); + } + + return 0; +} + +static int qed_fcoe_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_fcoe_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to allocate hashed connection\n"); + return -ENOMEM; + } + + /* Acquire the connection */ + rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL, + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + return rc; + } + + /* Added the connection to hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + + if (p_doorbell) + *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_fcoe_con *hash_con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_fcoe_offload_conn(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->sq_curr_page_addr = conn_info->sq_curr_page_addr; + con->sq_next_page_addr = conn_info->sq_next_page_addr; + 
con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len; + con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val; + con->rec_tov_timer_val = conn_info->rec_tov_timer_val; + con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len; + con->vlan_tag = conn_info->vlan_tag; + con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3; + con->flags = conn_info->flags; + con->def_q_idx = conn_info->def_q_idx; + + con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) | + conn_info->src_mac[4]; + con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) | + conn_info->src_mac[2]; + con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) | + conn_info->src_mac[0]; + con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) | + conn_info->dst_mac[4]; + con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) | + conn_info->dst_mac[2]; + con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) | + conn_info->dst_mac[0]; + + con->s_id.addr_hi = conn_info->s_id.addr_hi; + con->s_id.addr_mid = conn_info->s_id.addr_mid; + con->s_id.addr_lo = conn_info->s_id.addr_lo; + con->d_id.addr_hi = conn_info->d_id.addr_hi; + con->d_id.addr_mid = conn_info->d_id.addr_mid; + con->d_id.addr_lo = conn_info->d_id.addr_lo; + + return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_destroy_conn(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + con->terminate_params = terminate_params; + + return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) +{ + return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats); +} + +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats) +{ + struct qed_fcoe_stats proto_stats; + + /* Retrieve FW statistics */ + memset(&proto_stats, 0, sizeof(proto_stats)); + if (qed_fcoe_stats(cdev, &proto_stats)) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, + "Failed to collect FCoE statistics\n"); + return; + } + + /* Translate FW statistics into struct */ + stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt + + proto_stats.fcoe_rx_xfer_pkt_cnt + + proto_stats.fcoe_rx_other_pkt_cnt; + stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt + + proto_stats.fcoe_tx_xfer_pkt_cnt + + proto_stats.fcoe_tx_other_pkt_cnt; + stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt; + + /* Request protocol driver to fill-in the rest */ + if (cdev->protocol_ops.fcoe && cdev->ops_cookie) { + struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe; + void *cookie = cdev->ops_cookie; + + if (ops->get_login_failures) + stats->login_failure = ops->get_login_failures(cookie); + } +} + +static const struct qed_fcoe_ops qed_fcoe_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_fcoe_dev_info, + .start = &qed_fcoe_start, + .stop = &qed_fcoe_stop, + .register_ops = &qed_register_fcoe_ops, + .acquire_conn = &qed_fcoe_acquire_conn, + .release_conn = &qed_fcoe_release_conn, + .offload_conn = &qed_fcoe_offload_conn, + .destroy_conn = &qed_fcoe_destroy_conn, + .get_stats = &qed_fcoe_stats, +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void) +{ + return &qed_fcoe_ops_pass; +} 
+EXPORT_SYMBOL(qed_get_fcoe_ops); + +void qed_put_fcoe_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_fcoe_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h new file mode 100644 index 000000000000..472af34a171d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -0,0 +1,87 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _QED_FCOE_H +#define _QED_FCOE_H +#include +#include +#include +#include +#include +#include +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +struct qed_fcoe_info { + spinlock_t lock; /* Connection resources. 
*/ + struct list_head free_list; +}; + +#if IS_ENABLED(CONFIG_QED_FCOE) +struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn); + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); + +void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats); +#else /* CONFIG_QED_FCOE */ +static inline struct qed_fcoe_info * +qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + return NULL; +} + +static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn, + struct qed_fcoe_info *p_fcoe_info) +{ +} + +static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn, + struct qed_fcoe_info *p_fcoe_info) +{ +} + +static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats) +{ +} +#endif /* CONFIG_QED_FCOE */ + +#ifdef CONFIG_QED_LL2 +extern const struct qed_common_ops qed_common_ops_pass; +extern const struct qed_ll2_ops qed_ll2_ops_pass; +#endif + +#endif /* _QED_FCOE_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 5d31189288e8..37c2bfb663bb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -43,10 +43,12 @@ #include #include #include +#include #include #include #include #include +#include struct qed_hwfn; struct qed_ptt; @@ -937,7 +939,7 @@ struct mstorm_vf_zone { enum personality_type { BAD_PERSONALITY_TYP, PERSONALITY_ISCSI, - PERSONALITY_RESERVED2, + PERSONALITY_FCOE, PERSONALITY_RDMA_AND_ETH, PERSONALITY_RESERVED3, PERSONALITY_CORE, @@ -3473,6 +3475,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[44].base + ((pf_id) * IRO[44].m1)) static const struct iro iro_arr[47] = { {0x0, 0x0, 0x0, 0x0, 0x8}, @@ -7407,6 +7413,769 @@ struct ystorm_roce_resp_conn_ag_ctx { __le32 reg3; }; +struct ystorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 cos; + u8 conf_version; + u8 eth_hdr_size; + __le16 stat_ram_addr; + __le16 mtu; + __le16 max_fc_payload_len; + __le16 tx_max_fc_pay_len; + u8 fcp_cmd_size; + u8 fcp_rsp_size; + __le16 mss; + struct regpair reserved; + u8 protection_info_flags; +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 + u8 dst_protection_per_mss; + u8 src_protection_per_mss; + u8 ptu_log_page_size; + u8 flags; +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 + u8 fcp_xfer_size; + u8 reserved3[2]; +}; + +struct fcoe_vlan_fields { + __le16 fields; +#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 +#define 
FCOE_VLAN_FIELDS_PRI_SHIFT 13 +}; + +union fcoe_vlan_field_union { + struct fcoe_vlan_fields fields; + __le16 val; +}; + +union fcoe_vlan_vif_field_union { + union fcoe_vlan_field_union vlan; + __le16 vif; +}; + +struct pstorm_fcoe_eth_context_section { + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; + union fcoe_vlan_vif_field_union vif_outer_vlan; + __le16 vif_outer_eth_type; + union fcoe_vlan_vif_field_union inner_vlan; + __le16 inner_eth_type; +}; + +struct pstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 cos; + u8 conf_version; + u8 rsrv; + __le16 stat_ram_addr; + __le16 mss; + struct regpair abts_cleanup_addr; + struct pstorm_fcoe_eth_context_section eth; + u8 sid_2; + u8 sid_1; + u8 sid_0; + u8 flags; +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4 + u8 did_2; + u8 did_1; + u8 did_0; + u8 src_mac_index; + __le16 rec_rr_tov_val; + u8 q_relative_offset; + u8 reserved1; +}; + +struct xstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 src_mac_index; + u8 conf_version; + u8 cached_wqes_avail; + __le16 stat_ram_addr; + u8 flags; +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 + u8 cached_wqes_offset; + u8 reserved2; + u8 eth_hdr_size; + u8 seq_id; + u8 max_conc_seqs; + __le16 num_pages_in_pbl; + __le16 reserved; + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 mtu; + __le16 tx_max_fc_pay_len; + __le16 max_fc_payload_len; + __le16 min_frame_size; + __le16 sq_pbl_next_index; + __le16 respq_pbl_next_index; + u8 fcp_cmd_byte_credit; + u8 fcp_rsp_byte_credit; + __le16 protection_info; +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 + __le16 xferq_pbl_next_index; + __le16 page_size; + u8 mid_seq; + u8 fcp_xfer_byte_credit; + u8 reserved1[2]; + struct fcoe_wqe cached_wqes[16]; +}; + +struct xstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 fcoe_state; + u8 flags0; +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define 
XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 + u8 flags7; +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 
0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 + u8 flags12; +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 sq_cons; + __le16 sq_prod; + __le16 xferq_prod; + __le16 xferq_cons; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 remain_io; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 respq_prod; + __le16 respq_cons; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; +}; + +struct ustorm_fcoe_conn_st_ctx { + struct regpair respq_pbl_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + 
__le16 respq_prod; + u8 reserved[2]; +}; + +struct tstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 fcoe_state; + u8 flags0; +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 + u8 flags1; +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define 
TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; +}; + +struct ustorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct tstorm_fcoe_conn_st_ctx { + __le16 stat_ram_addr; + __le16 rx_max_fc_payload_len; + __le16 e_d_tov_val; + u8 flags; +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 + u8 timers_cleanup_invocation_cnt; + __le32 reserved1[2]; + __le32 dst_mac_address_bytes0to3; + __le16 dst_mac_address_bytes4to5; + __le16 
ramrod_echo; + u8 flags1; +#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 +#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 + u8 q_relative_offset; + u8 bdq_resource_id; + u8 reserved0[5]; +}; + +struct mstorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct fcoe_mstorm_fcoe_conn_st_ctx_fp { + __le16 xfer_prod; + __le16 reserved1; + u8 protection_info; +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_SHIFT 1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_MASK 0x3F +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_SHIFT 2 + u8 q_relative_offset; + u8 reserved2[2]; +}; + +struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { + __le16 conn_id; + __le16 stat_ram_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + __le16 unsolicited_cq_count; + __le16 cmdq_count; + u8 bdq_resource_id; + u8 reserved0[3]; + struct regpair xferq_pbl_addr; + struct regpair reserved1; + struct regpair reserved2[3]; +}; + +struct mstorm_fcoe_conn_st_ctx { + struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp; + struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp; +}; + +struct fcoe_conn_context { + struct ystorm_fcoe_conn_st_ctx ystorm_st_context; + struct pstorm_fcoe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_fcoe_conn_st_ctx xstorm_st_context; + struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct regpair xstorm_ag_padding[6]; + struct ustorm_fcoe_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct tstorm_fcoe_conn_st_ctx tstorm_st_context; + struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_conn_st_ctx mstorm_st_context; +}; + +struct fcoe_conn_offload_ramrod_params { + struct fcoe_conn_offload_ramrod_data offload_ramrod_data; +}; + 
+struct fcoe_conn_terminate_ramrod_params { + struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data; +}; + +enum fcoe_event_type { + FCOE_EVENT_INIT_FUNC, + FCOE_EVENT_DESTROY_FUNC, + FCOE_EVENT_STAT_FUNC, + FCOE_EVENT_OFFLOAD_CONN, + FCOE_EVENT_TERMINATE_CONN, + FCOE_EVENT_ERROR, + MAX_FCOE_EVENT_TYPE +}; + +struct fcoe_init_ramrod_params { + struct fcoe_init_func_ramrod_data init_ramrod_data; +}; + +enum fcoe_ramrod_cmd_id { + FCOE_RAMROD_CMD_ID_INIT_FUNC, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + FCOE_RAMROD_CMD_ID_STAT_FUNC, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + MAX_FCOE_RAMROD_CMD_ID +}; + +struct fcoe_stat_ramrod_params { + struct fcoe_stat_ramrod_data stat_ramrod_data; +}; + +struct ystorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + struct ystorm_iscsi_conn_st_ctx { __le32 reserved[4]; }; @@ -8435,6 +9204,7 @@ struct public_func { #define FUNC_MF_CFG_PROTOCOL_SHIFT 4 #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 #define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 #define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 @@ -8529,6 +9299,13 @@ struct lan_stats_stc { u32 rserved; }; +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + struct ocbb_data_stc { u32 ocbb_host_addr; u32 ocsd_host_addr; @@ -8602,6 +9379,7 @@ union drv_union_data { struct drv_version_stc drv_version; struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; struct ocbb_data_stc ocbb_info; struct temperature_status_stc temp_info; struct resource_info resource; @@ -8905,6 +9683,7 @@ struct nvm_cfg1_glob { u32 misc_sig; u32 device_capabilities; #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 u32 power_dissipated; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 1f606516b6aa..899cad7f97ea 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -841,6 +841,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, if (pq_id > p_hwfn->qm_info.num_pf_rls) pq_id = p_hwfn->qm_info.offload_pq; break; + case PROTOCOLID_FCOE: + pq_id = p_hwfn->qm_info.offload_pq; + break; default: pq_id = 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 02c5d47cfc6d..9a0b9af10a57 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1130,6 +1130,9 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->qm_pq_id = cpu_to_le16(pq_id); switch (conn_type) { + case QED_LL2_TYPE_FCOE: + p_ramrod->conn_type = PROTOCOLID_FCOE; + break; case QED_LL2_TYPE_ISCSI: case QED_LL2_TYPE_ISCSI_OOO: p_ramrod->conn_type = PROTOCOLID_ISCSI; @@ -1458,6 +1461,15 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8906, 0, + QED_LLH_FILTER_ETHERTYPE); + qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8914, 0, + QED_LLH_FILTER_ETHERTYPE); + } + return rc; } @@ -1831,6 +1843,15 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8906, 0, + QED_LLH_FILTER_ETHERTYPE); + qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8914, 0, + QED_LLH_FILTER_ETHERTYPE); + } + return rc; } @@ -2039,6 +2060,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) } switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { + case QED_PCI_FCOE: + conn_type = QED_LL2_TYPE_FCOE; + gsi_enable = 0; + break; case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; gsi_enable = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index db3e4fc78e09..31a409033c41 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -54,7 +54,7 @@ enum qed_ll2_roce_flavor_type { }; enum qed_ll2_conn_type { - QED_LL2_TYPE_RESERVED, + QED_LL2_TYPE_FCOE, QED_LL2_TYPE_ISCSI, QED_LL2_TYPE_TEST, QED_LL2_TYPE_ISCSI_OOO, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 592e104687a7..942c03ca3b59 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -53,9 +53,11 @@ #include "qed_sp.h" #include "qed_dev_api.h" #include "qed_ll2.h" +#include "qed_fcoe.h" #include "qed_mcp.h" #include "qed_hw.h" #include "qed_selftest.h" +#include "qed_debug.h" #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) @@ -1603,6 +1605,8 @@ const struct qed_common_ops qed_common_ops_pass = { .sb_release = &qed_sb_release, .simd_handler_config = &qed_simd_handler_config, .simd_handler_clean = &qed_simd_handler_clean, + .dbg_grc = &qed_dbg_grc, + .dbg_grc_size = &qed_dbg_grc_size, .can_link_change = &qed_can_link_change, .set_link = &qed_set_link, .get_link = &qed_get_current_link, @@ -1636,6 +1640,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev, stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; stats->lan_stats.fcs_err 
= -1; break; + case QED_MCP_FCOE_STATS: + qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); + break; default: DP_ERR(cdev, "Invalid protocol type = %d\n", type); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index c8a877594032..7624a38cbd39 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1130,6 +1130,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; + case FUNC_MF_CFG_PROTOCOL_FCOE: + *p_proto = QED_PCI_FCOE; + break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); /* Fallthrough */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 363dce0f16b1..0792224ac219 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -37,6 +37,7 @@ #include #include #include +#include #include "qed_hsi.h" struct qed_mcp_link_speed_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 3b7edf6ff234..d59d9df60cd2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -110,6 +110,8 @@ 0x1e80000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE \ + 0x1f0164UL #define PRS_REG_SEARCH_TCP \ 0x1f0400UL #define PRS_REG_SEARCH_UDP \ @@ -120,6 +122,12 @@ 0x1f040cUL #define PRS_REG_SEARCH_OPENFLOW \ 0x1f0434UL +#define PRS_REG_SEARCH_TAG1 \ + 0x1f0444UL +#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \ + 0x1f0a0cUL +#define PRS_REG_SEARCH_TCP_FIRST_FRAG \ + 0x1f0410UL #define TM_REG_PF_ENABLE_CONN \ 0x2c043cUL #define TM_REG_PF_ENABLE_TASK \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 043882959606..30393ffaa8e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -109,6 +109,10 @@ union ramrod_data { struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq; struct roce_init_func_ramrod_data roce_init_func; + struct fcoe_init_ramrod_params fcoe_init; + struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld; + struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate; + struct fcoe_stat_ramrod_params fcoe_stat; struct iscsi_slow_path_hdr iscsi_empty; struct iscsi_init_ramrod_params iscsi_init; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 097a72987572..6fb80f9ef446 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -386,6 +386,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, case QED_PCI_ETH: p_ramrod->personality = PERSONALITY_ETH; break; + case QED_PCI_FCOE: + p_ramrod->personality = PERSONALITY_FCOE; + break; case QED_PCI_ISCSI: p_ramrod->personality = PERSONALITY_ISCSI; break; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index c33080baf38c..52966b9bfde3 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -62,6 +62,7 @@ #define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 #define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 #define RDMA_CDU_TASK_SEG_TYPE 1 #define FW_ASSERT_GENERAL_ATTN_IDX 32 @@ -205,6 +206,9 @@ #define 
DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 #define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 @@ -261,6 +265,7 @@ #define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) @@ -291,6 +296,9 @@ #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ +#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) @@ -728,7 +736,7 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { PROTOCOLID_ISCSI, - PROTOCOLID_RESERVED2, + PROTOCOLID_FCOE, PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h new file mode 100644 index 000000000000..2e417a45c5f7 --- /dev/null +++ b/include/linux/qed/fcoe_common.h @@ -0,0 +1,715 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef __FCOE_COMMON__ +#define __FCOE_COMMON__ +/*********************/ +/* FCOE FW CONSTANTS */ +/*********************/ + +#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 +#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600) + +struct fcoe_abts_pkt { + __le32 abts_rsp_fc_payload_lo; + __le16 abts_rsp_rx_id; + u8 abts_rsp_rctl; + u8 reserved2; +}; + +/* FCoE additional WQE (Sq/XferQ) information */ +union fcoe_additional_info_union { + __le32 previous_tid; + __le32 parent_tid; + __le32 burst_length; + __le32 seq_rec_updated_offset; +}; + +struct fcoe_exp_ro { + __le32 data_offset; + __le32 reserved; +}; + +union fcoe_cleanup_addr_exp_ro_union { + struct regpair abts_rsp_fc_payload_hi; + struct fcoe_exp_ro exp_ro; +}; + +/* FCoE Ramrod Command IDs */ +enum fcoe_completion_status { + FCOE_COMPLETION_STATUS_SUCCESS, + FCOE_COMPLETION_STATUS_FCOE_VER_ERR, + FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, + MAX_FCOE_COMPLETION_STATUS +}; + +struct fc_addr_nw { + u8 addr_lo; + u8 addr_mid; + u8 addr_hi; +}; + +/* FCoE connection offload */ +struct fcoe_conn_offload_ramrod_data { + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le16 tx_max_fc_pay_len; + __le16 e_d_tov_timer_val; + __le16 rx_max_fc_pay_len; + __le16 vlan_tag; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 + __le16 physical_q0; + __le16 rec_rr_tov_timer_val; + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 + __le16 conn_id; + u8 def_q_idx; + u8 reserved[5]; +}; + +/* FCoE terminate connection request */ +struct fcoe_conn_terminate_ramrod_data { + struct regpair terminate_params_addr; +}; + +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; +}; + +struct fcoe_slow_sgl_ctx { + struct regpair base_sgl_addr; + __le16 curr_sge_off; + __le16 remainder_num_sges; + __le16 curr_sgl_index; + __le16 reserved; +}; + +struct fcoe_sge { + struct regpair sge_addr; + __le16 size; + __le16 reserved0; + u8 reserved1[3]; + u8 is_valid_sge; +}; + +union fcoe_data_desc_ctx { + struct fcoe_fast_sgl_ctx fast; + struct 
fcoe_slow_sgl_ctx slow; + struct fcoe_sge single_sge; +}; + +union fcoe_dix_desc_ctx { + struct fcoe_slow_sgl_ctx dix_sgl; + struct fcoe_sge cached_dix_sge; +}; + +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + +struct fcoe_fcp_rsp_payload { + __le32 opaque[6]; +}; + +struct fcoe_fcp_xfer_payload { + __le32 opaque[3]; +}; + +/* FCoE firmware function init */ +struct fcoe_init_func_ramrod_data { + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; + __le16 mtu; + __le16 sq_num_pages_in_pbl; + __le32 reserved; +}; + +/* FCoE: Mode of the connection: Target or Initiator or both */ +enum fcoe_mode_type { + FCOE_INITIATOR_MODE = 0x0, + FCOE_TARGET_MODE = 0x1, + FCOE_BOTH_OR_NOT_CHOSEN = 0x3, + MAX_FCOE_MODE_TYPE +}; + +struct fcoe_mstorm_fcoe_task_st_ctx_fp { + __le16 flags; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15 + __le16 difDataResidue; + __le16 parent_id; + __le16 single_sge_saved_offset; + __le32 data_2_trns_rem; + __le32 offset_in_io; + union fcoe_dix_desc_ctx dix_desc; + union fcoe_data_desc_ctx data_desc; +}; + +struct fcoe_mstorm_fcoe_task_st_ctx_non_fp { + __le16 flags; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15 + u8 tx_rx_sgl_mode; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6 + u8 rsrv2; + __le32 num_prm_zero_read; + struct regpair rsp_buf_addr; +}; + +struct fcoe_rx_stat { + struct regpair fcoe_rx_byte_cnt; + struct regpair fcoe_rx_data_pkt_cnt; + struct regpair fcoe_rx_xfer_pkt_cnt; + struct regpair 
fcoe_rx_other_pkt_cnt; + __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; + __le32 fcoe_silent_drop_pkt_rq_full_cnt; + __le32 fcoe_silent_drop_pkt_crc_error_cnt; + __le32 fcoe_silent_drop_pkt_task_invalid_cnt; + __le32 fcoe_silent_drop_total_pkt_cnt; + __le32 rsrv; +}; + +enum fcoe_sgl_mode { + FCOE_SLOW_SGL, + FCOE_SINGLE_FAST_SGE, + FCOE_2_FAST_SGE, + FCOE_3_FAST_SGE, + FCOE_4_FAST_SGE, + FCOE_MUL_FAST_SGES, + MAX_FCOE_SGL_MODE +}; + +struct fcoe_stat_ramrod_data { + struct regpair stat_params_addr; +}; + +struct protection_info_ctx { + __le16 flags; +#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 +#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 +#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F +#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 + u8 dix_block_size; + u8 dst_size; +}; + +union protection_info_union_ctx { + struct protection_info_ctx info; + __le32 value; +}; + +struct fcp_rsp_payload_padded { + struct fcoe_fcp_rsp_payload rsp_payload; + __le32 reserved[2]; +}; + +struct fcp_xfer_payload_padded { + struct fcoe_fcp_xfer_payload xfer_payload; + __le32 reserved[5]; +}; + +struct fcoe_tx_data_params { + __le32 data_offset; + __le32 offset_in_io; + u8 flags; +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 +#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F +#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 + u8 dif_residual; + __le16 seq_cnt; + __le16 single_sge_saved_offset; + __le16 next_dif_offset; + __le16 seq_id; + __le16 reserved3; +}; + +struct fcoe_tx_mid_path_params { + __le32 parameter; + u8 r_ctl; + u8 type; + u8 cs_ctl; + u8 df_ctl; + __le16 rx_id; + __le16 ox_id; +}; + +struct fcoe_tx_params { + struct fcoe_tx_data_params data; + struct fcoe_tx_mid_path_params mid_path; +}; + +union fcoe_tx_info_union_ctx { + struct fcoe_fcp_cmd_payload fcp_cmd_payload; + struct fcp_rsp_payload_padded fcp_rsp_payload; + struct fcp_xfer_payload_padded fcp_xfer_payload; + struct fcoe_tx_params tx_params; +}; + +struct ystorm_fcoe_task_st_ctx { + u8 task_type; + u8 sgl_mode; +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3 + u8 cached_dix_sge; + u8 expect_first_xfer; + __le32 num_pbf_zero_write; + union protection_info_union_ctx protection_info_union; + __le32 data_2_trns_rem; + union fcoe_tx_info_union_ctx tx_info_union; + union fcoe_dix_desc_ctx dix_desc; + union fcoe_data_desc_ctx data_desc; + __le16 ox_id; + __le16 rx_id; + __le32 task_rety_identifier; + __le32 reserved1[2]; +}; + +struct ystorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define 
YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 rx_id; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 reg1; + __le32 reg2; +}; + +struct tstorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 + u8 flags1; +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 + u8 flags3; +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 cleanup_state; + __le16 last_sent_tid; + __le32 rec_rr_tov_exp_timeout; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 data_offset_end_of_seq; + __le32 data_offset_next; +}; + +struct fcoe_tstorm_fcoe_task_st_ctx_read_write { + union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; + __le16 flags; +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10 + __le16 seq_cnt; + u8 seq_id; + u8 ooo_rx_seq_id; + __le16 rx_id; + struct fcoe_abts_pkt abts_data; + __le32 e_d_tov_exp_timeout_val; + __le16 ooo_rx_seq_cnt; + __le16 reserved1; +}; + +struct fcoe_tstorm_fcoe_task_st_ctx_read_only { + u8 task_type; + u8 dev_type; + u8 conf_supported; + u8 glbl_q_num; + __le32 cid; + __le32 fcp_cmd_trns_size; + __le32 rsrv; +}; + +struct tstorm_fcoe_task_st_ctx { + struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; + struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; +}; + +struct mstorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 icid; + u8 flags0; +#define 
MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 cleanup_state; + __le32 received_bytes; + u8 byte3; + u8 glbl_q_num; + __le16 word1; + __le16 tid_to_xfer; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 expected_bytes; + __le32 reg2; +}; + +struct mstorm_fcoe_task_st_ctx { + struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp; + struct fcoe_mstorm_fcoe_task_st_ctx_fp fp; +}; + +struct ustorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define 
USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 global_cq_num; + __le32 reg3; + __le32 reg4; + __le32 reg5; +}; + +struct fcoe_task_context { + struct ystorm_fcoe_task_st_ctx ystorm_st_context; + struct tdif_task_context tdif_context; + struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct tstorm_fcoe_task_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_task_st_ctx mstorm_st_context; + struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct rdif_task_context rdif_context; +}; + +struct fcoe_tx_stat { + struct regpair fcoe_tx_byte_cnt; + struct regpair fcoe_tx_data_pkt_cnt; + struct regpair fcoe_tx_xfer_pkt_cnt; + struct regpair fcoe_tx_other_pkt_cnt; +}; + +struct fcoe_wqe { + __le16 task_id; + __le16 flags; +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x7 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 7 +#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1 +#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8 +#define FCOE_WQE_SUPER_IO_MASK 0x1 +#define FCOE_WQE_SUPER_IO_SHIFT 9 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10 +#define FCOE_WQE_RESERVED0_MASK 0x1F +#define FCOE_WQE_RESERVED0_SHIFT 11 + union fcoe_additional_info_union additional_info_union; +}; + +struct xfrqe_prot_flags { + u8 flags; +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 +#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 +#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 +}; + +struct fcoe_db_data { + u8 params; +#define FCOE_DB_DATA_DEST_MASK 0x3 +#define FCOE_DB_DATA_DEST_SHIFT 0 +#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 +#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 +#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define FCOE_DB_DATA_RESERVED_MASK 0x1 +#define FCOE_DB_DATA_RESERVED_SHIFT 5 +#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; +#endif /* __FCOE_COMMON__ */ diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h new file mode 100644 index 000000000000..bd6bcb809415 --- /dev/null +++ b/include/linux/qed/qed_fcoe_if.h @@ -0,0 +1,145 @@ +#ifndef _QED_FCOE_IF_H 
+#define _QED_FCOE_IF_H +#include +#include +struct qed_fcoe_stats { + u64 fcoe_rx_byte_cnt; + u64 fcoe_rx_data_pkt_cnt; + u64 fcoe_rx_xfer_pkt_cnt; + u64 fcoe_rx_other_pkt_cnt; + u32 fcoe_silent_drop_pkt_cmdq_full_cnt; + u32 fcoe_silent_drop_pkt_rq_full_cnt; + u32 fcoe_silent_drop_pkt_crc_error_cnt; + u32 fcoe_silent_drop_pkt_task_invalid_cnt; + u32 fcoe_silent_drop_total_pkt_cnt; + + u64 fcoe_tx_byte_cnt; + u64 fcoe_tx_data_pkt_cnt; + u64 fcoe_tx_xfer_pkt_cnt; + u64 fcoe_tx_other_pkt_cnt; +}; + +struct qed_dev_fcoe_info { + struct qed_dev_info common; + + void __iomem *primary_dbq_rq_addr; + void __iomem *secondary_bdq_rq_addr; +}; + +struct qed_fcoe_params_offload { + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +#define MAX_TID_BLOCKS_FCOE (512) +struct qed_fcoe_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_FCOE]; +}; + +struct qed_fcoe_cb_ops { + struct qed_common_cb_ops common; + u32 (*get_login_failures)(void *cookie); +}; + +void qed_fcoe_set_pf_params(struct qed_dev *cdev, + struct qed_fcoe_pf_params *params); + +/** + * struct qed_fcoe_ops - qed FCoE operations. + * @common: common operations pointer + * @fill_dev_info: fills FCoE specific information + * @param cdev + * @param info + * @return 0 on success, otherwise error value. + * @register_ops: register FCoE operations + * @param cdev + * @param ops - specified using qed_fcoe_cb_ops + * @param cookie - driver private + * @ll2: light L2 operations pointer + * @start: fcoe in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * return 0 on success, otherwise error value. + * @stop: stops fcoe in FW + * @param cdev + * return 0 on success, otherwise error value. + * @acquire_conn: acquire a new fcoe connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * return 0 on success, otherwise error value. + * @release_conn: release a previously acquired fcoe connection + * @param cdev + * @param handle - the connection handle. + * return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param terminate_params + * return 0 on success, otherwise error value. + * @get_stats: gets FCoE related statistics + * @param cdev + * @param stats - pointer to struct that will be filled + * with stats + * return 0 on success, error otherwise. 
+ */ +struct qed_fcoe_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie); + + const struct qed_ll2_ops *ll2; + + int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info); + int (*destroy_conn)(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params); + + int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats); +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void); +void qed_put_fcoe_ops(void); +#endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index d1576a2bcfc9..fde56c436f71 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -59,7 +59,6 @@ enum dcbx_protocol_type { #define QED_ROCE_PROTOCOL_INDEX (3) -#ifdef CONFIG_DCB #define QED_LLDP_CHASSIS_ID_STAT_LEN 4 #define QED_LLDP_PORT_ID_STAT_LEN 4 #define QED_DCBX_MAX_APP_PROTOCOL 32 @@ -155,7 +154,6 @@ struct qed_dcbx_get { struct qed_dcbx_remote_params remote; struct qed_dcbx_admin_params local; }; -#endif enum qed_led_mode { QED_LED_MODE_OFF, @@ -182,6 +180,38 @@ struct qed_eth_pf_params { u16 num_cons; }; +struct qed_fcoe_pf_params { + /* The following parameters are used during protocol-init */ + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[2]; + + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The following parameters are used during protocol-init */ + u16 sq_num_pbl_pages; + + u16 cq_num_entries; + u16 cmdq_num_entries; + u16 rq_buffer_log_size; + u16 mtu; + u16 dummy_icid; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; + u16 rq_buffer_size; + u8 num_cqs; /* num of global CQs */ + u8 log_page_size; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 is_target; + u8 bdq_pbl_num_entries[2]; +}; + /* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; @@ -245,6 +275,7 @@ struct qed_rdma_pf_params { struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; + struct qed_fcoe_pf_params fcoe_pf_params; struct qed_iscsi_pf_params iscsi_pf_params; struct qed_rdma_pf_params rdma_pf_params; }; @@ -305,6 +336,7 @@ enum qed_sb_type { enum qed_protocol { QED_PROTOCOL_ETH, QED_PROTOCOL_ISCSI, + QED_PROTOCOL_FCOE, }; enum qed_link_mode_bits { @@ -391,6 +423,7 @@ struct qed_int_info { struct qed_common_cb_ops { void (*link_update)(void *dev, struct qed_link_output *link); + void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); }; struct qed_selftest_ops { @@ -494,6 +527,10 @@ struct qed_common_ops { void (*simd_handler_clean)(struct qed_dev *cdev, int index); + int (*dbg_grc)(struct qed_dev *cdev, + void *buffer, u32 *num_dumped_bytes); + + int (*dbg_grc_size)(struct qed_dev *cdev); int (*dbg_all_data) (struct qed_dev *cdev, void *buffer); -- cgit v1.2.3 From 6c267b3dea09aebe84752cfedcab140c908830bb Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 15 Feb 2017 12:17:58 -0600 Subject: ibmvnic: Handle processing of CRQ messages 
in a tasklet Create a tasklet to process queued commands or messages received from firmware instead of processing them in the interrupt handler. Note that this handler does not process network traffic, but communications related to resource allocation and device settings. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 18 +++++++++++++++++- drivers/net/ethernet/ibm/ibmvnic.h | 1 + 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0c94e23985be..c573ba8654e2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3420,6 +3420,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, static irqreturn_t ibmvnic_interrupt(int irq, void *instance) { struct ibmvnic_adapter *adapter = instance; + unsigned long flags; + + spin_lock_irqsave(&adapter->crq.lock, flags); + vio_disable_interrupts(adapter->vdev); + tasklet_schedule(&adapter->tasklet); + spin_unlock_irqrestore(&adapter->crq.lock, flags); + return IRQ_HANDLED; +} + +static void ibmvnic_tasklet(void *data) +{ + struct ibmvnic_adapter *adapter = data; struct ibmvnic_crq_queue *queue = &adapter->crq; struct vio_dev *vdev = adapter->vdev; union ibmvnic_crq *crq; @@ -3445,7 +3457,6 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance) } } spin_unlock_irqrestore(&queue->lock, flags); - return IRQ_HANDLED; } static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) @@ -3500,6 +3511,7 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter) netdev_dbg(adapter->netdev, "Releasing CRQ\n"); free_irq(vdev->irq, adapter); + tasklet_kill(&adapter->tasklet); do { rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); @@ -3545,6 +3557,9 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) retrc = 0; + tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet, + (unsigned long)adapter); + netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, adapter); @@ -3566,6 +3581,7 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) return retrc; req_irq_failed: + tasklet_kill(&adapter->tasklet); do { rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index dd775d951b73..0d0edc36107a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1049,5 +1049,6 @@ struct ibmvnic_adapter { struct work_struct vnic_crq_init; struct work_struct ibmvnic_xport; + struct tasklet_struct tasklet; bool failover; }; -- cgit v1.2.3 From 901e040aa341d5b2c38fd65a473e953f7ca0bc0b Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 15 Feb 2017 12:17:59 -0600 Subject: ibmvnic: Use common counter for capabilities checks Two different counters were being used for capabilities requests and queries. These commands are not called at the same time so there is no reason a single counter cannot be used. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 71 ++++++++++++++++++++------------------ drivers/net/ethernet/ibm/ibmvnic.h | 3 +- 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c573ba8654e2..b9fbc2dd47cf 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1260,8 +1260,6 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) } adapter->rx_scrq = NULL; } - - adapter->requested_caps = 0; } static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter) @@ -1283,8 +1281,6 @@ static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter) adapter->rx_scrq[i]); adapter->rx_scrq = NULL; } - - adapter->requested_caps = 0; } static int disable_scrq_irq(struct ibmvnic_adapter *adapter, @@ -1572,30 +1568,36 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_tx_entries_per_subcrq); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); crq.request_capability.number = cpu_to_be64(adapter->req_mtu); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { @@ -1603,12 +1605,14 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(1); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(0); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } @@ -1960,112 +1964,112 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter) { union ibmvnic_crq crq; - atomic_set(&adapter->running_cap_queries, 0); + atomic_set(&adapter->running_cap_crqs, 0); memset(&crq, 0, sizeof(crq)); crq.query_capability.first = IBMVNIC_CRQ_CMD; crq.query_capability.cmd = QUERY_CAPABILITY; crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); - 
atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_MTU); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_MTU); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = 
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } @@ -2353,6 +2357,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, u64 *req_value; char *name; + atomic_dec(&adapter->running_cap_crqs); switch (be16_to_cpu(crq->request_capability_rsp.capability)) { case REQ_TX_QUEUES: req_value = &adapter->req_tx_queues; @@ -2407,7 +2412,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, } /* Done receiving requested capabilities, query IP offload support */ - if (++adapter->requested_caps == 7) { + if (atomic_read(&adapter->running_cap_crqs) == 0) { union ibmvnic_crq newcrq; int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = @@ -2548,9 +2553,9 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, struct device *dev = &adapter->vdev->dev; long rc; - atomic_dec(&adapter->running_cap_queries); + atomic_dec(&adapter->running_cap_crqs); netdev_dbg(netdev, "Outstanding queries: %d\n", - atomic_read(&adapter->running_cap_queries)); + atomic_read(&adapter->running_cap_crqs)); rc = crq->query_capability.rc.code; if (rc) { dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); @@ -2708,7 +2713,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, } out: - if (atomic_read(&adapter->running_cap_queries) == 0) + if (atomic_read(&adapter->running_cap_crqs) == 0) init_sub_crqs(adapter, 0); /* We're done querying the capabilities, initialize sub-crqs */ } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 0d0edc36107a..504d05c452e6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -976,11 +976,10 @@ struct ibmvnic_adapter { dma_addr_t login_rsp_buf_token; int login_rsp_buf_sz; - atomic_t running_cap_queries; + atomic_t running_cap_crqs; struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; - int requested_caps; bool renegotiate; /* rx structs */ -- cgit v1.2.3 From 249168ad07cd23b5201dd2f09fd450ae4176f96c Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 15 Feb 2017 12:18:00 -0600 Subject: ibmvnic: Make CRQ interrupt tasklet wait for all capabilities crqs After sending device capability queries and requests to the vNIC Server, an interrupt is triggered and the responses are written to the driver's CRQ response buffer. Since the interrupt can be triggered before all responses are written and visible to the partition, there is a danger that the interrupt handler or tasklet can terminate before all responses are read, resulting in a failure to initialize the device. To avoid this scenario, when capability commands are sent, we set a flag that will be checked in the following interrupt tasklet that will handle the capability responses from the server. Once all responses have been handled, the flag is disabled; and the tasklet is allowed to terminate. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 16 ++++++++++++++-- drivers/net/ethernet/ibm/ibmvnic.h | 1 + 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b9fbc2dd47cf..9198e6bd5160 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2418,6 +2418,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = &adapter->ip_offload_buf; + adapter->wait_capability = false; adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, buf_sz, DMA_FROM_DEVICE); @@ -2713,9 +2714,11 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, } out: - if (atomic_read(&adapter->running_cap_crqs) == 0) + if (atomic_read(&adapter->running_cap_crqs) == 0) { + adapter->wait_capability = false; init_sub_crqs(adapter, 0); /* We're done querying the capabilities, initialize sub-crqs */ + } } static void handle_control_ras_rsp(union ibmvnic_crq *crq, @@ -3458,9 +3461,18 @@ static void ibmvnic_tasklet(void *data) ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } else { - done = true; + /* remain in tasklet until all + * capabilities responses are received + */ + if (!adapter->wait_capability) + done = true; } } + /* if capabilities CRQ's were sent in this tasklet, the following + * tasklet must wait until all responses are received + */ + if (atomic_read(&adapter->running_cap_crqs) != 0) + adapter->wait_capability = true; spin_unlock_irqrestore(&queue->lock, flags); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 504d05c452e6..422824f1f42a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -977,6 +977,7 @@ struct ibmvnic_adapter { int login_rsp_buf_sz; atomic_t running_cap_crqs; + bool wait_capability; struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; -- cgit v1.2.3 From 6c07ec0fa5712b01d0967cf74129fa9b4d234af8 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Thu, 16 Feb 2017 18:04:14 +0300 Subject: uapi: fix linux/ipv6_route.h userspace compilation errors Include <linux/in6.h> to fix the following linux/ipv6_route.h userspace compilation errors: /usr/include/linux/ipv6_route.h:42:19: error: field 'rtmsg_dst' has incomplete type struct in6_addr rtmsg_dst; /usr/include/linux/ipv6_route.h:43:19: error: field 'rtmsg_src' has incomplete type struct in6_addr rtmsg_src; /usr/include/linux/ipv6_route.h:44:19: error: field 'rtmsg_gateway' has incomplete type struct in6_addr rtmsg_gateway; Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller --- include/uapi/linux/ipv6_route.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h index f6598d1c886e..85bbb1799df3 100644 --- a/include/uapi/linux/ipv6_route.h +++ b/include/uapi/linux/ipv6_route.h @@ -14,6 +14,7 @@ #define _UAPI_LINUX_IPV6_ROUTE_H #include <linux/types.h> +#include <linux/in6.h> /* For struct in6_addr. */ #define RTF_DEFAULT 0x00010000 /* default - learned via ND */ #define RTF_ALLONLINK 0x00020000 /* (deprecated and will be removed) -- cgit v1.2.3 From 72aa107df6a275cf03359934ca5799a2be7a1bf7 Mon Sep 17 00:00:00 2001 From: "Dmitry V.
Levin" Date: Thu, 16 Feb 2017 18:04:29 +0300 Subject: uapi: fix linux/mroute6.h userspace compilation errors Include to fix the following linux/mroute6.h userspace compilation errors: /usr/include/linux/mroute6.h:80:22: error: field 'mf6cc_origin' has incomplete type struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */ /usr/include/linux/mroute6.h:81:22: error: field 'mf6cc_mcastgrp' has incomplete type struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */ /usr/include/linux/mroute6.h:91:22: error: field 'src' has incomplete type struct sockaddr_in6 src; /usr/include/linux/mroute6.h:92:22: error: field 'grp' has incomplete type struct sockaddr_in6 grp; /usr/include/linux/mroute6.h:132:18: error: field 'im6_src' has incomplete type struct in6_addr im6_src, im6_dst; /usr/include/linux/mroute6.h:132:27: error: field 'im6_dst' has incomplete type struct in6_addr im6_src, im6_dst; Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller --- include/uapi/linux/mroute6.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index 5062fb5751e1..ed5721148768 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -4,6 +4,7 @@ #include #include #include +#include /* For struct sockaddr_in6. */ /* * Based on the MROUTING 3.5 defines primarily to keep -- cgit v1.2.3 From bcb41c6bced1ee778d23c53a6b4807fb08cf5540 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Thu, 16 Feb 2017 18:04:46 +0300 Subject: uapi: fix linux/mroute.h userspace compilation errors Include to fix the following linux/mroute.h userspace compilation errors: /usr/include/linux/mroute.h:58:18: error: field 'vifc_lcl_addr' has incomplete type struct in_addr vifc_lcl_addr; /* Local interface address */ /usr/include/linux/mroute.h:61:17: error: field 'vifc_rmt_addr' has incomplete type struct in_addr vifc_rmt_addr; /* IPIP tunnel addr */ /usr/include/linux/mroute.h:72:17: error: field 'mfcc_origin' has incomplete type struct in_addr mfcc_origin; /* Origin of mcast */ /usr/include/linux/mroute.h:73:17: error: field 'mfcc_mcastgrp' has incomplete type struct in_addr mfcc_mcastgrp; /* Group in question */ /usr/include/linux/mroute.h:84:17: error: field 'src' has incomplete type struct in_addr src; /usr/include/linux/mroute.h:85:17: error: field 'grp' has incomplete type struct in_addr grp; /usr/include/linux/mroute.h:109:17: error: field 'im_src' has incomplete type struct in_addr im_src,im_dst; /usr/include/linux/mroute.h:109:24: error: field 'im_dst' has incomplete type struct in_addr im_src,im_dst; Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller --- include/uapi/linux/mroute.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h index cf943016930f..1fe4c1e7d66e 100644 --- a/include/uapi/linux/mroute.h +++ b/include/uapi/linux/mroute.h @@ -3,6 +3,7 @@ #include #include +#include /* For struct in_addr. */ /* Based on the MROUTING 3.5 defines primarily to keep * source compatibility with BSD. -- cgit v1.2.3 From feb0869d90e51ce8b6fd8a46588465b1b5a26d09 Mon Sep 17 00:00:00 2001 From: "Dmitry V. 
Levin" Date: Thu, 16 Feb 2017 18:05:13 +0300 Subject: uapi: fix linux/rds.h userspace compilation errors Consistently use types from linux/types.h to fix the following linux/rds.h userspace compilation errors: /usr/include/linux/rds.h:106:2: error: unknown type name 'uint8_t' uint8_t name[32]; /usr/include/linux/rds.h:107:2: error: unknown type name 'uint64_t' uint64_t value; /usr/include/linux/rds.h:117:2: error: unknown type name 'uint64_t' uint64_t next_tx_seq; /usr/include/linux/rds.h:118:2: error: unknown type name 'uint64_t' uint64_t next_rx_seq; /usr/include/linux/rds.h:121:2: error: unknown type name 'uint8_t' uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ /usr/include/linux/rds.h:122:2: error: unknown type name 'uint8_t' uint8_t flags; /usr/include/linux/rds.h:129:2: error: unknown type name 'uint64_t' uint64_t seq; /usr/include/linux/rds.h:130:2: error: unknown type name 'uint32_t' uint32_t len; /usr/include/linux/rds.h:135:2: error: unknown type name 'uint8_t' uint8_t flags; /usr/include/linux/rds.h:139:2: error: unknown type name 'uint32_t' uint32_t sndbuf; /usr/include/linux/rds.h:144:2: error: unknown type name 'uint32_t' uint32_t rcvbuf; /usr/include/linux/rds.h:145:2: error: unknown type name 'uint64_t' uint64_t inum; /usr/include/linux/rds.h:153:2: error: unknown type name 'uint64_t' uint64_t hdr_rem; /usr/include/linux/rds.h:154:2: error: unknown type name 'uint64_t' uint64_t data_rem; /usr/include/linux/rds.h:155:2: error: unknown type name 'uint32_t' uint32_t last_sent_nxt; /usr/include/linux/rds.h:156:2: error: unknown type name 'uint32_t' uint32_t last_expected_una; /usr/include/linux/rds.h:157:2: error: unknown type name 'uint32_t' uint32_t last_seen_una; /usr/include/linux/rds.h:164:2: error: unknown type name 'uint8_t' uint8_t src_gid[RDS_IB_GID_LEN]; /usr/include/linux/rds.h:165:2: error: unknown type name 'uint8_t' uint8_t dst_gid[RDS_IB_GID_LEN]; /usr/include/linux/rds.h:167:2: error: unknown type name 'uint32_t' uint32_t max_send_wr; /usr/include/linux/rds.h:168:2: error: unknown type name 'uint32_t' uint32_t max_recv_wr; /usr/include/linux/rds.h:169:2: error: unknown type name 'uint32_t' uint32_t max_send_sge; /usr/include/linux/rds.h:170:2: error: unknown type name 'uint32_t' uint32_t rdma_mr_max; /usr/include/linux/rds.h:171:2: error: unknown type name 'uint32_t' uint32_t rdma_mr_size; /usr/include/linux/rds.h:212:9: error: unknown type name 'uint64_t' typedef uint64_t rds_rdma_cookie_t; /usr/include/linux/rds.h:215:2: error: unknown type name 'uint64_t' uint64_t addr; /usr/include/linux/rds.h:216:2: error: unknown type name 'uint64_t' uint64_t bytes; /usr/include/linux/rds.h:221:2: error: unknown type name 'uint64_t' uint64_t cookie_addr; /usr/include/linux/rds.h:222:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:228:2: error: unknown type name 'uint64_t' uint64_t cookie_addr; /usr/include/linux/rds.h:229:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:234:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:240:2: error: unknown type name 'uint64_t' uint64_t local_vec_addr; /usr/include/linux/rds.h:241:2: error: unknown type name 'uint64_t' uint64_t nr_local; /usr/include/linux/rds.h:242:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:243:2: error: unknown type name 'uint64_t' uint64_t user_token; /usr/include/linux/rds.h:248:2: error: unknown type name 'uint64_t' uint64_t local_addr; /usr/include/linux/rds.h:249:2: error: 
unknown type name 'uint64_t' uint64_t remote_addr; /usr/include/linux/rds.h:252:4: error: unknown type name 'uint64_t' uint64_t compare; /usr/include/linux/rds.h:253:4: error: unknown type name 'uint64_t' uint64_t swap; /usr/include/linux/rds.h:256:4: error: unknown type name 'uint64_t' uint64_t add; /usr/include/linux/rds.h:259:4: error: unknown type name 'uint64_t' uint64_t compare; /usr/include/linux/rds.h:260:4: error: unknown type name 'uint64_t' uint64_t swap; /usr/include/linux/rds.h:261:4: error: unknown type name 'uint64_t' uint64_t compare_mask; /usr/include/linux/rds.h:262:4: error: unknown type name 'uint64_t' uint64_t swap_mask; /usr/include/linux/rds.h:265:4: error: unknown type name 'uint64_t' uint64_t add; /usr/include/linux/rds.h:266:4: error: unknown type name 'uint64_t' uint64_t nocarry_mask; /usr/include/linux/rds.h:269:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:270:2: error: unknown type name 'uint64_t' uint64_t user_token; /usr/include/linux/rds.h:274:2: error: unknown type name 'uint64_t' uint64_t user_token; /usr/include/linux/rds.h:275:2: error: unknown type name 'int32_t' int32_t status; Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller --- include/uapi/linux/rds.h | 104 +++++++++++++++++++++++------------------------ 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 3833113ab2c0..f42d2112ccee 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -117,8 +117,8 @@ #define RDS_INFO_LAST 10010 struct rds_info_counter { - uint8_t name[32]; - uint64_t value; + __u8 name[32]; + __u64 value; } __attribute__((packed)); #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 @@ -128,35 +128,35 @@ struct rds_info_counter { #define TRANSNAMSIZ 16 struct rds_info_connection { - uint64_t next_tx_seq; - uint64_t next_rx_seq; + __u64 next_tx_seq; + __u64 next_rx_seq; __be32 laddr; __be32 faddr; - uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ - uint8_t flags; + __u8 transport[TRANSNAMSIZ]; /* null term ascii */ + __u8 flags; } __attribute__((packed)); #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 struct rds_info_message { - uint64_t seq; - uint32_t len; + __u64 seq; + __u32 len; __be32 laddr; __be32 faddr; __be16 lport; __be16 fport; - uint8_t flags; + __u8 flags; } __attribute__((packed)); struct rds_info_socket { - uint32_t sndbuf; + __u32 sndbuf; __be32 bound_addr; __be32 connected_addr; __be16 bound_port; __be16 connected_port; - uint32_t rcvbuf; - uint64_t inum; + __u32 rcvbuf; + __u64 inum; } __attribute__((packed)); struct rds_info_tcp_socket { @@ -164,25 +164,25 @@ struct rds_info_tcp_socket { __be16 local_port; __be32 peer_addr; __be16 peer_port; - uint64_t hdr_rem; - uint64_t data_rem; - uint32_t last_sent_nxt; - uint32_t last_expected_una; - uint32_t last_seen_una; + __u64 hdr_rem; + __u64 data_rem; + __u32 last_sent_nxt; + __u32 last_expected_una; + __u32 last_seen_una; } __attribute__((packed)); #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { __be32 src_addr; __be32 dst_addr; - uint8_t src_gid[RDS_IB_GID_LEN]; - uint8_t dst_gid[RDS_IB_GID_LEN]; - - uint32_t max_send_wr; - uint32_t max_recv_wr; - uint32_t max_send_sge; - uint32_t rdma_mr_max; - uint32_t rdma_mr_size; + __u8 src_gid[RDS_IB_GID_LEN]; + __u8 dst_gid[RDS_IB_GID_LEN]; + + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_send_sge; + __u32 rdma_mr_max; + __u32 rdma_mr_size; }; /* RDS message Receive Path Latency points */ 
@@ -242,70 +242,70 @@ struct rds_cmsg_rx_trace { * (so that the application does not have to worry about * alignment). */ -typedef uint64_t rds_rdma_cookie_t; +typedef __u64 rds_rdma_cookie_t; struct rds_iovec { - uint64_t addr; - uint64_t bytes; + __u64 addr; + __u64 bytes; }; struct rds_get_mr_args { struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_get_mr_for_dest_args { struct sockaddr_storage dest_addr; struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_free_mr_args { rds_rdma_cookie_t cookie; - uint64_t flags; + __u64 flags; }; struct rds_rdma_args { rds_rdma_cookie_t cookie; struct rds_iovec remote_vec; - uint64_t local_vec_addr; - uint64_t nr_local; - uint64_t flags; - uint64_t user_token; + __u64 local_vec_addr; + __u64 nr_local; + __u64 flags; + __u64 user_token; }; struct rds_atomic_args { rds_rdma_cookie_t cookie; - uint64_t local_addr; - uint64_t remote_addr; + __u64 local_addr; + __u64 remote_addr; union { struct { - uint64_t compare; - uint64_t swap; + __u64 compare; + __u64 swap; } cswp; struct { - uint64_t add; + __u64 add; } fadd; struct { - uint64_t compare; - uint64_t swap; - uint64_t compare_mask; - uint64_t swap_mask; + __u64 compare; + __u64 swap; + __u64 compare_mask; + __u64 swap_mask; } m_cswp; struct { - uint64_t add; - uint64_t nocarry_mask; + __u64 add; + __u64 nocarry_mask; } m_fadd; }; - uint64_t flags; - uint64_t user_token; + __u64 flags; + __u64 user_token; }; struct rds_rdma_notify { - uint64_t user_token; - int32_t status; + __u64 user_token; + __s32 status; }; #define RDS_RDMA_SUCCESS 0 -- cgit v1.2.3 From 1786dbf3702e33ce3afd2d3dbe630bd04b1d2e58 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Thu, 16 Feb 2017 18:05:45 +0300 Subject: uapi: fix linux/rds.h userspace compilation error On the kernel side, sockaddr_storage is #define'd to __kernel_sockaddr_storage. Replacing struct sockaddr_storage with struct __kernel_sockaddr_storage defined by fixes the following linux/rds.h userspace compilation error: /usr/include/linux/rds.h:226:26: error: field 'dest_addr' has incomplete type struct sockaddr_storage dest_addr; Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller --- include/uapi/linux/rds.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index f42d2112ccee..47c03ca5c404 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -35,6 +35,7 @@ #define _LINUX_RDS_H #include +#include /* For __kernel_sockaddr_storage. */ #define RDS_IB_ABI_VERSION 0x301 @@ -256,7 +257,7 @@ struct rds_get_mr_args { }; struct rds_get_mr_for_dest_args { - struct sockaddr_storage dest_addr; + struct __kernel_sockaddr_storage dest_addr; struct rds_iovec vec; __u64 cookie_addr; __u64 flags; -- cgit v1.2.3 From f5a57723371f11ff953c4ba71184dc00e65469fc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Feb 2017 15:23:27 -0800 Subject: mlx4: fix potential divide by 0 in mlx4_en_auto_moderation() 1) In the case where rate == priv->pkt_rate_low == priv->pkt_rate_high, mlx4_en_auto_moderation() does a divide by zero. 2) We want to properly change the moderation parameters if rx_frames was changed (like in ethtool -C eth0 rx-frames 16) Signed-off-by: Eric Dumazet Reviewed-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 748e9f65c386..afe4444e5434 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1382,6 +1382,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) { unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); + u32 pkt_rate_high, pkt_rate_low; struct mlx4_en_cq *cq; unsigned long packets; unsigned long rate; @@ -1395,37 +1396,40 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) return; + pkt_rate_low = READ_ONCE(priv->pkt_rate_low); + pkt_rate_high = READ_ONCE(priv->pkt_rate_high); + for (ring = 0; ring < priv->rx_ring_num; ring++) { rx_packets = READ_ONCE(priv->rx_ring[ring]->packets); rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); - rx_pkt_diff = ((unsigned long) (rx_packets - - priv->last_moder_packets[ring])); + rx_pkt_diff = rx_packets - priv->last_moder_packets[ring]; packets = rx_pkt_diff; rate = packets * HZ / period; - avg_pkt_size = packets ? ((unsigned long) (rx_bytes - - priv->last_moder_bytes[ring])) / packets : 0; + avg_pkt_size = packets ? (rx_bytes - + priv->last_moder_bytes[ring]) / packets : 0; /* Apply auto-moderation only when packet rate * exceeds a rate that it matters */ if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { - if (rate < priv->pkt_rate_low) + if (rate <= pkt_rate_low) moder_time = priv->rx_usecs_low; - else if (rate > priv->pkt_rate_high) + else if (rate >= pkt_rate_high) moder_time = priv->rx_usecs_high; else - moder_time = (rate - priv->pkt_rate_low) * + moder_time = (rate - pkt_rate_low) * (priv->rx_usecs_high - priv->rx_usecs_low) / - (priv->pkt_rate_high - priv->pkt_rate_low) + + (pkt_rate_high - pkt_rate_low) + priv->rx_usecs_low; } else { moder_time = priv->rx_usecs_low; } - if (moder_time != priv->last_moder_time[ring]) { + cq = priv->rx_cq[ring]; + if (moder_time != priv->last_moder_time[ring] || + cq->moder_cnt != priv->rx_frames) { priv->last_moder_time[ring] = moder_time; - cq = priv->rx_cq[ring]; cq->moder_time = moder_time; cq->moder_cnt = priv->rx_frames; err = mlx4_en_set_cq_moder(priv, cq); -- cgit v1.2.3 From 61845d2072150e896f2f18f1d879956e453ee8fd Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 17 Feb 2017 11:33:09 +0800 Subject: virtio-net: batch stats updating We already have counters for sent/recv packets and sent/recv bytes. Doing a batched update to reduce the number of u64_stats_update_begin/end(). Take care not to bother with stats update when called speculatively. Cc: Willem de Bruijn Signed-off-by: Jason Wang Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 11e28530c83c..05a83dbc910d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -719,13 +719,13 @@ xdp_xmit: return NULL; } -static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len) +static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, unsigned int len) { struct net_device *dev = vi->dev; - struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; + int ret; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); @@ -739,7 +739,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } else { dev_kfree_skb(buf); } - return; + return 0; } if (vi->mergeable_rx_bufs) @@ -750,14 +750,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, skb = receive_small(dev, vi, rq, buf, len); if (unlikely(!skb)) - return; + return 0; hdr = skb_vnet_hdr(skb); - u64_stats_update_begin(&stats->rx_syncp); - stats->rx_bytes += skb->len; - stats->rx_packets++; - u64_stats_update_end(&stats->rx_syncp); + ret = skb->len; if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -775,11 +772,12 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, ntohs(skb->protocol), skb->len, skb->pkt_type); napi_gro_receive(&rq->napi, skb); - return; + return ret; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); + return 0; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -994,12 +992,13 @@ static void refill_work(struct work_struct *work) static int virtnet_receive(struct receive_queue *rq, int budget) { struct virtnet_info *vi = rq->vq->vdev->priv; - unsigned int len, received = 0; + unsigned int len, received = 0, bytes = 0; void *buf; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - receive_buf(vi, rq, buf, len); + bytes += receive_buf(vi, rq, buf, len); received++; } @@ -1008,6 +1007,11 @@ static int virtnet_receive(struct receive_queue *rq, int budget) schedule_delayed_work(&vi->refill, 0); } + u64_stats_update_begin(&stats->rx_syncp); + stats->rx_bytes += bytes; + stats->rx_packets += received; + u64_stats_update_end(&stats->rx_syncp); + return received; } @@ -1056,17 +1060,28 @@ static void free_old_xmit_skbs(struct send_queue *sq) unsigned int len; struct virtnet_info *vi = sq->vq->vdev->priv; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + unsigned int packets = 0; + unsigned int bytes = 0; while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); - u64_stats_update_begin(&stats->tx_syncp); - stats->tx_bytes += skb->len; - stats->tx_packets++; - u64_stats_update_end(&stats->tx_syncp); + bytes += skb->len; + packets++; dev_kfree_skb_any(skb); } + + /* Avoid overhead when no packets have been processed + * happens when called speculatively from start_xmit. 
+ */ + if (!packets) + return; + + u64_stats_update_begin(&stats->tx_syncp); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + u64_stats_update_end(&stats->tx_syncp); } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) -- cgit v1.2.3 From bd4b9f8b4af7be15fd162276ec9a2a1d49f10270 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 12:45:37 +0800 Subject: sctp: add support for generating stream reconf resp chunk This patch is to define Re-configuration Response Parameter described in rfc6525 section 4.4. As optional fields are only for SSN/TSN Reset Request Parameter, it uses another function to make that. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 24 ++++++++++++++++ include/net/sctp/sm.h | 7 +++++ net/sctp/sm_make_chunk.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 105 insertions(+) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index b055788de0cf..7a4804c4a593 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -749,4 +749,28 @@ struct sctp_strreset_addstrm { __u16 reserved; }; +enum { + SCTP_STRRESET_NOTHING_TO_DO = 0x00, + SCTP_STRRESET_PERFORMED = 0x01, + SCTP_STRRESET_DENIED = 0x02, + SCTP_STRRESET_ERR_WRONG_SSN = 0x03, + SCTP_STRRESET_ERR_IN_PROGRESS = 0x04, + SCTP_STRRESET_ERR_BAD_SEQNO = 0x05, + SCTP_STRRESET_IN_PROGRESS = 0x06, +}; + +struct sctp_strreset_resp { + sctp_paramhdr_t param_hdr; + __u32 response_seq; + __u32 result; +}; + +struct sctp_strreset_resptsn { + sctp_paramhdr_t param_hdr; + __u32 response_seq; + __u32 result; + __u32 senders_next_tsn; + __u32 receivers_next_tsn; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 3675fde3a26e..8a85dd7fc1ec 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -270,6 +270,13 @@ struct sctp_chunk *sctp_make_strreset_tsnreq( struct sctp_chunk *sctp_make_strreset_addstrm( const struct sctp_association *asoc, __u16 out, __u16 in); +struct sctp_chunk *sctp_make_strreset_resp( + const struct sctp_association *asoc, + __u32 result, __u32 sn); +struct sctp_chunk *sctp_make_strreset_tsnresp( + struct sctp_association *asoc, + __u32 result, __u32 sn, + __u32 sender_tsn, __u32 receiver_tsn); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 7f8dbf2c6cee..9680c580759b 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3733,3 +3733,77 @@ struct sctp_chunk *sctp_make_strreset_addstrm( return retval; } + +/* RE-CONFIG 4.4 (RESP) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 16 | Parameter Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Response Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Result | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_strreset_resp( + const struct sctp_association *asoc, + __u32 result, __u32 sn) +{ + struct sctp_strreset_resp resp; + __u16 length = sizeof(resp); + struct sctp_chunk *retval; + + retval = sctp_make_reconf(asoc, length); + if (!retval) + return NULL; + + resp.param_hdr.type = SCTP_PARAM_RESET_RESPONSE; + resp.param_hdr.length = htons(length); + resp.response_seq = htonl(sn); + resp.result = 
htonl(result); + + sctp_addto_chunk(retval, sizeof(resp), &resp); + + return retval; +} + +/* RE-CONFIG 4.4 OPTIONAL (TSNRESP) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 16 | Parameter Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Response Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Result | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Sender's Next TSN (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Receiver's Next TSN (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_strreset_tsnresp( + struct sctp_association *asoc, + __u32 result, __u32 sn, + __u32 sender_tsn, __u32 receiver_tsn) +{ + struct sctp_strreset_resptsn tsnresp; + __u16 length = sizeof(tsnresp); + struct sctp_chunk *retval; + + retval = sctp_make_reconf(asoc, length); + if (!retval) + return NULL; + + tsnresp.param_hdr.type = SCTP_PARAM_RESET_RESPONSE; + tsnresp.param_hdr.length = htons(length); + + tsnresp.response_seq = htonl(sn); + tsnresp.result = htonl(result); + tsnresp.senders_next_tsn = htonl(sender_tsn); + tsnresp.receivers_next_tsn = htonl(receiver_tsn); + + sctp_addto_chunk(retval, sizeof(tsnresp), &tsnresp); + + return retval; +} -- cgit v1.2.3 From 35ea82d611da59f8bea44a37996b3b11bb1d3fd7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 12:45:38 +0800 Subject: sctp: add support for generating stream ssn reset event notification This patch is to add Stream Reset Event described in rfc6525 section 6.1.1. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/ulpevent.h | 4 ++++ include/uapi/linux/sctp.h | 16 ++++++++++++++++ net/sctp/ulpevent.c | 29 +++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 2c098cd7e7e2..324b5965fc4d 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -128,6 +128,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey( struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( const struct sctp_association *asoc, gfp_t gfp); +struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( + const struct sctp_association *asoc, __u16 flags, + __u16 stream_num, __u16 *stream_list, gfp_t gfp); + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index a91a9cccbae6..d3ae381fcf33 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -490,6 +490,18 @@ struct sctp_sender_dry_event { sctp_assoc_t sender_dry_assoc_id; }; +#define SCTP_STREAM_RESET_INCOMING_SSN 0x0001 +#define SCTP_STREAM_RESET_OUTGOING_SSN 0x0002 +#define SCTP_STREAM_RESET_DENIED 0x0004 +#define SCTP_STREAM_RESET_FAILED 0x0008 +struct sctp_stream_reset_event { + __u16 strreset_type; + __u16 strreset_flags; + __u32 strreset_length; + sctp_assoc_t strreset_assoc_id; + __u16 strreset_stream_list[]; +}; + /* * Described in Section 7.3 * Ancillary Data and Notification Interest Options @@ -505,6 +517,7 @@ struct sctp_event_subscribe { __u8 sctp_adaptation_layer_event; __u8 sctp_authentication_event; __u8 sctp_sender_dry_event; + __u8 sctp_stream_reset_event; }; /* @@ -529,6 +542,7 @@ union sctp_notification { struct sctp_pdapi_event sn_pdapi_event; struct sctp_authkey_event sn_authkey_event; struct sctp_sender_dry_event sn_sender_dry_event; + struct sctp_stream_reset_event sn_strreset_event; }; /* Section 5.3.1 @@ -556,6 +570,8 @@ enum sctp_sn_type { #define SCTP_AUTHENTICATION_INDICATION SCTP_AUTHENTICATION_EVENT SCTP_SENDER_DRY_EVENT, #define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT + SCTP_STREAM_RESET_EVENT, +#define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT }; /* Notification error codes used to fill up the error fields in some diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index bea00058ce35..c8881bc542a0 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -854,6 +854,35 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( return event; } +struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( + const struct sctp_association *asoc, __u16 flags, __u16 stream_num, + __u16 *stream_list, gfp_t gfp) +{ + struct sctp_stream_reset_event *sreset; + struct sctp_ulpevent *event; + struct sk_buff *skb; + int length, i; + + length = sizeof(struct sctp_stream_reset_event) + 2 * stream_num; + event = sctp_ulpevent_new(length, MSG_NOTIFICATION, gfp); + if (!event) + return NULL; + + skb = sctp_event2skb(event); + sreset = (struct sctp_stream_reset_event *)skb_put(skb, length); + + sreset->strreset_type = SCTP_STREAM_RESET_EVENT; + sreset->strreset_flags = flags; + sreset->strreset_length = length; + sctp_ulpevent_set_owner(event, asoc); + sreset->strreset_assoc_id = sctp_assoc2id(asoc); + + for (i = 0; i < stream_num; i++) + sreset->strreset_stream_list[i] = ntohs(stream_list[i]); + + return event; +} + /* Return the notification type, assuming this is a notification * event. 
*/ -- cgit v1.2.3 From 81054476453640159149cb6704e834893e16736b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 12:45:39 +0800 Subject: sctp: implement receiver-side procedures for the Outgoing SSN Reset Request Parameter This patch is to implement Receiver-Side Procedures for the Outgoing SSN Reset Request Parameter described in rfc6525 section 5.2.2. Note that some checks must be after request_seq check, as even those checks fail, strreset_inseq still has to be increase by 1. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 6 +++ net/sctp/stream.c | 113 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 8a85dd7fc1ec..c0f9bea4fa92 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -280,6 +280,12 @@ struct sctp_chunk *sctp_make_strreset_tsnresp( void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); +/* Prototypes for stream-processing functions. */ +struct sctp_chunk *sctp_process_strreset_outreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); + /* Prototypes for statetable processing. */ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, diff --git a/net/sctp/stream.c b/net/sctp/stream.c index eb02490245ba..531fcc4cd112 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -294,3 +294,116 @@ int sctp_send_add_streams(struct sctp_association *asoc, out: return retval; } + +static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param( + struct sctp_association *asoc, __u32 resp_seq) +{ + struct sctp_chunk *chunk = asoc->strreset_chunk; + struct sctp_reconf_chunk *hdr; + union sctp_params param; + + if (ntohl(resp_seq) != asoc->strreset_outseq || !chunk) + return NULL; + + hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; + sctp_walk_params(param, hdr, params) { + /* sctp_strreset_tsnreq is actually the basic structure + * of all stream reconf params, so it's safe to use it + * to access request_seq. + */ + struct sctp_strreset_tsnreq *req = param.v; + + if (req->request_seq == resp_seq) + return param.v; + } + + return NULL; +} + +struct sctp_chunk *sctp_process_strreset_outreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + struct sctp_strreset_outreq *outreq = param.v; + struct sctp_stream *stream = asoc->stream; + __u16 i, nums, flags = 0, *str_p = NULL; + __u32 result = SCTP_STRRESET_DENIED; + __u32 request_seq; + + request_seq = ntohl(outreq->request_seq); + + if (ntohl(outreq->send_reset_at_tsn) > + sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) { + result = SCTP_STRRESET_IN_PROGRESS; + goto out; + } + + if (request_seq > asoc->strreset_inseq) { + result = SCTP_STRRESET_ERR_BAD_SEQNO; + goto out; + } else if (request_seq == asoc->strreset_inseq) { + asoc->strreset_inseq++; + } + + /* Check strreset_enable after inseq inc, as sender cannot tell + * the peer doesn't enable strreset after receiving response with + * result denied, as well as to keep consistent with bsd. 
+ */ + if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) + goto out; + + if (asoc->strreset_chunk) { + sctp_paramhdr_t *param_hdr; + struct sctp_transport *t; + + param_hdr = sctp_chunk_lookup_strreset_param( + asoc, outreq->response_seq); + if (!param_hdr || param_hdr->type != + SCTP_PARAM_RESET_IN_REQUEST) { + /* same process with outstanding isn't 0 */ + result = SCTP_STRRESET_ERR_IN_PROGRESS; + goto out; + } + + asoc->strreset_outstanding--; + asoc->strreset_outseq++; + + if (!asoc->strreset_outstanding) { + t = asoc->strreset_chunk->transport; + if (del_timer(&t->reconf_timer)) + sctp_transport_put(t); + + sctp_chunk_put(asoc->strreset_chunk); + asoc->strreset_chunk = NULL; + } + + flags = SCTP_STREAM_RESET_INCOMING_SSN; + } + + nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2; + if (nums) { + str_p = outreq->list_of_streams; + for (i = 0; i < nums; i++) { + if (ntohs(str_p[i]) >= stream->incnt) { + result = SCTP_STRRESET_ERR_WRONG_SSN; + goto out; + } + } + + for (i = 0; i < nums; i++) + stream->in[ntohs(str_p[i])].ssn = 0; + } else { + for (i = 0; i < stream->incnt; i++) + stream->in[i].ssn = 0; + } + + result = SCTP_STRRESET_PERFORMED; + + *evp = sctp_ulpevent_make_stream_reset_event(asoc, + flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p, + GFP_ATOMIC); + +out: + return sctp_make_strreset_resp(asoc, result, request_seq); +} -- cgit v1.2.3 From 16e1a91965b02fe24d24e8b8d7b2245d29ed6a70 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 12:45:40 +0800 Subject: sctp: implement receiver-side procedures for the Incoming SSN Reset Request Parameter This patch is to implement Receiver-Side Procedures for the Incoming SSN Reset Request Parameter described in rfc6525 section 5.2.3. It's also to move str_list endian conversion out of sctp_make_strreset_req, so that sctp_make_strreset_req can be used more conveniently to process inreq. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 4 +++ net/sctp/sm_make_chunk.c | 8 +----- net/sctp/stream.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 7 deletions(-) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index c0f9bea4fa92..f6a828d3d072 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -285,6 +285,10 @@ struct sctp_chunk *sctp_process_strreset_outreq( struct sctp_association *asoc, union sctp_params param, struct sctp_ulpevent **evp); +struct sctp_chunk *sctp_process_strreset_inreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); /* Prototypes for statetable processing. 
*/ diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 9680c580759b..60d9fdcef440 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3617,7 +3617,7 @@ struct sctp_chunk *sctp_make_strreset_req( __u16 stream_len = stream_num * 2; struct sctp_strreset_inreq inreq; struct sctp_chunk *retval; - __u16 outlen, inlen, i; + __u16 outlen, inlen; outlen = (sizeof(outreq) + stream_len) * out; inlen = (sizeof(inreq) + stream_len) * in; @@ -3626,9 +3626,6 @@ struct sctp_chunk *sctp_make_strreset_req( if (!retval) return NULL; - for (i = 0; i < stream_num; i++) - stream_list[i] = htons(stream_list[i]); - if (outlen) { outreq.param_hdr.type = SCTP_PARAM_RESET_OUT_REQUEST; outreq.param_hdr.length = htons(outlen); @@ -3653,9 +3650,6 @@ struct sctp_chunk *sctp_make_strreset_req( sctp_addto_chunk(retval, stream_len, stream_list); } - for (i = 0; i < stream_num; i++) - stream_list[i] = ntohs(stream_list[i]); - return retval; } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 531fcc4cd112..1c6cc04fa3a4 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -135,7 +135,14 @@ int sctp_send_reset_streams(struct sctp_association *asoc, if (str_list[i] >= stream->incnt) goto out; + for (i = 0; i < str_nums; i++) + str_list[i] = htons(str_list[i]); + chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); + + for (i = 0; i < str_nums; i++) + str_list[i] = ntohs(str_list[i]); + if (!chunk) { retval = -ENOMEM; goto out; @@ -407,3 +414,66 @@ struct sctp_chunk *sctp_process_strreset_outreq( out: return sctp_make_strreset_resp(asoc, result, request_seq); } + +struct sctp_chunk *sctp_process_strreset_inreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + struct sctp_strreset_inreq *inreq = param.v; + struct sctp_stream *stream = asoc->stream; + __u32 result = SCTP_STRRESET_DENIED; + struct sctp_chunk *chunk = NULL; + __u16 i, nums, *str_p; + __u32 request_seq; + + request_seq = ntohl(inreq->request_seq); + if (request_seq > asoc->strreset_inseq) { + result = SCTP_STRRESET_ERR_BAD_SEQNO; + goto out; + } else if (request_seq == asoc->strreset_inseq) { + asoc->strreset_inseq++; + } + + if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) + goto out; + + if (asoc->strreset_outstanding) { + result = SCTP_STRRESET_ERR_IN_PROGRESS; + goto out; + } + + nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2; + str_p = inreq->list_of_streams; + for (i = 0; i < nums; i++) { + if (ntohs(str_p[i]) >= stream->outcnt) { + result = SCTP_STRRESET_ERR_WRONG_SSN; + goto out; + } + } + + chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0); + if (!chunk) + goto out; + + if (nums) + for (i = 0; i < nums; i++) + stream->out[ntohs(str_p[i])].state = + SCTP_STREAM_CLOSED; + else + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_CLOSED; + + asoc->strreset_chunk = chunk; + asoc->strreset_outstanding = 1; + sctp_chunk_hold(asoc->strreset_chunk); + + *evp = sctp_ulpevent_make_stream_reset_event(asoc, + SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); + +out: + if (!chunk) + chunk = sctp_make_strreset_resp(asoc, result, request_seq); + + return chunk; +} -- cgit v1.2.3 From ea62504373fa9ff3ced27e07e1eac041888ecc46 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 12:45:41 +0800 Subject: sctp: add a function to verify the sctp reconf chunk This patch is to add a function sctp_verify_reconf to do some length check and multi-params check for sctp stream reconf according to rfc6525 section 
3.1. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 3 +++ net/sctp/sm_make_chunk.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index f6a828d3d072..ca9fbfb2084c 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -277,6 +277,9 @@ struct sctp_chunk *sctp_make_strreset_tsnresp( struct sctp_association *asoc, __u32 result, __u32 sn, __u32 sender_tsn, __u32 receiver_tsn); +bool sctp_verify_reconf(const struct sctp_association *asoc, + struct sctp_chunk *chunk, + struct sctp_paramhdr **errp); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 60d9fdcef440..969a30c7bb54 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3801,3 +3801,62 @@ struct sctp_chunk *sctp_make_strreset_tsnresp( return retval; } + +bool sctp_verify_reconf(const struct sctp_association *asoc, + struct sctp_chunk *chunk, + struct sctp_paramhdr **errp) +{ + struct sctp_reconf_chunk *hdr; + union sctp_params param; + __u16 last = 0, cnt = 0; + + hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; + sctp_walk_params(param, hdr, params) { + __u16 length = ntohs(param.p->length); + + *errp = param.p; + if (cnt++ > 2) + return false; + switch (param.p->type) { + case SCTP_PARAM_RESET_OUT_REQUEST: + if (length < sizeof(struct sctp_strreset_outreq) || + (last && last != SCTP_PARAM_RESET_RESPONSE && + last != SCTP_PARAM_RESET_IN_REQUEST)) + return false; + break; + case SCTP_PARAM_RESET_IN_REQUEST: + if (length < sizeof(struct sctp_strreset_inreq) || + (last && last != SCTP_PARAM_RESET_OUT_REQUEST)) + return false; + break; + case SCTP_PARAM_RESET_RESPONSE: + if ((length != sizeof(struct sctp_strreset_resp) && + length != sizeof(struct sctp_strreset_resptsn)) || + (last && last != SCTP_PARAM_RESET_RESPONSE && + last != SCTP_PARAM_RESET_OUT_REQUEST)) + return false; + break; + case SCTP_PARAM_RESET_TSN_REQUEST: + if (length != + sizeof(struct sctp_strreset_tsnreq) || last) + return false; + break; + case SCTP_PARAM_RESET_ADD_IN_STREAMS: + if (length != sizeof(struct sctp_strreset_addstrm) || + (last && last != SCTP_PARAM_RESET_ADD_OUT_STREAMS)) + return false; + break; + case SCTP_PARAM_RESET_ADD_OUT_STREAMS: + if (length != sizeof(struct sctp_strreset_addstrm) || + (last && last != SCTP_PARAM_RESET_ADD_IN_STREAMS)) + return false; + break; + default: + return false; + } + + last = param.p->type; + } + + return true; +} -- cgit v1.2.3 From 2040d3d7a36265d4a26af4e19a6a8a6154ab4bcc Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 12:45:42 +0800 Subject: sctp: add reconf chunk process This patch is to add a function to process the incoming reconf chunk, in which it verifies the chunk, and traverses the param and process it with the right function one by one. sctp_sf_do_reconf would be the process function of reconf chunk event. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/sm.h | 1 + net/sctp/sm_statefuns.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index ca9fbfb2084c..b6f682ec184a 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -135,6 +135,7 @@ sctp_state_fn_t sctp_sf_do_8_5_1_E_sa; sctp_state_fn_t sctp_sf_cookie_echoed_err; sctp_state_fn_t sctp_sf_do_asconf; sctp_state_fn_t sctp_sf_do_asconf_ack; +sctp_state_fn_t sctp_sf_do_reconf; sctp_state_fn_t sctp_sf_do_9_2_reshutack; sctp_state_fn_t sctp_sf_eat_fwd_tsn; sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d8798ddda726..e03bb1aab4d0 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3834,6 +3834,60 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, return SCTP_DISPOSITION_DISCARD; } +/* RE-CONFIG Section 5.2 Upon reception of an RECONF Chunk. */ +sctp_disposition_t sctp_sf_do_reconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_paramhdr *err_param = NULL; + struct sctp_chunk *chunk = arg; + struct sctp_reconf_chunk *hdr; + union sctp_params param; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + /* Make sure that the RECONF chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(*hdr))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + if (!sctp_verify_reconf(asoc, chunk, &err_param)) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + (void *)err_param, commands); + + hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; + sctp_walk_params(param, hdr, params) { + struct sctp_chunk *reply = NULL; + struct sctp_ulpevent *ev = NULL; + + if (param.p->type == SCTP_PARAM_RESET_OUT_REQUEST) + reply = sctp_process_strreset_outreq( + (struct sctp_association *)asoc, param, &ev); + else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST) + reply = sctp_process_strreset_inreq( + (struct sctp_association *)asoc, param, &ev); + /* More handles for other types will be added here, by now it + * just ignores other types. + */ + + if (ev) + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); + + if (reply) + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(reply)); + } + + return SCTP_DISPOSITION_CONSUME; +} + /* * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP * -- cgit v1.2.3 From d884aa635be6247a1bc5b0fa60d8a16b5f48e279 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 12:45:43 +0800 Subject: sctp: add reconf chunk event This patch is to add reconf chunk event based on the sctp event frame in rx path, it will call sctp_sf_do_reconf to process the reconf chunk. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/constants.h | 3 +++ net/sctp/sm_statetable.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 3567c971cf3b..b07a745ab69f 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -60,11 +60,14 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM }; #define SCTP_NUM_PRSCTP_CHUNK_TYPES 1 +#define SCTP_NUM_RECONF_CHUNK_TYPES 1 + #define SCTP_NUM_AUTH_CHUNK_TYPES 1 #define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNK_TYPES + \ SCTP_NUM_ADDIP_CHUNK_TYPES +\ SCTP_NUM_PRSCTP_CHUNK_TYPES +\ + SCTP_NUM_RECONF_CHUNK_TYPES +\ SCTP_NUM_AUTH_CHUNK_TYPES) /* These are the different flavours of event. */ diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index b5438b4f6c1e..419b18ebb056 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -482,6 +482,32 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN TYPE_SCTP_FWD_TSN, }; /*state_fn_t prsctp_chunk_event_table[][] */ +#define TYPE_SCTP_RECONF { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_reconf), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_reconf), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_RECONF */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. + */ +static const sctp_sm_table_entry_t reconf_chunk_event_table[SCTP_NUM_RECONF_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_RECONF, +}; /*state_fn_t reconf_chunk_event_table[][] */ + #define TYPE_SCTP_AUTH { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ @@ -964,6 +990,10 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net, return &addip_chunk_event_table[1][state]; } + if (net->sctp.reconf_enable) + if (cid == SCTP_CID_RECONF) + return &reconf_chunk_event_table[0][state]; + if (net->sctp.auth_enable) { if (cid == SCTP_CID_AUTH) return &auth_chunk_event_table[0][state]; -- cgit v1.2.3 From a4d69a4c3ca6be699ed8cdc4683381ce44b85c90 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 16:41:45 +0800 Subject: sctp: sctp_transport_dst_check should check if transport pmtu is dst mtu Now when sending a packet, sctp_transport_dst_check will check if dst is obsolete by calling ipv4/ip6_dst_check. But they return obsolete only when adding a new cache, after that when the cache's pmtu is updated again, it will not trigger transport->dst/pmtu's update. It can be reproduced by reducing route's pmtu twice. At the 1st time client will add a new cache, and transport->pathmtu gets updated as sctp_transport_dst_check finds it's obsolete. But at the 2nd time, cache's mtu is updated, sctp client will never send out any packet, because transport->pmtu has no chance to update. This patch is to fix this by also checking if transport pmtu is dst mtu in sctp_transport_dst_check, so that transport->pmtu can be updated on time. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/sctp.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 6dfc5536a3e6..1f71ee5ab518 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -596,7 +596,9 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && !dst_check(t->dst, t->dst_cookie)) + if (t->dst && (!dst_check(t->dst, t->dst_cookie) || + t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), + SCTP_DEFAULT_MINSEGMENT))) sctp_transport_dst_release(t); return t->dst; -- cgit v1.2.3 From d4ff816e9707572c1c83def42f32105ddcabe1c0 Mon Sep 17 00:00:00 2001 From: "david.wu" Date: Fri, 17 Feb 2017 20:55:11 +0800 Subject: net: ethernet: stmmac: dwmac-rk: Add RK3328 gmac support Add constants and callback functions for the dwmac on rk3328 socs. As can be seen, the base structure is the same, only registers and the bits in them moved slightly. Signed-off-by: david.wu Signed-off-by: David S. Miller --- .../devicetree/bindings/net/rockchip-dwmac.txt | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 117 +++++++++++++++++++++ 2 files changed, 118 insertions(+) diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 95383c5131fc..8f427550720a 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt @@ -6,6 +6,7 @@ Required properties: - compatible: should be "rockchip,-gamc" "rockchip,rk3228-gmac": found on RK322x SoCs "rockchip,rk3288-gmac": found on RK3288 SoCs + "rockchip,rk3328-gmac": found on RK3328 SoCs "rockchip,rk3366-gmac": found on RK3366 SoCs "rockchip,rk3368-gmac": found on RK3368 SoCs "rockchip,rk3399-gmac": found on RK3399 SoCs diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index fa6e9704c077..e5db6ac36235 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -302,6 +302,122 @@ static const struct rk_gmac_ops rk3288_ops = { .set_rmii_speed = rk3288_set_rmii_speed, }; +#define RK3328_GRF_MAC_CON0 0x0900 +#define RK3328_GRF_MAC_CON1 0x0904 + +/* RK3328_GRF_MAC_CON0 */ +#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +/* RK3328_GRF_MAC_CON1 */ +#define RK3328_GMAC_PHY_INTF_SEL_RGMII \ + (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) +#define RK3328_GMAC_PHY_INTF_SEL_RMII \ + (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3) +#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RK3328_GMAC_SPEED_100M GRF_BIT(2) +#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) +#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12)) +#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12)) +#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12)) +#define RK3328_GMAC_RMII_MODE GRF_BIT(9) +#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9) +#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0) +#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) +#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) +#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0) + +static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, + int 
tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_PHY_INTF_SEL_RGMII | + RK3328_GMAC_RMII_MODE_CLR | + RK3328_GMAC_RXCLK_DLY_ENABLE | + RK3328_GMAC_TXCLK_DLY_ENABLE); + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0, + RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3328_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_PHY_INTF_SEL_RMII | + RK3328_GMAC_RMII_MODE); + + /* set MAC to RMII mode */ + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11)); +} + +static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_RMII_CLK_2_5M | + RK3328_GMAC_SPEED_10M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_RMII_CLK_25M | + RK3328_GMAC_SPEED_100M); + else + dev_err(dev, "unknown speed value for RMII! speed=%d", speed); +} + +static const struct rk_gmac_ops rk3328_ops = { + .set_to_rgmii = rk3328_set_to_rgmii, + .set_to_rmii = rk3328_set_to_rmii, + .set_rgmii_speed = rk3328_set_rgmii_speed, + .set_rmii_speed = rk3328_set_rmii_speed, +}; + #define RK3366_GRF_SOC_CON6 0x0418 #define RK3366_GRF_SOC_CON7 0x041c @@ -1006,6 +1122,7 @@ static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, + { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, -- cgit v1.2.3 From 7e1392fb94a7f35421eca52bedead284429f4fed Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 17 Feb 2017 12:04:12 -0800 Subject: of_mdio: Add "broadcom,bcm5241" to the whitelist. Some Cavium dev boards have firmware which doesn't supply a proper ethernet-phy-ieee802.3-c22" compatible property. Restore these boards to working order by whitelisting this compatible value. Signed-off-by: David Daney Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/of/of_mdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 262281bd68fa..0b2979816dbf 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -147,6 +147,7 @@ EXPORT_SYMBOL(of_mdio_parse_addr); */ static const struct of_device_id whitelist_phys[] = { { .compatible = "brcm,40nm-ephy" }, + { .compatible = "broadcom,bcm5241" }, { .compatible = "marvell,88E1111", }, { .compatible = "marvell,88e1116", }, { .compatible = "marvell,88e1118", }, -- cgit v1.2.3 From cd2b708750582e327789d8fb07c6eb5f79f7759f Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Feb 2017 16:35:24 +0800 Subject: sctp: check duplicate node before inserting a new transport sctp has changed to use rhlist for transport rhashtable since commit 7fda702f9315 ("sctp: use new rhlist interface on sctp transport rhashtable"). But rhltable_insert_key doesn't check the duplicate node when inserting a node, unlike rhashtable_lookup_insert_key. It may cause duplicate assoc/transport in rhashtable. like: client (addr A, B) server (addr X, Y) connect to X INIT (1) ------------> connect to Y INIT (2) ------------> INIT_ACK (1) <------------ INIT_ACK (2) <------------ After sending INIT (2), one transport will be created and hashed into rhashtable. But when receiving INIT_ACK (1) and processing the address params, another transport will be created and hashed into rhashtable with the same addr Y and EP as the last transport. This will confuse the assoc/transport's lookup. This patch is to fix it by returning err if any duplicate node exists before inserting it. Fixes: 7fda702f9315 ("sctp: use new rhlist interface on sctp transport rhashtable") Reported-by: Fabio M. Di Nitto Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/input.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/net/sctp/input.c b/net/sctp/input.c index 704ad19c1565..fc458968fe4b 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -872,6 +872,8 @@ void sctp_transport_hashtable_destroy(void) int sctp_hash_transport(struct sctp_transport *t) { + struct sctp_transport *transport; + struct rhlist_head *tmp, *list; struct sctp_hash_cmp_arg arg; int err; @@ -882,8 +884,19 @@ int sctp_hash_transport(struct sctp_transport *t) arg.paddr = &t->ipaddr; arg.lport = htons(t->asoc->base.bind_addr.port); + list = rhltable_lookup(&sctp_transport_hashtable, &arg, + sctp_hash_params); + + rhl_for_each_entry_rcu(transport, tmp, list, node) + if (transport->asoc->ep == t->asoc->ep) { + err = -EEXIST; + goto out; + } + err = rhltable_insert_key(&sctp_transport_hashtable, &arg, &t->node, sctp_hash_params); + +out: if (err) pr_err_once("insert transport fail, errno %d\n", err); -- cgit v1.2.3 From 7b9a88a390dacb37b051a7b09b9a08f546edf5eb Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 17 Feb 2017 16:07:33 -0800 Subject: net: phy: Fix PHY unbind crash The PHY library does not deal very well with bind and unbind events. The first thing we would see is that we were not properly canceling the PHY state machine workqueue, so we would be crashing while dereferencing phydev->drv since there is no driver attached anymore. Suggested-by: Russell King Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8c8e15b8739d..7f9319586b7e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1784,6 +1784,8 @@ static int phy_remove(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); + cancel_delayed_work_sync(&phydev->state_queue); + mutex_lock(&phydev->lock); phydev->state = PHY_DOWN; mutex_unlock(&phydev->lock); -- cgit v1.2.3 From 25149ef9d25cafc4f4fe9f4461f18f876f397417 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 17 Feb 2017 16:07:34 -0800 Subject: net: phy: Check phydev->drv There are number of function calls, originating from user-space, typically through the Ethernet driver that can make us crash by dereferencing phydev->drv which will be NULL once we unbind the driver from the PHY. There are still functional issues that prevent an unbind then rebind to work, but these will be addressed separately. Suggested-by: Russell King Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 26 ++++++++++++++++++++++---- drivers/net/phy/phy_device.c | 6 +++--- include/linux/phy.h | 3 +++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7cc1b7dcfe05..d6f7838455dd 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -580,7 +580,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) return 0; case SIOCSHWTSTAMP: - if (phydev->drv->hwtstamp) + if (phydev->drv && phydev->drv->hwtstamp) return phydev->drv->hwtstamp(phydev, ifr); /* fall through */ @@ -603,6 +603,9 @@ int phy_start_aneg(struct phy_device *phydev) { int err; + if (!phydev->drv) + return -EIO; + mutex_lock(&phydev->lock); if (AUTONEG_DISABLE == phydev->autoneg) @@ -975,7 +978,7 @@ void phy_state_machine(struct work_struct *work) old_state = phydev->state; - if (phydev->drv->link_change_notify) + if (phydev->drv && phydev->drv->link_change_notify) phydev->drv->link_change_notify(phydev); switch (phydev->state) { @@ -1286,6 +1289,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect); */ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) { + if (!phydev->drv) + return -EIO; + /* According to 802.3az,the EEE is supported only in full duplex-mode. * Also EEE feature is active when core is operating with MII, GMII * or RGMII (all kinds). 
Internal PHYs are also allowed to proceed and @@ -1363,6 +1369,9 @@ EXPORT_SYMBOL(phy_init_eee); */ int phy_get_eee_err(struct phy_device *phydev) { + if (!phydev->drv) + return -EIO; + return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS); } EXPORT_SYMBOL(phy_get_eee_err); @@ -1379,6 +1388,9 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val; + if (!phydev->drv) + return -EIO; + /* Get Supported EEE */ val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS); if (val < 0) @@ -1412,6 +1424,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); + if (!phydev->drv) + return -EIO; + /* Mask prohibited EEE modes */ val &= ~phydev->eee_broken_modes; @@ -1423,7 +1438,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee); int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - if (phydev->drv->set_wol) + if (phydev->drv && phydev->drv->set_wol) return phydev->drv->set_wol(phydev, wol); return -EOPNOTSUPP; @@ -1432,7 +1447,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol); void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - if (phydev->drv->get_wol) + if (phydev->drv && phydev->drv->get_wol) phydev->drv->get_wol(phydev, wol); } EXPORT_SYMBOL(phy_ethtool_get_wol); @@ -1468,6 +1483,9 @@ int phy_ethtool_nway_reset(struct net_device *ndev) if (!phydev) return -ENODEV; + if (!phydev->drv) + return -EIO; + return genphy_restart_aneg(phydev); } EXPORT_SYMBOL(phy_ethtool_nway_reset); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 7f9319586b7e..daec6555f3b1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1094,7 +1094,7 @@ int phy_suspend(struct phy_device *phydev) if (wol.wolopts) return -EBUSY; - if (phydrv->suspend) + if (phydev->drv && phydrv->suspend) ret = phydrv->suspend(phydev); if (ret) @@ -1111,7 +1111,7 @@ int phy_resume(struct phy_device *phydev) struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; - if (phydrv->resume) + if (phydev->drv && phydrv->resume) ret = phydrv->resume(phydev); if (ret) @@ -1790,7 +1790,7 @@ static int phy_remove(struct device *dev) phydev->state = PHY_DOWN; mutex_unlock(&phydev->lock); - if (phydev->drv->remove) + if (phydev->drv && phydev->drv->remove) phydev->drv->remove(phydev); phydev->drv = NULL; diff --git a/include/linux/phy.h b/include/linux/phy.h index d9bdf53e0514..772476028a65 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -807,6 +807,9 @@ int phy_stop_interrupts(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { + if (!phydev->drv) + return -EIO; + return phydev->drv->read_status(phydev); } -- cgit v1.2.3 From 2317c6b51e4249dbfa093e1b88cab0a9f0564b7f Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Fri, 17 Feb 2017 18:11:58 -0800 Subject: openvswitch: Set event bit after initializing labels. Connlabels are included in conntrack netlink event messages only if the IPCT_LABEL bit is set in the event cache (see ctnetlink_conntrack_event()). Set it after initializing labels for a new connection. Found upon further system testing, where it was noticed that labels were missing from the conntrack events. Fixes: 193e30967897 ("openvswitch: Do not trigger events for unconfirmed connections.") Signed-off-by: Jarno Rajahalme Acked-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- net/openvswitch/conntrack.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index c2d452eab0c5..85cd59526670 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -339,9 +339,7 @@ static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct) /* Initialize labels for a new, yet to be committed conntrack entry. Note that * since the new connection is not yet confirmed, and thus no-one else has - * access to it's labels, we simply write them over. Also, we refrain from - * triggering events, as receiving change events before the create event would - * be confusing. + * access to it's labels, we simply write them over. */ static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, const struct ovs_key_ct_labels *labels, @@ -374,6 +372,11 @@ static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, & mask->ct_labels_32[i]); } + /* Labels are included in the IPCTNL_MSG_CT_NEW event only if the + * IPCT_LABEL bit it set in the event cache. + */ + nf_conntrack_event_cache(IPCT_LABEL, ct); + memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN); return 0; -- cgit v1.2.3 From 90a6c997bd8bb836809eda5efb6406d8c58c0924 Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Sat, 18 Feb 2017 12:19:41 +0100 Subject: net: ena: remove superfluous check in ena_remove() The check in ena_remove() for the pci driver data not being NULL is not needed, since it is always set in the probe() function. Remove the superfluous check. Signed-off-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d8c920be5e91..35f19430c84a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3163,12 +3163,6 @@ static void ena_remove(struct pci_dev *pdev) struct ena_com_dev *ena_dev; struct net_device *netdev; - if (!adapter) - /* This device didn't load properly and it's resources - * already released, nothing to do - */ - return; - ena_dev = adapter->ena_dev; netdev = adapter->netdev; -- cgit v1.2.3 From eb36bedf28be6d986bdbcfa375bab08ffa45efd8 Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Sat, 18 Feb 2017 12:27:12 +0100 Subject: net: aquantia: remove function aq_ring_tx_deinit Both functions aq_ring_tx_deinit() and aq_ring_tx_clean() are almost identical aside from an additional check in the latter. Move that check from the function into its caller and replace aq_ring_tx_deinit() with aq_ring_tx_clean(). By doing this also adjust the function's return value from int to void since it can never fail. Signed-off-by: Lino Sanfilippo Tested-by: Pavel Belous Signed-off-by: David S.
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 33 +----------------------- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 3 +-- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 14 ++++++---- 3 files changed, 11 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index dea9e9bbb8e7..fed6ac51559f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -123,7 +123,7 @@ void aq_ring_tx_append_buffs(struct aq_ring_s *self, } } -int aq_ring_tx_clean(struct aq_ring_s *self) +void aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); @@ -143,11 +143,6 @@ int aq_ring_tx_clean(struct aq_ring_s *self) if (unlikely(buff->is_eop)) dev_kfree_skb_any(buff->skb); } - - if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX) - aq_nic_ndev_queue_start(self->aq_nic, self->idx); - - return 0; } static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, @@ -333,32 +328,6 @@ void aq_ring_rx_deinit(struct aq_ring_s *self) err_exit:; } -void aq_ring_tx_deinit(struct aq_ring_s *self) -{ - if (!self) - goto err_exit; - - for (; self->sw_head != self->sw_tail; - self->sw_head = aq_ring_next_dx(self, self->sw_head)) { - struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; - struct device *ndev = aq_nic_get_dev(self->aq_nic); - - if (likely(buff->is_mapped)) { - if (unlikely(buff->is_sop)) { - dma_unmap_single(ndev, buff->pa, buff->len, - DMA_TO_DEVICE); - } else { - dma_unmap_page(ndev, buff->pa, buff->len, - DMA_TO_DEVICE); - } - } - - if (unlikely(buff->is_eop)) - dev_kfree_skb_any(buff->skb); - } -err_exit:; -} - void aq_ring_free(struct aq_ring_s *self) { if (!self) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 0ac3f9e7bee6..fb296b3fa7fd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -144,13 +144,12 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); int aq_ring_init(struct aq_ring_s *self); -void aq_ring_tx_deinit(struct aq_ring_s *self); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); void aq_ring_tx_append_buffs(struct aq_ring_s *ring, struct aq_ring_buff_s *buffer, unsigned int buffers); -int aq_ring_tx_clean(struct aq_ring_s *self); +void aq_ring_tx_clean(struct aq_ring_s *self); int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); int aq_ring_rx_fill(struct aq_ring_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index cb30a6396a70..ad5b4d4dac7f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -59,10 +59,14 @@ __acquires(&self->lock) } if (ring[AQ_VEC_TX_ID].sw_head != - ring[AQ_VEC_TX_ID].hw_head) { - err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); - if (err < 0) - goto err_exit; + ring[AQ_VEC_TX_ID].hw_head) { + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + + if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) > + AQ_CFG_SKB_FRAGS_MAX) { + aq_nic_ndev_queue_start(self->aq_nic, + ring[AQ_VEC_TX_ID].idx); + } was_tx_cleaned = true; } @@ -271,7 +275,7 @@ void aq_vec_deinit(struct aq_vec_s *self) for (i = 0U, ring = self->ring[0]; self->tx_rings > i; ++i, ring = self->ring[i]) { - 
aq_ring_tx_deinit(&ring[AQ_VEC_TX_ID]); + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); } err_exit:; -- cgit v1.2.3 From 93a66e93c7d8775f33c0d65e3fdeb209c1d4a8c9 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Sat, 18 Feb 2017 13:58:00 +0100 Subject: GTP: Add some basic documentation about drivers/net/gtp.c In order to clarify what the module actually does, and how to use it, let's add some basic documentation to the kernel tree, together with pointers to related specs and projects. Signed-off-by: Harald Welte Acked-by: Andreas Schultz Signed-off-by: David S. Miller --- Documentation/networking/gtp.txt | 135 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 Documentation/networking/gtp.txt diff --git a/Documentation/networking/gtp.txt b/Documentation/networking/gtp.txt new file mode 100644 index 000000000000..93e96750f103 --- /dev/null +++ b/Documentation/networking/gtp.txt @@ -0,0 +1,135 @@ +The Linux kernel GTP tunneling module +====================================================================== +Documentation by Harald Welte + +In 'drivers/net/gtp.c' you are finding a kernel-level implementation +of a GTP tunnel endpoint. + +== What is GTP == + +GTP is the Generic Tunnel Protocol, which is a 3GPP protocol used for +tunneling User-IP payload between a mobile station (phone, modem) +and the interconnection between an external packet data network (such +as the internet). + +So when you start a 'data connection' from your mobile phone, the +phone will use the control plane to signal for the establishment of +such a tunnel between that external data network and the phone. The +tunnel endpoints thus reside on the phone and in the gateway. All +intermediate nodes just transport the encapsulated packet. + +The phone itself does not implement GTP but uses some other +technology-dependent protocol stack for transmitting the user IP +payload, such as LLC/SNDCP/RLC/MAC. + +At some network element inside the cellular operator infrastructure +(SGSN in case of GPRS/EGPRS or classic UMTS, hNodeB in case of a 3G +femtocell, eNodeB in case of 4G/LTE), the cellular protocol stacking +is translated into GTP *without breaking the end-to-end tunnel*. So +intermediate nodes just perform some specific relay function. + +At some point the GTP packet ends up on the so-called GGSN (GSM/UMTS) +or P-GW (LTE), which terminates the tunnel, decapsulates the packet +and forwards it onto an external packet data network. This can be +public internet, but can also be any private IP network (or even +theoretically some non-IP network like X.25). + +You can find the protocol specification in 3GPP TS 29.060, available +publicly via the 3GPP website at http://www.3gpp.org/DynaReport/29060.htm + +A direct PDF link to v13.6.0 is provided for convenience below: +http://www.etsi.org/deliver/etsi_ts/129000_129099/129060/13.06.00_60/ts_129060v130600p.pdf + +== The Linux GTP tunnelling module == + +The module implements the function of a tunnel endpoint, i.e. it is +able to decapsulate tunneled IP packets in the uplink originated by +the phone, and encapsulate raw IP packets received from the external +packet network in downlink towards the phone. + +It *only* implements the so-called 'user plane', carrying the User-IP +payload, called GTP-U. It does not implement the 'control plane', +which is a signaling protocol used for establishment and teardown of +GTP tunnels (GTP-C). 
+ +So in order to have a working GGSN/P-GW setup, you will need a +userspace program that implements the GTP-C protocol and which then +uses the netlink interface provided by the GTP-U module in the kernel +to configure the kernel module. + +This split architecture follows the tunneling modules of other +protocols, e.g. PPPoE or L2TP, where you also run a userspace daemon +to handle the tunnel establishment, authentication etc. and only the +data plane is accelerated inside the kernel. + +Don't be confused by terminology: The GTP User Plane goes through +kernel accelerated path, while the GTP Control Plane goes to +Userspace :) + +The official homepage of the module is at +https://osmocom.org/projects/linux-kernel-gtp-u/wiki + +== Userspace Programs with Linux Kernel GTP-U support == + +At the time of this writing, there are at least two Free Software +implementations that implement GTP-C and can use the netlink interface +to make use of the Linux kernel GTP-U support: + +* OpenGGSN (classic 2G/3G GGSN in C): + https://osmocom.org/projects/openggsn/wiki/OpenGGSN + +* ergw (GGSN + P-GW in Erlang): + https://github.com/travelping/ergw + +== Userspace Library / Command Line Utilities == + +There is a userspace library called 'libgtpnl' which is based on +libmnl and which implements a C-language API towards the netlink +interface provided by the Kernel GTP module: + +http://git.osmocom.org/libgtpnl/ + +== Protocol Versions == + +There are two different versions of GTP-U: v0 and v1. Both are +implemented in the Kernel GTP module. Version 0 is a legacy version, +and deprecated from recent 3GPP specifications. + +There are three versions of GTP-C: v0, v1, and v2. As the kernel +doesn't implement GTP-C, we don't have to worry about this. It's the +responsibility of the control plane implementation in userspace to +implement that. + +== IPv6 == + +The 3GPP specifications indicate either IPv4 or IPv6 can be used both +on the inner (user) IP layer, or on the outer (transport) layer. + +Unfortunately, the Kernel module currently supports IPv6 neither for +the User IP payload, nor for the outer IP layer. Patches or other +contributions to fix this are most welcome! + +== Mailing List == + +If you have questions regarding how to use the Kernel GTP module from +your own software, or want to contribute to the code, please use the +osmocom-net-gprs mailing list for related discussion. The list can be +reached at osmocom-net-gprs@lists.osmocom.org and the mailman +interface for managing your subscription is at +https://lists.osmocom.org/mailman/listinfo/osmocom-net-gprs + +== Issue Tracker == + +The Osmocom project maintains an issue tracker for the Kernel GTP-U +module at +https://osmocom.org/projects/linux-kernel-gtp-u/issues + +== History / Acknowledgements == + +The Module was originally created in 2012 by Harald Welte, but never +completed. Pablo came in to finish the mess Harald left behind. But +due to a lack of user interest, it never got merged. + +In 2015, Andreas Schultz came to the rescue and fixed lots more bugs, +extended it with new features and finally pushed all of us to get it +mainline, where it was merged in 4.7.0.
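To make the user plane concrete, here is a minimal sketch of the mandatory eight-byte GTPv1-U header that is prepended to each user IP packet in the downlink and stripped again in the uplink, following 3GPP TS 29.060. The struct, field and helper names below are illustrative assumptions made for this document and are not necessarily the identifiers used inside drivers/net/gtp.c.

#include <stdint.h>
#include <arpa/inet.h>

/* Mandatory part of a GTPv1-U header (8 bytes), illustrative only. */
struct gtp1u_hdr {
	uint8_t  flags;   /* 0x30: version 1, protocol type GTP, no E/S/PN optional fields */
	uint8_t  type;    /* 0xff: G-PDU, i.e. an encapsulated user IP packet */
	uint16_t length;  /* length of the payload following this mandatory header */
	uint32_t teid;    /* Tunnel Endpoint Identifier assigned by the receiving side */
} __attribute__((packed));

/* Fill in a header for a user IP packet of payload_len bytes sent on tunnel teid. */
static inline void gtp1u_hdr_init(struct gtp1u_hdr *h, uint32_t teid, uint16_t payload_len)
{
	h->flags  = 0x30;
	h->type   = 0xff;
	h->length = htons(payload_len);
	h->teid   = htonl(teid);
}

The header plus payload is then carried in an outer UDP/IP packet towards the peer's GTP-U port (UDP port 2152); the kernel module performs exactly this encapsulation and the reverse decapsulation, while tunnel setup (choosing the TEID, peer address and so on) stays with the GTP-C daemon in userspace.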
-- cgit v1.2.3 From b7018d0b63003c0474451991797113605f26ee81 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 19 Feb 2017 01:52:45 +0800 Subject: sctp: flush out queue once assoc state falls into SHUTDOWN_PENDING This patch is to flush out queue when assoc state falls into SHUTDOWN_PENDING if there are still chunks in it, so that the data can be sent out as soon as possible before sending SHUTDOWN chunk. When sctp supports MSG_MORE flag in next patch, this improvement can also solve the problem that the chunks with MSG_MORE flag may be stuck in queue when closing an assoc. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/sm_sideeffect.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 51abcc90fe75..25384fa82ba9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -872,6 +872,10 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, if (!sctp_style(sk, UDP)) sk->sk_state_change(sk); } + + if (sctp_state(asoc, SHUTDOWN_PENDING) && + !sctp_outq_is_empty(&asoc->outqueue)) + sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC); } /* Helper function to delete an association. */ -- cgit v1.2.3 From 4ea0c32f5f42f7ef33a7ecfb9b61ff0cad9b3c08 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 19 Feb 2017 01:52:46 +0800 Subject: sctp: add support for MSG_MORE This patch is to add support for MSG_MORE on sctp. It adds force_delay in sctp_datamsg to save MSG_MORE, and sets it after creating datamsg according to the send flag. sctp_packet_can_append_data then uses it to decide if the chunks of this msg will be sent at once or delay it. Note that unlike [1], this patch saves MSG_MORE in datamsg, instead of in assoc. As sctp enqueues the chunks first, then dequeue them one by one. If it's saved in assoc,the current msg's send flag (MSG_MORE) may affect other chunks' bundling. Since last patch, sctp flush out queue once assoc state falls into SHUTDOWN_PENDING, the close block problem mentioned in [1] has been solved as well. [1] https://patchwork.ozlabs.org/patch/372404/ Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 1 + net/sctp/output.c | 9 +++------ net/sctp/socket.c | 1 + 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 387c802bf248..a244db5e5ff7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -497,6 +497,7 @@ struct sctp_datamsg { /* Did the messenge fail to send? */ int send_error; u8 send_failed:1, + force_delay:1, can_delay; /* should this message be Nagle delayed */ }; diff --git a/net/sctp/output.c b/net/sctp/output.c index 814eac047467..85406d5f8f41 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -704,18 +704,15 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, * unacknowledged. 
*/ - if (sctp_sk(asoc->base.sk)->nodelay) - /* Nagle disabled */ + if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && + !chunk->msg->force_delay) + /* Nothing unacked */ return SCTP_XMIT_OK; if (!sctp_packet_empty(packet)) /* Append to packet */ return SCTP_XMIT_OK; - if (inflight == 0) - /* Nothing unacked */ - return SCTP_XMIT_OK; - if (!sctp_state(asoc, ESTABLISHED)) return SCTP_XMIT_OK; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 75f35cea4371..b5321486fbed 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1964,6 +1964,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) err = PTR_ERR(datamsg); goto out_free; } + datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); /* Now send the (possibly) fragmented message. */ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { -- cgit v1.2.3 From f74f92bed642033307cda00a0549a02fb6710a04 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 18 Feb 2017 23:41:46 +0000 Subject: fsl/fman: fix spelling mistake in variable name en_tsu_err_exeption trivial fix to spelling mistake, en_tsu_err_exeption should be en_tsu_err_exception Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index c88918c4c5f3..84ea130eed36 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -337,7 +337,7 @@ struct fman_mac { u8 mac_id; u32 exceptions; bool ptp_tsu_enabled; - bool en_tsu_err_exeption; + bool en_tsu_err_exception; struct dtsec_cfg *dtsec_drv_param; void *fm; struct fman_rev_info fm_rev_info; @@ -1247,12 +1247,12 @@ int dtsec_set_exception(struct fman_mac *dtsec, switch (exception) { case FM_MAC_EX_1G_1588_TS_RX_ERR: if (enable) { - dtsec->en_tsu_err_exeption = true; + dtsec->en_tsu_err_exception = true; iowrite32be(ioread32be(®s->tmr_pemask) | TMR_PEMASK_TSREEN, ®s->tmr_pemask); } else { - dtsec->en_tsu_err_exeption = false; + dtsec->en_tsu_err_exception = false; iowrite32be(ioread32be(®s->tmr_pemask) & ~TMR_PEMASK_TSREEN, ®s->tmr_pemask); @@ -1420,7 +1420,7 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params) dtsec->event_cb = params->event_cb; dtsec->dev_id = params->dev_id; dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en; - dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en; + dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en; dtsec->fm = params->fm; dtsec->basex_if = params->basex_if; -- cgit v1.2.3 From d4e854ccd6410687bf82af3fe49d2df9793d80b4 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 19 Feb 2017 00:19:04 +0100 Subject: net: qlogic: qla3xxx: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qla3xxx.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index ea38236f1ced..2991179c2fd0 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1707,23 +1707,30 @@ static int ql_get_full_dup(struct ql3_adapter *qdev) return status; } -static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int ql_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ql3_adapter *qdev = netdev_priv(ndev); + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = ql_supported_modes(qdev); + supported = ql_supported_modes(qdev); if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; } else { - ecmd->port = PORT_TP; - ecmd->phy_address = qdev->PHYAddr; + cmd->base.port = PORT_TP; + cmd->base.phy_address = qdev->PHYAddr; } - ecmd->advertising = ql_supported_modes(qdev); - ecmd->autoneg = ql_get_auto_cfg_status(qdev); - ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); - ecmd->duplex = ql_get_full_dup(qdev); + advertising = ql_supported_modes(qdev); + cmd->base.autoneg = ql_get_auto_cfg_status(qdev); + cmd->base.speed = ql_get_speed(qdev); + cmd->base.duplex = ql_get_full_dup(qdev); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -1769,12 +1776,12 @@ static void ql_get_pauseparam(struct net_device *ndev, } static const struct ethtool_ops ql3xxx_ethtool_ops = { - .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, .get_pauseparam = ql_get_pauseparam, + .get_link_ksettings = ql_get_link_ksettings, }; static int ql_populate_free_queue(struct ql3_adapter *qdev) -- cgit v1.2.3 From e71695307114335be1ed912f4a347396c2ed0e69 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 19 Feb 2017 07:17:17 +0200 Subject: ptr_ring: fix race conditions when resizing Resizing currently drops consumer lock. This can cause entries to be reordered, which isn't good in itself. More importantly, consumer can detect a false ring empty condition and block forever. Further, nesting of consumer within producer lock is problematic for tun, since it produces entries in a BH, which causes a lock order reversal: CPU0 CPU1 ---- ---- consume: lock(&(&r->consumer_lock)->rlock); resize: local_irq_disable(); lock(&(&r->producer_lock)->rlock); lock(&(&r->consumer_lock)->rlock); produce: lock(&(&r->producer_lock)->rlock); To fix, nest producer lock within consumer lock during resize, and keep consumer lock during the whole swap operation. Reported-by: Dmitry Vyukov Cc: stable@vger.kernel.org Cc: "David S. Miller" Acked-by: Jason Wang Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- include/linux/ptr_ring.h | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 2052011bf9fb..6c70444da3b9 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -111,6 +111,11 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) return 0; } +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * consume in interrupt or BH context, you must disable interrupts/BH when + * calling this. + */ static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) { int ret; @@ -242,6 +247,11 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * call this in interrupt or BH context, you must disable interrupts/BH when + * producing. + */ static inline void *ptr_ring_consume(struct ptr_ring *r) { void *ptr; @@ -357,7 +367,7 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, void **old; void *ptr; - while ((ptr = ptr_ring_consume(r))) + while ((ptr = __ptr_ring_consume(r))) if (producer < size) queue[producer++] = ptr; else if (destroy) @@ -372,6 +382,12 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, return old; } +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, void (*destroy)(void *)) { @@ -382,17 +398,25 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, if (!queue) return -ENOMEM; - spin_lock_irqsave(&(r)->producer_lock, flags); + spin_lock_irqsave(&(r)->consumer_lock, flags); + spin_lock(&(r)->producer_lock); old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); - spin_unlock_irqrestore(&(r)->producer_lock, flags); + spin_unlock(&(r)->producer_lock); + spin_unlock_irqrestore(&(r)->consumer_lock, flags); kfree(old); return 0; } +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, int size, gfp_t gfp, void (*destroy)(void *)) @@ -412,10 +436,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, } for (i = 0; i < nrings; ++i) { - spin_lock_irqsave(&(rings[i])->producer_lock, flags); + spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock(&(rings[i])->producer_lock); queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], size, gfp, destroy); - spin_unlock_irqrestore(&(rings[i])->producer_lock, flags); + spin_unlock(&(rings[i])->producer_lock); + spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); } for (i = 0; i < nrings; ++i) -- cgit v1.2.3 From 3608b13ccc51d06e499dfe12b27f134de1286e28 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 18 Feb 2017 10:34:18 -0800 Subject: mlx4: reduce OOM risk on arches with large pages Since mlx4 NIC are used on PowerPC with 64K pages, we need to adapt MLX4_EN_ALLOC_PREFER_ORDER definition. Otherwise, a fragment sitting in an out of order TCP queue can hold 0.5 Mbytes and it is a serious OOM risk. 
Fixes: 51151a16a60f ("mlx4: allow order-0 memory allocations in RX path") Signed-off-by: Eric Dumazet Reviewed-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d8ca6d1794ef..4941b692e947 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -102,7 +102,8 @@ /* Use the maximum between 16384 and a single page */ #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) -#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER +#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \ + PAGE_ALLOC_COSTLY_ORDER) /* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU * and 4K allocations) */ -- cgit v1.2.3 From 177c8d1c96eb9851210313922a12ae8b0b63d8c0 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 19 Feb 2017 10:19:57 +0100 Subject: net: mvpp2: Fix a memory leak in error handling path if 'devm_kzalloc()' fails, we should release resources allocated so far, just as done a few lines below. Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index c2fd7c36f927..c48632048f71 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5971,8 +5971,10 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2_tx_queue *txq; txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); - if (!txq) - return -ENOMEM; + if (!txq) { + err = -ENOMEM; + goto err_free_percpu; + } txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); if (!txq->pcpu) { -- cgit v1.2.3 From 766a957df9f51479cb5c7212d19b282da5ca6f07 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 19 Feb 2017 10:35:05 +0100 Subject: qlcnic: Fix a memory leak in error handling path If 'dma_alloc_coherent()' fails, we should release resources allocated so far, just as done in all other cases in this function. Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index daf05155b732..d344e9d43832 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -573,8 +573,10 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); - if (ptr == NULL) - return -ENOMEM; + if (ptr == NULL) { + err = -ENOMEM; + goto err_out_free; + } tx_ring->hw_consumer = ptr; /* cmd desc ring */ -- cgit v1.2.3 From 24045a03b8796e3e1ddb370dfe4bc592a9f5f301 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 20 Feb 2017 08:03:30 -0800 Subject: net: mpls: Add support for netconf Add netconf support to MPLS. Allows userpsace to learn and be notified of changes to 'input' enable setting per interface. Acked-by: Nicolas Dichtel Signed-off-by: David Ahern Acked-by: Robert Shearman Signed-off-by: David S. 
Miller --- include/uapi/linux/netconf.h | 1 + include/uapi/linux/rtnetlink.h | 2 + net/mpls/af_mpls.c | 212 ++++++++++++++++++++++++++++++++++++++++- net/mpls/internal.h | 2 +- 4 files changed, 214 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h index 45dfad509c4d..7e5f0f3e31bf 100644 --- a/include/uapi/linux/netconf.h +++ b/include/uapi/linux/netconf.h @@ -16,6 +16,7 @@ enum { NETCONFA_MC_FORWARDING, NETCONFA_PROXY_NEIGH, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + NETCONFA_INPUT, __NETCONFA_MAX }; #define NETCONFA_MAX (__NETCONFA_MAX - 1) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 8c93ad1ef9ab..6546917d605a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -658,6 +658,8 @@ enum rtnetlink_groups { #define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE RTNLGRP_NSID, #define RTNLGRP_NSID RTNLGRP_NSID + RTNLGRP_MPLS_NETCONF, +#define RTNLGRP_MPLS_NETCONF RTNLGRP_MPLS_NETCONF __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 64d3bf269a26..3818686182b2 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -960,15 +961,215 @@ static size_t mpls_get_stats_af_size(const struct net_device *dev) return nla_total_size_64bit(sizeof(struct mpls_link_stats)); } +static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev, + u32 portid, u32 seq, int event, + unsigned int flags, int type) +{ + struct nlmsghdr *nlh; + struct netconfmsg *ncm; + bool all = false; + + nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), + flags); + if (!nlh) + return -EMSGSIZE; + + if (type == NETCONFA_ALL) + all = true; + + ncm = nlmsg_data(nlh); + ncm->ncm_family = AF_MPLS; + + if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0) + goto nla_put_failure; + + if ((all || type == NETCONFA_INPUT) && + nla_put_s32(skb, NETCONFA_INPUT, + mdev->input_enabled) < 0) + goto nla_put_failure; + + nlmsg_end(skb, nlh); + return 0; + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int mpls_netconf_msgsize_devconf(int type) +{ + int size = NLMSG_ALIGN(sizeof(struct netconfmsg)) + + nla_total_size(4); /* NETCONFA_IFINDEX */ + bool all = false; + + if (type == NETCONFA_ALL) + all = true; + + if (all || type == NETCONFA_INPUT) + size += nla_total_size(4); + + return size; +} + +static void mpls_netconf_notify_devconf(struct net *net, int type, + struct mpls_dev *mdev) +{ + struct sk_buff *skb; + int err = -ENOBUFS; + + skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL); + if (!skb) + goto errout; + + err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, RTM_NEWNETCONF, + 0, type); + if (err < 0) { + /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(skb); + goto errout; + } + + rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL); + return; +errout: + if (err < 0) + rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err); +} + +static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = { + [NETCONFA_IFINDEX] = { .len = sizeof(int) }, +}; + +static int mpls_netconf_get_devconf(struct sk_buff *in_skb, + struct nlmsghdr *nlh) +{ + struct net *net = sock_net(in_skb->sk); + struct nlattr *tb[NETCONFA_MAX + 1]; + struct netconfmsg *ncm; + struct net_device *dev; + struct mpls_dev *mdev; + struct sk_buff *skb; + int ifindex; + int err; + 
+ err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, + devconf_mpls_policy); + if (err < 0) + goto errout; + + err = -EINVAL; + if (!tb[NETCONFA_IFINDEX]) + goto errout; + + ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); + dev = __dev_get_by_index(net, ifindex); + if (!dev) + goto errout; + + mdev = mpls_dev_get(dev); + if (!mdev) + goto errout; + + err = -ENOBUFS; + skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL); + if (!skb) + goto errout; + + err = mpls_netconf_fill_devconf(skb, mdev, + NETLINK_CB(in_skb).portid, + nlh->nlmsg_seq, RTM_NEWNETCONF, 0, + NETCONFA_ALL); + if (err < 0) { + /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(skb); + goto errout; + } + err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); +errout: + return err; +} + +static int mpls_netconf_dump_devconf(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct hlist_head *head; + struct net_device *dev; + struct mpls_dev *mdev; + int idx, s_idx; + int h, s_h; + + s_h = cb->args[0]; + s_idx = idx = cb->args[1]; + + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); + cb->seq = net->dev_base_seq; + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + mdev = mpls_dev_get(dev); + if (!mdev) + goto cont; + if (mpls_netconf_fill_devconf(skb, mdev, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, + NLM_F_MULTI, + NETCONFA_ALL) < 0) { + rcu_read_unlock(); + goto done; + } + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); +cont: + idx++; + } + rcu_read_unlock(); + } +done: + cb->args[0] = h; + cb->args[1] = idx; + + return skb->len; +} + #define MPLS_PERDEV_SYSCTL_OFFSET(field) \ (&((struct mpls_dev *)0)->field) +static int mpls_conf_proc(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int oval = *(int *)ctl->data; + int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (write) { + struct mpls_dev *mdev = ctl->extra1; + int i = (int *)ctl->data - (int *)mdev; + struct net *net = ctl->extra2; + int val = *(int *)ctl->data; + + if (i == offsetof(struct mpls_dev, input_enabled) && + val != oval) { + mpls_netconf_notify_devconf(net, + NETCONFA_INPUT, + mdev); + } + } + + return ret; +} + static const struct ctl_table mpls_dev_table[] = { { .procname = "input", .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = mpls_conf_proc, .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled), }, { } @@ -978,6 +1179,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev, struct mpls_dev *mdev) { char path[sizeof("net/mpls/conf/") + IFNAMSIZ]; + struct net *net = dev_net(dev); struct ctl_table *table; int i; @@ -988,8 +1190,11 @@ static int mpls_dev_sysctl_register(struct net_device *dev, /* Table data contains only offsets relative to the base of * the mdev at this point, so make them absolute. 
*/ - for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) + for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) { table[i].data = (char *)mdev + (uintptr_t)table[i].data; + table[i].extra1 = mdev; + table[i].extra2 = net; + } snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name); @@ -1041,6 +1246,7 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev) if (err) goto free; + mdev->dev = dev; rcu_assign_pointer(dev->mpls_ptr, mdev); return mdev; @@ -1861,6 +2067,8 @@ static int __init mpls_init(void) rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL); rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL); rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL); + rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf, + mpls_netconf_dump_devconf, NULL); err = 0; out: return err; diff --git a/net/mpls/internal.h b/net/mpls/internal.h index d97243034605..76360d8b9579 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -16,7 +16,7 @@ struct mpls_pcpu_stats { struct mpls_dev { int input_enabled; - + struct net_device *dev; struct mpls_pcpu_stats __percpu *stats; struct ctl_table_header *sysctl; -- cgit v1.2.3 From a9c83f7bc7c121c99457553f8d1c3e231a9ba6df Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 19 Feb 2017 11:58:08 -0800 Subject: nfp: refactor NSP initialization and add error message When acquiring NSP communication resource fails user is left with "probe failed with error -2" PCI code message but no info on what caused the problem. Some development boards may not have NSP FW in the flash image. Help users with a more verbouse message. While at it move the whole NSP init to a separate function to keep .probe() callback nice and simple. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 59 ++++++++++++++++----------- 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index db52b6a53c6f..ca2c464c15df 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -228,6 +228,40 @@ exit_release_fw: return err < 0 ? 
err : 1; } +static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + dev_err(&pdev->dev, "Failed to access the NSP: %d\n", err); + return err; + } + + err = nfp_nsp_wait(nsp); + if (err < 0) + goto exit_close_nsp; + + pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + + err = nfp_fw_load(pdev, pf, nsp); + if (err < 0) { + kfree(pf->eth_tbl); + dev_err(&pdev->dev, "Failed to load FW\n"); + goto exit_close_nsp; + } + + pf->fw_loaded = !!err; + err = 0; + +exit_close_nsp: + nfp_nsp_close(nsp); + + return err; +} + static void nfp_fw_unload(struct nfp_pf *pf) { struct nfp_nsp *nsp; @@ -251,7 +285,6 @@ static void nfp_fw_unload(struct nfp_pf *pf) static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { - struct nfp_nsp *nsp; struct nfp_pf *pf; int err; @@ -289,28 +322,9 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_disable_msix; } - nsp = nfp_nsp_open(pf->cpp); - if (IS_ERR(nsp)) { - err = PTR_ERR(nsp); - goto err_cpp_free; - } - - err = nfp_nsp_wait(nsp); - if (err < 0) { - nfp_nsp_close(nsp); + err = nfp_nsp_init(pdev, pf); + if (err) goto err_cpp_free; - } - - pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); - - err = nfp_fw_load(pdev, pf, nsp); - nfp_nsp_close(nsp); - if (err < 0) { - dev_err(&pdev->dev, "Failed to load FW\n"); - goto err_eth_tbl_free; - } - - pf->fw_loaded = !!err; err = nfp_net_pci_probe(pf); if (err) @@ -321,7 +335,6 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_fw_unload: if (pf->fw_loaded) nfp_fw_unload(pf); -err_eth_tbl_free: kfree(pf->eth_tbl); err_cpp_free: nfp_cpp_free(pf->cpp); -- cgit v1.2.3 From 64db09ed1fc2c4491cca4219850fab4d0460c952 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 19 Feb 2017 11:58:09 -0800 Subject: nfp: report manufacturing info on load Report card manufacturing information when driver loads. These identify the version of the board and its subcomponents. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index ca2c464c15df..8cda6b0e7e32 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -322,6 +322,13 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_disable_msix; } + dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n", + nfp_hwinfo_lookup(pf->cpp, "assembly.vendor"), + nfp_hwinfo_lookup(pf->cpp, "assembly.partno"), + nfp_hwinfo_lookup(pf->cpp, "assembly.serial"), + nfp_hwinfo_lookup(pf->cpp, "assembly.revision"), + nfp_hwinfo_lookup(pf->cpp, "cpld.version")); + err = nfp_nsp_init(pdev, pf); if (err) goto err_cpp_free; -- cgit v1.2.3 From 368b1d1c49f651ba513090623f2891a695fe9fbb Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 19 Feb 2017 11:58:10 -0800 Subject: nfp: store NSP ABI version in state structure We read the status register on each NSP open, we can store the NSP ABI version in the state structure so that we don't have to read it again. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 27 +++++++++++----------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index f07f2fc3fba0..f2d737c3d1df 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -99,6 +99,10 @@ enum nfp_nsp_cmd { struct nfp_nsp { struct nfp_cpp *cpp; struct nfp_resource *res; + struct { + u16 major; + u16 minor; + } ver; }; static int nfp_nsp_check(struct nfp_nsp *state) @@ -120,11 +124,12 @@ static int nfp_nsp_check(struct nfp_nsp *state) return -ENODEV; } - if (FIELD_GET(NSP_STATUS_MAJOR, reg) != NSP_MAJOR || - FIELD_GET(NSP_STATUS_MINOR, reg) < NSP_MINOR) { - nfp_err(cpp, "Unsupported ABI %lld.%lld\n", - FIELD_GET(NSP_STATUS_MAJOR, reg), - FIELD_GET(NSP_STATUS_MINOR, reg)); + state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); + state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); + + if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) { + nfp_err(cpp, "Unsupported ABI %hu.%hu\n", + state->ver.major, state->ver.minor); return -EINVAL; } @@ -301,15 +306,9 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, int ret, err; u32 cpp_id; - err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), - nfp_resource_address(nsp->res) + NSP_STATUS, ®); - if (err < 0) - return err; - - if (FIELD_GET(NSP_STATUS_MINOR, reg) < 13) { - nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %lld.%lld)\n", - code, FIELD_GET(NSP_STATUS_MAJOR, reg), - FIELD_GET(NSP_STATUS_MINOR, reg)); + if (nsp->ver.minor < 13) { + nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n", + code, nsp->ver.major, nsp->ver.minor); return -EOPNOTSUPP; } -- cgit v1.2.3 From bd5ca062ba7d24bcc28f637aa90056f642a35dfa Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 19 Feb 2017 11:58:11 -0800 Subject: nfp: report NSP ABI version in ethtool FW version ethtool_drvinfo->fw_version can cantain multiple FW strings. We already report NFD ABI version there, add NSP ABI version if available (i.e. on PF) with 'sp:' prefix. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 4 ++++ .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 25 ++++++++++++++++++++-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 1 + drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 3 +++ .../net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 10 +++++++++ 5 files changed, 41 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index d37d2391b4fe..5a0fc09dd6ea 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -111,6 +111,7 @@ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) /* Forward declarations */ +struct nfp_cpp; struct nfp_net; struct nfp_net_r_vector; @@ -492,6 +493,7 @@ struct nfp_stat_pair { * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs * @port_list: Entry on device port list + * @cpp: CPP device handle if available */ struct nfp_net { struct pci_dev *pdev; @@ -579,6 +581,8 @@ struct nfp_net { struct dentry *debugfs_dir; struct list_head port_list; + + struct nfp_cpp *cpp; }; struct nfp_net_ring_set { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 255f30252550..48f623f68598 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -47,6 +47,7 @@ #include #include +#include "nfpcore/nfp.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" @@ -127,19 +128,39 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = { #define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) +static void nfp_net_get_nspinfo(struct nfp_net *nn, char *version) +{ + struct nfp_nsp *nsp; + + if (!nn->cpp) + return; + + nsp = nfp_nsp_open(nn->cpp); + if (IS_ERR(nsp)) + return; + + snprintf(version, ETHTOOL_FWVERS_LEN, "sp:%hu.%hu", + nfp_nsp_get_abi_ver_major(nsp), + nfp_nsp_get_abi_ver_minor(nsp)); + + nfp_nsp_close(nsp); +} + static void nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { + char nsp_version[ETHTOOL_FWVERS_LEN] = {}; struct nfp_net *nn = netdev_priv(netdev); strlcpy(drvinfo->driver, nn->pdev->driver->name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version)); + nfp_net_get_nspinfo(nn, nsp_version); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d.%d", + "%d.%d.%d.%d %s", nn->fw_ver.resv, nn->fw_ver.class, - nn->fw_ver.major, nn->fw_ver.minor); + nn->fw_ver.major, nn->fw_ver.minor, nsp_version); strlcpy(drvinfo->bus_info, pci_name(nn->pdev), sizeof(drvinfo->bus_info)); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index eccd31003b0d..3afcdc11480c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -303,6 +303,7 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar, if (IS_ERR(nn)) return nn; + nn->cpp = pf->cpp; nn->fw_ver = *fw_ver; nn->ctrl_bar = ctrl_bar; nn->tx_bar = tx_bar; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 3d70a8578a98..540027973a74 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -40,6 +40,7 @@ #define __NFP_H__ #include 
+#include #include "nfp_cpp.h" @@ -54,6 +55,8 @@ struct firmware; struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); void nfp_nsp_close(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index f2d737c3d1df..34c50987c377 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -182,6 +182,16 @@ void nfp_nsp_close(struct nfp_nsp *state) kfree(state); } +u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state) +{ + return state->ver.major; +} + +u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state) +{ + return state->ver.minor; +} + static int nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, u64 mask, u64 val) -- cgit v1.2.3 From af623682ac2eba96769f9ba2270438c1f9438d7c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 19 Feb 2017 11:58:12 -0800 Subject: nfp: add very basic access to NSP logs Allow dumping "arm.diag" resource with ethtool -w. This resource should contain a text log of the NSP (control processor) application. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 2 + .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 76 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 1 + 3 files changed, 79 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 5a0fc09dd6ea..e614a376b595 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -492,6 +492,7 @@ struct nfp_stat_pair { * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs + * @ethtool_dump_flag: Ethtool dump flag * @port_list: Entry on device port list * @cpp: CPP device handle if available */ @@ -579,6 +580,7 @@ struct nfp_net { u8 __iomem *rx_bar; struct dentry *debugfs_dir; + u32 ethtool_dump_flag; struct list_head port_list; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 48f623f68598..2649f7523c81 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -51,6 +51,10 @@ #include "nfp_net_ctrl.h" #include "nfp_net.h" +enum nfp_dump_diag { + NFP_DUMP_NSP_DIAG = 0, +}; + /* Support for stats. 
Returns netdev, driver, and device stats */ enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS }; struct _nfp_net_et_stats { @@ -579,6 +583,75 @@ static int nfp_net_get_coalesce(struct net_device *netdev, return 0; } +/* Other debug dumps + */ +static int +nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer) +{ + struct nfp_resource *res; + int ret; + + if (!nn->cpp) + return -EOPNOTSUPP; + + dump->version = 1; + dump->flag = NFP_DUMP_NSP_DIAG; + + res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (buffer) { + if (dump->len != nfp_resource_size(res)) { + ret = -EINVAL; + goto exit_release; + } + + ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res), + nfp_resource_address(res), + buffer, dump->len); + if (ret != dump->len) + ret = ret < 0 ? ret : -EIO; + else + ret = 0; + } else { + dump->len = nfp_resource_size(res); + ret = 0; + } +exit_release: + nfp_resource_release(res); + + return ret; +} + +static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val) +{ + struct nfp_net *nn = netdev_priv(netdev); + + if (!nn->cpp) + return -EOPNOTSUPP; + + if (val->flag != NFP_DUMP_NSP_DIAG) + return -EINVAL; + + nn->ethtool_dump_flag = val->flag; + + return 0; +} + +static int +nfp_net_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + return nfp_dump_nsp_diag(netdev_priv(netdev), dump, NULL); +} + +static int +nfp_net_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + return nfp_dump_nsp_diag(netdev_priv(netdev), dump, buffer); +} + static int nfp_net_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -743,6 +816,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_rxfh = nfp_net_set_rxfh, .get_regs_len = nfp_net_get_regs_len, .get_regs = nfp_net_get_regs, + .set_dump = nfp_net_set_dump, + .get_dump_flag = nfp_net_get_dump_flag, + .get_dump_data = nfp_net_get_dump_data, .get_coalesce = nfp_net_get_coalesce, .set_coalesce = nfp_net_set_coalesce, .get_channels = nfp_net_get_channels, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 540027973a74..42cb720b696d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -86,6 +86,7 @@ int nfp_nsp_write_eth_table(struct nfp_nsp *state, /* Service Processor */ #define NFP_RESOURCE_NSP "nfp.sp" +#define NFP_RESOURCE_NSP_DIAG "arm.diag" /* Netronone Flow Firmware Table */ #define NFP_RESOURCE_NFP_NFFW "nfp.nffw" -- cgit v1.2.3 From 372d504575cbefb34be6a934cc3d3ceb62e0892c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 19 Feb 2017 11:58:13 -0800 Subject: nfp: return nfp_rtsym_read_le() errors correctly nfp_rtsym_read_le() has an out parameter for error codes. We have to use that instead of returning errors directly. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index c659b1d999be..0e3870ecfb8c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -269,8 +269,10 @@ u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error) int err; sym = nfp_rtsym_lookup(cpp, name); - if (!sym) - return -ENOENT; + if (!sym) { + err = -ENOENT; + goto exit; + } id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); @@ -294,7 +296,7 @@ u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error) err = 0; else if (err >= 0) err = -EIO; - +exit: if (error) *error = err; -- cgit v1.2.3 From 0bc3827f8c9f74b5335667d70309cefa12ec14ae Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 19 Feb 2017 11:58:14 -0800 Subject: nfp: allow application firmware to limit number of SR-IOV VFs Some application firmware projects may choose to limit the number of VFs available below what is specified in PCI capability to be able to reuse the PCIe interface resources. There may also be projects which use cases don't require SR-IOV support at all and therefore don't want to spend time implementing/testing it. Check nfd_vf_cfg_max_vfs firmware symbol to see if application firmware is reporting how many VFs it supports. This mechanism is an opt-in, if symbol is not present we will only look at the PCI capability values. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 25 +++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_main.h | 2 ++ 2 files changed, 27 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 8cda6b0e7e32..dedac720fb29 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -47,6 +47,7 @@ #include "nfpcore/nfp.h" #include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" #include "nfpcore/nfp_nsp_eth.h" #include "nfpcore/nfp6000_pcie.h" @@ -70,12 +71,34 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +static void nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) +{ +#ifdef CONFIG_PCI_IOV + int err; + + pf->limit_vfs = nfp_rtsym_read_le(pf->cpp, "nfd_vf_cfg_max_vfs", &err); + if (!err) + return; + + pf->limit_vfs = ~0; + /* Allow any setting for backwards compatibility if symbol not found */ + if (err != -ENOENT) + nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); +#endif +} + static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) { #ifdef CONFIG_PCI_IOV struct nfp_pf *pf = pci_get_drvdata(pdev); int err; + if (num_vfs > pf->limit_vfs) { + nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", + pf->limit_vfs); + return -EINVAL; + } + err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err); @@ -333,6 +356,8 @@ static int nfp_pci_probe(struct pci_dev *pdev, if (err) goto err_cpp_free; + nfp_pcie_sriov_read_nfd_limit(pf); + err = nfp_net_pci_probe(pf); if (err) goto err_fw_unload; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 6c40fa322da3..39105d0435e9 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -59,6 +59,7 @@ struct nfp_eth_table; * @tx_area: Pointer to the CPP area for the TX queues * @rx_area: Pointer to the CPP area for the FL/RX queues * @irq_entries: Array of MSI-X entries for all ports + * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit) * @num_vfs: Number of SR-IOV VFs enabled * @fw_loaded: Is the firmware loaded? * @eth_tbl: NSP ETH table @@ -77,6 +78,7 @@ struct nfp_pf { struct msix_entry *irq_entries; + unsigned int limit_vfs; unsigned int num_vfs; bool fw_loaded; -- cgit v1.2.3 From 49cef10ffaeadc9cbb056e9219b3cbaa33ecafca Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 19 Feb 2017 23:06:01 +0100 Subject: net: qlogic: qlcnic: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 94 ++++++++------- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 6 +- .../net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 132 +++++++++++---------- 3 files changed, 123 insertions(+), 109 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index bdbcd2b088a0..99b187bfdd55 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3252,12 +3252,13 @@ out: return config; } -int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) +int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter, + struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = 0; int status = 0; + u32 supported, advertising; if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { /* Get port configuration info */ @@ -3271,45 +3272,48 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; if (netif_running(adapter->netdev) && ahw->has_link_events) { - ethtool_cmd_speed_set(ecmd, ahw->link_speed); - ecmd->duplex = ahw->link_duplex; - ecmd->autoneg = ahw->link_autoneg; + ecmd->base.speed = ahw->link_speed; + ecmd->base.duplex = ahw->link_duplex; + ecmd->base.autoneg = ahw->link_autoneg; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - ecmd->autoneg = AUTONEG_DISABLE; + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; + ecmd->base.autoneg = AUTONEG_DISABLE; } - ecmd->supported = (SUPPORTED_10baseT_Full | + supported = (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_Autoneg); - if (ecmd->autoneg == AUTONEG_ENABLE) { + ethtool_convert_link_mode_to_legacy_u32(&advertising, + ecmd->link_modes.advertising); + + if (ecmd->base.autoneg == AUTONEG_ENABLE) { if (ahw->port_config & QLC_83XX_10_CAPABLE) - ecmd->advertising |= SUPPORTED_10baseT_Full; + advertising |= SUPPORTED_10baseT_Full; if (ahw->port_config & QLC_83XX_100_CAPABLE) - ecmd->advertising |= SUPPORTED_100baseT_Full; + advertising |= SUPPORTED_100baseT_Full; if (ahw->port_config & QLC_83XX_1G_CAPABLE) - ecmd->advertising |= SUPPORTED_1000baseT_Full; + advertising |= SUPPORTED_1000baseT_Full; if (ahw->port_config & QLC_83XX_10G_CAPABLE) - 
ecmd->advertising |= SUPPORTED_10000baseT_Full; + advertising |= SUPPORTED_10000baseT_Full; if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE) - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; } else { switch (ahw->link_speed) { case SPEED_10: - ecmd->advertising = SUPPORTED_10baseT_Full; + advertising = SUPPORTED_10baseT_Full; break; case SPEED_100: - ecmd->advertising = SUPPORTED_100baseT_Full; + advertising = SUPPORTED_100baseT_Full; break; case SPEED_1000: - ecmd->advertising = SUPPORTED_1000baseT_Full; + advertising = SUPPORTED_1000baseT_Full; break; case SPEED_10000: - ecmd->advertising = SUPPORTED_10000baseT_Full; + advertising = SUPPORTED_10000baseT_Full; break; default: break; @@ -3319,56 +3323,58 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, switch (ahw->supported_type) { case PORT_FIBRE: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_FIBRE; break; case PORT_TP: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + ecmd->base.port = PORT_TP; break; case PORT_DA: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_DA; - ecmd->transceiver = XCVR_EXTERNAL; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_DA; break; default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_OTHER; - ecmd->transceiver = XCVR_EXTERNAL; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_OTHER; break; } - ecmd->phy_address = ahw->physical_port; + ecmd->base.phy_address = ahw->physical_port; + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); + return status; } -int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) +int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter, + const struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = adapter->ahw->port_config; int status = 0; /* 83xx devices do not support Half duplex */ - if (ecmd->duplex == DUPLEX_HALF) { - netdev_info(adapter->netdev, - "Half duplex mode not supported\n"); - return -EINVAL; + if (ecmd->base.duplex == DUPLEX_HALF) { + netdev_info(adapter->netdev, + "Half duplex mode not supported\n"); + return -EINVAL; } - if (ecmd->autoneg) { + if (ecmd->base.autoneg) { ahw->port_config |= QLC_83XX_AUTONEG_ENABLE; ahw->port_config |= (QLC_83XX_100_CAPABLE | QLC_83XX_1G_CAPABLE | QLC_83XX_10G_CAPABLE); } else { /* force speed */ ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE; - switch (ethtool_cmd_speed(ecmd)) { + switch (ecmd->base.speed) { case SPEED_10: ahw->port_config &= ~(QLC_83XX_100_CAPABLE | QLC_83XX_1G_CAPABLE | diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 331ae2c20f40..3dfe8e27b51c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -628,8 +628,10 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *); void 
qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *); -int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); -int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); +int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter, + struct ethtool_link_ksettings *ecmd); +int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter, + const struct ethtool_link_ksettings *ecmd); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, struct ethtool_pauseparam *); int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 0a2318cad34d..9a869c15d8bf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -285,42 +285,43 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) sizeof(drvinfo->version)); } -static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) +static int qlcnic_82xx_get_link_ksettings(struct qlcnic_adapter *adapter, + struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 speed, reg; int check_sfp_module = 0, err = 0; u16 pcifn = ahw->pci_func; + u32 supported, advertising; /* read which mode */ if (adapter->ahw->port_type == QLCNIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | + advertising = (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); - ecmd->duplex = adapter->ahw->link_duplex; - ecmd->autoneg = adapter->ahw->link_autoneg; + ecmd->base.speed = adapter->ahw->link_speed; + ecmd->base.duplex = adapter->ahw->link_duplex; + ecmd->base.autoneg = adapter->ahw->link_autoneg; } else if (adapter->ahw->port_type == QLCNIC_XGBE) { u32 val = 0; val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err); if (val == QLCNIC_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; + supported = SUPPORTED_1000baseT_Full; + advertising = ADVERTISED_1000baseT_Full; } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; } if (netif_running(adapter->netdev) && ahw->has_link_events) { @@ -331,73 +332,72 @@ static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; } - ethtool_cmd_speed_set(ecmd, ahw->link_speed); - ecmd->autoneg = ahw->link_autoneg; - ecmd->duplex = ahw->link_duplex; + ecmd->base.speed = ahw->link_speed; + ecmd->base.autoneg = ahw->link_autoneg; + ecmd->base.duplex = ahw->link_duplex; goto skip; } - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - ecmd->autoneg = AUTONEG_DISABLE; + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; + ecmd->base.autoneg = AUTONEG_DISABLE; } else return -EIO; skip: - ecmd->phy_address = adapter->ahw->physical_port; - ecmd->transceiver = 
XCVR_EXTERNAL; + ecmd->base.phy_address = adapter->ahw->physical_port; switch (adapter->ahw->board_type) { case QLCNIC_BRDTYPE_P3P_REF_QG: case QLCNIC_BRDTYPE_P3P_4_GB: case QLCNIC_BRDTYPE_P3P_4_GB_MM: - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = adapter->ahw->link_autoneg; + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + ecmd->base.port = PORT_TP; + ecmd->base.autoneg = adapter->ahw->link_autoneg; break; case QLCNIC_BRDTYPE_P3P_IMEZ: case QLCNIC_BRDTYPE_P3P_XG_LOM: case QLCNIC_BRDTYPE_P3P_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_MII; + advertising |= ADVERTISED_MII; + ecmd->base.port = PORT_MII; + ecmd->base.autoneg = AUTONEG_DISABLE; break; case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + supported |= SUPPORTED_TP; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; case QLCNIC_BRDTYPE_P3P_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_FIBRE; + ecmd->base.autoneg = AUTONEG_DISABLE; break; case QLCNIC_BRDTYPE_P3P_10G_TP: if (adapter->ahw->port_type == QLCNIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= + ecmd->base.autoneg = AUTONEG_DISABLE; + supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; + ecmd->base.port = PORT_FIBRE; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; } else { - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= + ecmd->base.autoneg = AUTONEG_ENABLE; + supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; + ecmd->base.port = PORT_TP; } break; default: @@ -412,47 +412,52 @@ skip: case LINKEVENT_MODULE_OPTICAL_SRLR: case LINKEVENT_MODULE_OPTICAL_LRM: case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; + ecmd->base.port = PORT_FIBRE; break; case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; + ecmd->base.port = PORT_TP; break; default: - ecmd->port = PORT_OTHER; + ecmd->base.port = PORT_OTHER; } } + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); + return 0; } -static int qlcnic_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int qlcnic_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); if (qlcnic_82xx_check(adapter)) - return qlcnic_82xx_get_settings(adapter, ecmd); + return 
qlcnic_82xx_get_link_ksettings(adapter, ecmd); else if (qlcnic_83xx_check(adapter)) - return qlcnic_83xx_get_settings(adapter, ecmd); + return qlcnic_83xx_get_link_ksettings(adapter, ecmd); return -EIO; } static int qlcnic_set_port_config(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) + const struct ethtool_link_ksettings *ecmd) { u32 ret = 0, config = 0; /* read which mode */ - if (ecmd->duplex) + if (ecmd->base.duplex) config |= 0x1; - if (ecmd->autoneg) + if (ecmd->base.autoneg) config |= 0x2; - switch (ethtool_cmd_speed(ecmd)) { + switch (ecmd->base.speed) { case SPEED_10: config |= (0 << 8); break; @@ -475,7 +480,8 @@ static int qlcnic_set_port_config(struct qlcnic_adapter *adapter, return ret; } -static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int qlcnic_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { u32 ret = 0; struct qlcnic_adapter *adapter = netdev_priv(dev); @@ -484,16 +490,16 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EOPNOTSUPP; if (qlcnic_83xx_check(adapter)) - ret = qlcnic_83xx_set_settings(adapter, ecmd); + ret = qlcnic_83xx_set_link_ksettings(adapter, ecmd); else ret = qlcnic_set_port_config(adapter, ecmd); if (!ret) return ret; - adapter->ahw->link_speed = ethtool_cmd_speed(ecmd); - adapter->ahw->link_duplex = ecmd->duplex; - adapter->ahw->link_autoneg = ecmd->autoneg; + adapter->ahw->link_speed = ecmd->base.speed; + adapter->ahw->link_duplex = ecmd->base.duplex; + adapter->ahw->link_autoneg = ecmd->base.autoneg; if (!netif_running(dev)) return 0; @@ -1822,8 +1828,6 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) } const struct ethtool_ops qlcnic_ethtool_ops = { - .get_settings = qlcnic_get_settings, - .set_settings = qlcnic_set_settings, .get_drvinfo = qlcnic_get_drvinfo, .get_regs_len = qlcnic_get_regs_len, .get_regs = qlcnic_get_regs, @@ -1850,10 +1854,11 @@ const struct ethtool_ops qlcnic_ethtool_ops = { .get_dump_flag = qlcnic_get_dump_flag, .get_dump_data = qlcnic_get_dump_data, .set_dump = qlcnic_set_dump, + .get_link_ksettings = qlcnic_get_link_ksettings, + .set_link_ksettings = qlcnic_set_link_ksettings, }; const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { - .get_settings = qlcnic_get_settings, .get_drvinfo = qlcnic_get_drvinfo, .get_regs_len = qlcnic_get_regs_len, .get_regs = qlcnic_get_regs, @@ -1872,12 +1877,13 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { .set_coalesce = qlcnic_set_intr_coalesce, .set_msglevel = qlcnic_set_msglevel, .get_msglevel = qlcnic_get_msglevel, + .get_link_ksettings = qlcnic_get_link_ksettings, }; const struct ethtool_ops qlcnic_ethtool_failed_ops = { - .get_settings = qlcnic_get_settings, .get_drvinfo = qlcnic_get_drvinfo, .set_msglevel = qlcnic_set_msglevel, .get_msglevel = qlcnic_get_msglevel, .set_dump = qlcnic_set_dump, + .get_link_ksettings = qlcnic_get_link_ksettings, }; -- cgit v1.2.3 From 45ee2440a33f1c8768cd37a365154343eb5589e3 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 19 Feb 2017 23:21:41 +0100 Subject: net: qlogic: qlge: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 36 ++++++++++++++----------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 5dade1fd08b8..31f40148fa5c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -375,28 +375,34 @@ ql_get_ethtool_stats(struct net_device *ndev, } } -static int ql_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) +static int ql_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *ecmd) { struct ql_adapter *qdev = netdev_priv(ndev); + u32 supported, advertising; + + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - ecmd->transceiver = XCVR_EXTERNAL; if ((qdev->link_status & STS_LINK_TYPE_MASK) == STS_LINK_TYPE_10GBASET) { - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - ecmd->autoneg = AUTONEG_ENABLE; + supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->base.port = PORT_TP; + ecmd->base.autoneg = AUTONEG_ENABLE; } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_FIBRE; } - ethtool_cmd_speed_set(ecmd, SPEED_10000); - ecmd->duplex = DUPLEX_FULL; + ecmd->base.speed = SPEED_10000; + ecmd->base.duplex = DUPLEX_FULL; + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); return 0; } @@ -706,7 +712,6 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value) } const struct ethtool_ops qlge_ethtool_ops = { - .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, .get_wol = ql_get_wol, .set_wol = ql_set_wol, @@ -724,5 +729,6 @@ const struct ethtool_ops qlge_ethtool_ops = { .get_sset_count = ql_get_sset_count, .get_strings = ql_get_strings, .get_ethtool_stats = ql_get_ethtool_stats, + .get_link_ksettings = ql_get_link_ksettings, }; -- cgit v1.2.3 From 99e5582730eb18c5ab4402ba1edc73ec3fd5ba2b Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:39 +0300 Subject: net: ethernet: aquantia: Removed extra assignment for skb->dev. This assignment is not needed. Signed-off-by: Pavel Belous Reviewed-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index fed6ac51559f..22bb75eff274 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -209,7 +209,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) goto err_exit; } - skb->dev = ndev; skb_put(skb, buff->len); } else { skb = netdev_alloc_skb(ndev, ETH_HLEN); -- cgit v1.2.3 From 15e32a5e1e016f40712c3c843c84078385c49db2 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:40 +0300 Subject: net: ethernet: aquantia: Removed busy_count field. busy_count field and is_busy flag is not needed at all. 
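For reference, the qlcnic and qlge conversions earlier in this series both follow the standard recipe for moving from .get_settings to .get_link_ksettings: keep building the legacy SUPPORTED_*/ADVERTISED_* masks in local u32 variables, move speed/duplex/port/autoneg under ecmd->base, and convert the u32 masks into the new link-mode bitmaps at the end. A minimal sketch of that shape for a hypothetical fixed-10G device is shown next (assumes <linux/ethtool.h> and <linux/netdevice.h>); the foo_* name and the hard-coded modes are illustrative assumptions, not code from any driver in this series.

static int foo_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	/* Hypothetical driver callback, shown only to illustrate the
	 * legacy-u32 -> link_modes conversion helpers.
	 */
	u32 supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
	u32 advertising = ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE;

	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.autoneg = AUTONEG_DISABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

The conversion helpers let a driver keep its existing u32 bookkeeping internally while still reporting the extended link-mode bitmaps, which is the pattern both patches above rely on.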
Signed-off-by: Pavel Belous Reviewed-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 11 ----------- drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 1 - 2 files changed, 12 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index aa22a7ce710b..a153750523bd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -122,14 +122,11 @@ static void aq_nic_service_timer_cb(unsigned long param) struct aq_nic_s *self = (struct aq_nic_s *)param; struct net_device *ndev = aq_nic_get_ndev(self); int err = 0; - bool is_busy = false; unsigned int i = 0U; struct aq_hw_link_status_s link_status; struct aq_ring_stats_rx_s stats_rx; struct aq_ring_stats_tx_s stats_tx; - atomic_inc(&self->header.busy_count); - is_busy = true; if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) goto err_exit; @@ -170,8 +167,6 @@ static void aq_nic_service_timer_cb(unsigned long param) ndev->stats.tx_errors = stats_tx.errors; err_exit: - if (is_busy) - atomic_dec(&self->header.busy_count); mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); } @@ -574,16 +569,12 @@ __acquires(&ring->lock) unsigned int trys = AQ_CFG_LOCK_TRYS; int err = 0; bool is_nic_in_bad_state; - bool is_busy = false; struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX]; frags = skb_shinfo(skb)->nr_frags + 1; ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; - atomic_inc(&self->header.busy_count); - is_busy = true; - if (frags > AQ_CFG_SKB_FRAGS_MAX) { dev_kfree_skb_any(skb); goto err_exit; @@ -629,8 +620,6 @@ __acquires(&ring->lock) } err_exit: - if (is_busy) - atomic_dec(&self->header.busy_count); return err; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h index 4446bd90fd86..f6012b34abe6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h @@ -19,7 +19,6 @@ struct aq_obj_s { spinlock_t lock; /* spinlock for nic/rings processing */ atomic_t flags; - atomic_t busy_count; }; static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) -- cgit v1.2.3 From 5513e16421cb9538f4e394734e4b96ea0d857b23 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:41 +0300 Subject: net: ethernet: aquantia: Fixes for aq_ndev_change_mtu 1) Removed unnecessary comparison "old_mtu == new_mtu". This check is not needed. The function aq_ndev_change_mtu won't be called if the MTU has not changed. 2) Removed extra assignment ndev->mtu = new_mtu; This assignment is already done inside __dev_set_mtu(). 3) Use core MTU checking for min_mtu. Signed-off-by: Pavel Belous Signed-off-by: David S.
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 12 +----------- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 1 + 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index c17c70adef0d..45c769e8c22a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -100,20 +100,10 @@ err_exit: static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - int err = 0; + int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); - if (new_mtu == ndev->mtu) { - err = 0; - goto err_exit; - } - if (new_mtu < 68) { - err = -EINVAL; - goto err_exit; - } - err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); if (err < 0) goto err_exit; - ndev->mtu = new_mtu; if (netif_running(ndev)) { aq_ndev_close(ndev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index a153750523bd..4b8d074a8450 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -214,6 +214,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, SET_NETDEV_DEV(ndev, dev); ndev->if_port = port; + ndev->min_mtu = ETH_MIN_MTU; self->ndev = ndev; self->aq_pci_func = aq_pci_func; -- cgit v1.2.3 From 14861e9de2c2ffe404fb41cc34150f09a39acb44 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:42 +0300 Subject: net: ethernet: aquantia: Using module_pci_driver. Remove boilerplate code by using macro module_pci_driver. Signed-off-by: Pavel Belous Reviewed-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 45c769e8c22a..91c66f577906 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -242,22 +242,4 @@ static struct pci_driver aq_pci_ops = { .resume = aq_pci_resume, }; -static int __init aq_module_init(void) -{ - int err = 0; - - err = pci_register_driver(&aq_pci_ops); - if (err < 0) - goto err_exit; - -err_exit: - return err; -} - -static void __exit aq_module_exit(void) -{ - pci_unregister_driver(&aq_pci_ops); -} - -module_init(aq_module_init); -module_exit(aq_module_exit); +module_pci_driver(aq_pci_ops); -- cgit v1.2.3 From 362f37b28ba52115be9b30134375b199ac612df4 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:43 +0300 Subject: net: ethernet: aquantia: Superfluous initialization of "err". Fixed superfluous initialization of err. Signed-off-by: Pavel Belous Reviewed-by: Lino Sanfilippo Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 91c66f577906..dad63623be6a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -87,14 +87,8 @@ err_exit: static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - int err = 0; - - err = aq_nic_xmit(aq_nic, skb); - if (err < 0) - goto err_exit; -err_exit: - return err; + return aq_nic_xmit(aq_nic, skb); } static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) -- cgit v1.2.3 From d5919aeba1cc7a2b90210256d617625fceaf4e06 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:44 +0300 Subject: net: ethernet: aquantia: Fixed missing rtnl_unlock. rtnl_unlock should be called if error occurred. Signed-off-by: Pavel Belous Reviewed-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 4b8d074a8450..646314c6f237 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -932,7 +932,7 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) if (!netif_running(self->ndev)) { err = 0; - goto err_exit; + goto out; } rtnl_lock(); if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { @@ -957,8 +957,9 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) netif_device_attach(self->ndev); netif_tx_start_all_queues(self->ndev); } - rtnl_unlock(); err_exit: + rtnl_unlock(); +out: return err; } -- cgit v1.2.3 From b350d7b8b8c99d67dc9a21eeeb1f3564c5213cbd Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:45 +0300 Subject: net: ethernet: aquantia: Using NETDEV_TX_OK instead 0. Use NETDEV_TX_OK as the return value for successful transmission. Signed-off-by: Pavel Belous Reviewed-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 646314c6f237..019bcc725715 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -568,7 +568,7 @@ __acquires(&ring->lock) unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; unsigned int tc = 0U; unsigned int trys = AQ_CFG_LOCK_TRYS; - int err = 0; + int err = NETDEV_TX_OK; bool is_nic_in_bad_state; struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX]; -- cgit v1.2.3 From df9000ef9609c8a5e57783d0667ba7f39f52ed1e Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:46 +0300 Subject: net: ethernet: aquantia: Null pointer check for aq_nic_ndev_alloc. We should check for a null pointer for aq_nic_ndev_alloc instead netdev_priv. Signed-off-by: Pavel Belous Reviewed-by: Lino Sanfilippo Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 019bcc725715..a8a27c5e8aeb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -202,12 +202,13 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, int err = 0; ndev = aq_nic_ndev_alloc(); - self = netdev_priv(ndev); - if (!self) { - err = -EINVAL; + if (!ndev) { + err = -ENOMEM; goto err_exit; } + self = netdev_priv(ndev); + ndev->netdev_ops = ndev_ops; ndev->ethtool_ops = et_ops; -- cgit v1.2.3 From 55629109d934b2c1b25457424fa72eb94e8c2a26 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:47 +0300 Subject: net: ethernet: aquantia: Call netdev_register after all initialized. netdev_register should be called when everything is initialized. Also we should use net_device->reg_state field instead own "is_ndev_registered" flag to avoid any race. Signed-off-by: Pavel Belous Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 10 +++++----- drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h | 1 - 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index a8a27c5e8aeb..bce312a26e7c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -261,16 +261,16 @@ int aq_nic_ndev_register(struct aq_nic_s *self) ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); } #endif - err = register_netdev(self->ndev); - if (err < 0) - goto err_exit; - self->is_ndev_registered = true; netif_carrier_off(self->ndev); for (i = AQ_CFG_VECS_MAX; i--;) aq_nic_ndev_queue_stop(self, i); + err = register_netdev(self->ndev); + if (err < 0) + goto err_exit; + err_exit: return err; } @@ -293,7 +293,7 @@ void aq_nic_ndev_free(struct aq_nic_s *self) if (!self->ndev) goto err_exit; - if (self->is_ndev_registered) + if (self->ndev->reg_state == NETREG_REGISTERED) unregister_netdev(self->ndev); if (self->aq_hw) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h index f81738a71c42..e7d2711dc165 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h @@ -22,7 +22,6 @@ struct aq_nic_s { unsigned int aq_vecs; unsigned int packet_filter; unsigned int power_state; - bool is_ndev_registered; u8 port; struct aq_hw_ops aq_hw_ops; struct aq_hw_caps_s aq_hw_caps; -- cgit v1.2.3 From c0788f7463f138bb1d30552b3edac442781b4e11 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:48 +0300 Subject: net: ethernet: aquantia: Fixed incorrect buff->len calculation. rxd_wb->pkt_len is the total length of the packet. If we received a large packet (with length > AQ_CFG_RX_FRAME_MAX) then we will get multiple buffers. We need to fix the length of the last buffer. Signed-off-by: Pavel Belous Signed-off-by: David S. 
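A note on the & -> % change in the hw_atl_a0/hw_atl_b0 hunks that follow: x & (N - 1) equals x % N only when N is a power of two, whereas the modulo form is correct for any N, which is presumably why it was chosen now that AQ_CFG_RX_FRAME_MAX is no longer assumed to be a single power-of-two-sized buffer (the motivation is an inference, not stated in the commit). For example, with N = 2048 a pkt_len of 5000 gives 5000 & (N - 1) = 904 = 5000 % N, but with N = 3000 it gives 5000 & (N - 1) = 896 while 5000 % N = 2000.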
Miller --- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 4 ++-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 1f388054a6c7..a2b746a2dd50 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -659,8 +659,8 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, } if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { - buff->len = (rxd_wb->pkt_len & - (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = rxd_wb->pkt_len % + AQ_CFG_RX_FRAME_MAX; buff->len = buff->len ? buff->len : AQ_CFG_RX_FRAME_MAX; buff->next = 0U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index e7e694f693bd..cab2931dab9a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -673,8 +673,8 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, } if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { - buff->len = (rxd_wb->pkt_len & - (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = rxd_wb->pkt_len % + AQ_CFG_RX_FRAME_MAX; buff->len = buff->len ? buff->len : AQ_CFG_RX_FRAME_MAX; buff->next = 0U; -- cgit v1.2.3 From 89b643889b1f56d8b53728f6153a4237c849784b Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:49 +0300 Subject: net: ethernet: aquantia: Fixed memory allocation if AQ_CFG_RX_FRAME_MAX > 1 page. We should allocate the number of pages based on the config parameter AQ_CFG_RX_FRAME_MAX. Signed-off-by: Pavel Belous Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 22bb75eff274..51f4e7f5e132 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -270,6 +270,8 @@ err_exit: int aq_ring_rx_fill(struct aq_ring_s *self) { + unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + + (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1; struct aq_ring_buff_s *buff = NULL; int err = 0; int i = 0; @@ -282,7 +284,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff->len = AQ_CFG_RX_FRAME_MAX; buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | - __GFP_COMP, 0); + __GFP_COMP, pages_order); if (!buff->page) { err = -ENOMEM; goto err_exit; -- cgit v1.2.3 From e399553d233678687ce4b149c822194d17e07675 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Feb 2017 22:36:50 +0300 Subject: net: ethernet: aquantia: Copying tx buffers is not needed. This fix removes copying of tx buffers. Now we use ring->buff_ring directly. Signed-off-by: Pavel Belous Signed-off-by: David S.
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 170 +++++++++++++---------- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 19 --- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 3 - 3 files changed, 97 insertions(+), 95 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index bce312a26e7c..ee78444bfb88 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -468,95 +468,116 @@ err_exit: return err; } -static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self, - struct sk_buff *skb, - struct aq_ring_buff_s *dx) +static unsigned int aq_nic_map_skb(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_s *ring) { unsigned int ret = 0U; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int frag_count = 0U; + unsigned int dx = ring->sw_tail; + struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; - dx->flags = 0U; - dx->len = skb_headlen(skb); - dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len, - DMA_TO_DEVICE); - dx->len_pkt = skb->len; - dx->is_sop = 1U; - dx->is_mapped = 1U; + if (unlikely(skb_is_gso(skb))) { + dx_buff->flags = 0U; + dx_buff->len_pkt = skb->len; + dx_buff->len_l2 = ETH_HLEN; + dx_buff->len_l3 = ip_hdrlen(skb); + dx_buff->len_l4 = tcp_hdrlen(skb); + dx_buff->mss = skb_shinfo(skb)->gso_size; + dx_buff->is_txc = 1U; + + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + ++ret; + } + + dx_buff->flags = 0U; + dx_buff->len = skb_headlen(skb); + dx_buff->pa = dma_map_single(aq_nic_get_dev(self), + skb->data, + dx_buff->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) + goto exit; + + dx_buff->len_pkt = skb->len; + dx_buff->is_sop = 1U; + dx_buff->is_mapped = 1U; ++ret; if (skb->ip_summed == CHECKSUM_PARTIAL) { - dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U; - dx->is_tcp_cso = + dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? + 1U : 0U; + dx_buff->is_tcp_cso = (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; - dx->is_udp_cso = + dx_buff->is_udp_cso = (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 
1U : 0U; } for (; nr_frags--; ++frag_count) { - unsigned int frag_len; + unsigned int frag_len = 0U; dma_addr_t frag_pa; skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; frag_len = skb_frag_size(frag); - frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, frag_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) + goto mapping_error; + while (frag_len > AQ_CFG_TX_FRAME_MAX) { - ++dx; - ++ret; - dx->flags = 0U; - dx->len = AQ_CFG_TX_FRAME_MAX; - dx->pa = frag_pa; - dx->is_mapped = 1U; + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + + dx_buff->flags = 0U; + dx_buff->len = AQ_CFG_TX_FRAME_MAX; + dx_buff->pa = frag_pa; + dx_buff->is_mapped = 1U; frag_len -= AQ_CFG_TX_FRAME_MAX; frag_pa += AQ_CFG_TX_FRAME_MAX; + ++ret; } - ++dx; - ++ret; + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; - dx->flags = 0U; - dx->len = frag_len; - dx->pa = frag_pa; - dx->is_mapped = 1U; + dx_buff->flags = 0U; + dx_buff->len = frag_len; + dx_buff->pa = frag_pa; + dx_buff->is_mapped = 1U; + ++ret; } - dx->is_eop = 1U; - dx->skb = skb; - - return ret; -} - -static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self, - struct sk_buff *skb, - struct aq_ring_buff_s *dx) -{ - dx->flags = 0U; - dx->len_pkt = skb->len; - dx->len_l2 = ETH_HLEN; - dx->len_l3 = ip_hdrlen(skb); - dx->len_l4 = tcp_hdrlen(skb); - dx->mss = skb_shinfo(skb)->gso_size; - dx->is_txc = 1U; - return 1U; -} - -static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, - struct aq_ring_buff_s *dx) -{ - unsigned int ret = 0U; - - if (unlikely(skb_is_gso(skb))) { - ret = aq_nic_map_skb_lso(self, skb, dx); - ++dx; + dx_buff->is_eop = 1U; + dx_buff->skb = skb; + goto exit; + +mapping_error: + for (dx = ring->sw_tail; + ret > 0; + --ret, dx = aq_ring_next_dx(ring, dx)) { + dx_buff = &ring->buff_ring[dx]; + + if (!dx_buff->is_txc && dx_buff->pa) { + if (unlikely(dx_buff->is_sop)) { + dma_unmap_single(aq_nic_get_dev(self), + dx_buff->pa, + dx_buff->len, + DMA_TO_DEVICE); + } else { + dma_unmap_page(aq_nic_get_dev(self), + dx_buff->pa, + dx_buff->len, + DMA_TO_DEVICE); + } + } } - ret += aq_nic_map_skb_frag(self, skb, dx); - +exit: return ret; } @@ -571,7 +592,6 @@ __acquires(&ring->lock) unsigned int trys = AQ_CFG_LOCK_TRYS; int err = NETDEV_TX_OK; bool is_nic_in_bad_state; - struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX]; frags = skb_shinfo(skb)->nr_frags + 1; @@ -595,23 +615,27 @@ __acquires(&ring->lock) do { if (spin_trylock(&ring->header.lock)) { - frags = aq_nic_map_skb(self, skb, &buffers[0]); - - aq_ring_tx_append_buffs(ring, &buffers[0], frags); - - err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, - ring, frags); - if (err >= 0) { - if (aq_ring_avail_dx(ring) < - AQ_CFG_SKB_FRAGS_MAX + 1) - aq_nic_ndev_queue_stop(self, ring->idx); + frags = aq_nic_map_skb(self, skb, ring); + + if (likely(frags)) { + err = self->aq_hw_ops.hw_ring_tx_xmit( + self->aq_hw, + ring, frags); + if (err >= 0) { + if (aq_ring_avail_dx(ring) < + AQ_CFG_SKB_FRAGS_MAX + 1) + aq_nic_ndev_queue_stop( + self, + ring->idx); + + ++ring->stats.tx.packets; + ring->stats.tx.bytes += skb->len; + } + } else { + err = NETDEV_TX_BUSY; } - spin_unlock(&ring->header.lock); - if (err >= 0) { - ++ring->stats.tx.packets; - ring->stats.tx.bytes += skb->len; - } + spin_unlock(&ring->header.lock); break; } } while (--trys); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 51f4e7f5e132..0358e6072d45 100644 --- 
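/*
 * The aq_nic_map_skb() rework above uses the usual TX mapping pattern: map
 * the linear part of the skb with dma_map_single(), map each fragment with
 * skb_frag_dma_map(), check every result with dma_mapping_error(), and on
 * failure unwind whatever was already mapped. A stripped-down, generic
 * sketch of that pattern follows (assumes <linux/skbuff.h> and
 * <linux/dma-mapping.h>); it stores plain dma_addr_t values instead of the
 * driver's aq_ring_buff_s descriptors, so it is illustrative only.
 */
static int sketch_map_skb(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *addrs)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	addrs[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addrs[0]))
		return -ENOMEM;

	for (f = 0; f < nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		addrs[f + 1] = skb_frag_dma_map(dev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[f + 1]))
			goto unwind;
	}
	return 0;

unwind:
	/* Unmap the fragments mapped so far, then the linear part. */
	while (f--)
		dma_unmap_page(dev, addrs[f + 1],
			       skb_frag_size(&skb_shinfo(skb)->frags[f]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, addrs[0], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}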
a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -104,25 +104,6 @@ int aq_ring_init(struct aq_ring_s *self) return 0; } -void aq_ring_tx_append_buffs(struct aq_ring_s *self, - struct aq_ring_buff_s *buffer, - unsigned int buffers) -{ - if (likely(self->sw_tail + buffers < self->size)) { - memcpy(&self->buff_ring[self->sw_tail], buffer, - sizeof(buffer[0]) * buffers); - } else { - unsigned int first_part = self->size - self->sw_tail; - unsigned int second_part = buffers - first_part; - - memcpy(&self->buff_ring[self->sw_tail], buffer, - sizeof(buffer[0]) * first_part); - - memcpy(&self->buff_ring[0], &buffer[first_part], - sizeof(buffer[0]) * second_part); - } -} - void aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index fb296b3fa7fd..257254645068 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -146,9 +146,6 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, int aq_ring_init(struct aq_ring_s *self); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); -void aq_ring_tx_append_buffs(struct aq_ring_s *ring, - struct aq_ring_buff_s *buffer, - unsigned int buffers); void aq_ring_tx_clean(struct aq_ring_s *self); int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); int aq_ring_rx_fill(struct aq_ring_s *self); -- cgit v1.2.3 From 670dde55c6f182b8a159f0484be92b89d82107e1 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 20 Feb 2017 22:43:30 +0200 Subject: qed: Release CQ resource under lock on failure The CQ resource pool is protected by a spin lock. When a CQ creation fails, it is now deallocated under that lock as well. Signed-off-by: Ram Amrani Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_roce.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index c3c8c5018e93..50fdcd3fa9b4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -948,7 +948,9 @@ static int qed_rdma_create_cq(void *rdma_cxt, err: /* release allocated icid */ + spin_lock_bh(&p_info->lock); qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); + spin_unlock_bh(&p_info->lock); DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); return rc; -- cgit v1.2.3 From c5212b943d4b52a7d9e0d9f747e7ad59c50d31f1 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 20 Feb 2017 22:43:31 +0200 Subject: qed: Read queue state before releasing buffer Currently the state is read only after the buffers are released. Signed-off-by: Ram Amrani Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_roce.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 50fdcd3fa9b4..65d5b9a942f9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1768,13 +1768,13 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, if (rc) goto err_resp; - dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), - p_resp_ramrod_res, resp_ramrod_res_phys); - out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn); rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag), ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), + p_resp_ramrod_res, resp_ramrod_res_phys); + if (!(qp->req_offloaded)) { /* Don't send query qp for the requester */ out_params->sq_psn = qp->sq_psn; @@ -1815,9 +1815,6 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, if (rc) goto err_req; - dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), - p_req_ramrod_res, req_ramrod_res_phys); - out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn); sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG); @@ -1825,6 +1822,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), + p_req_ramrod_res, req_ramrod_res_phys); + out_params->draining = false; if (rq_err_state) -- cgit v1.2.3 From 300c0d7c232da572cb640f3bc803554f74e08f69 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 20 Feb 2017 22:43:32 +0200 Subject: qed: Don't free a QP more than once If QP is in reset state then there are no resources to free so avoid freeing any. Signed-off-by: Ram Amrani Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_roce.c | 49 ++++++++++++++++-------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 65d5b9a942f9..d9ff6b28591c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1849,6 +1849,7 @@ err_resp: static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 num_invalidated_mw = 0; u32 num_bound_mw = 0; u32 start_cid; @@ -1863,35 +1864,39 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return -EINVAL; } - rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw); - if (rc) - return rc; + if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { + rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, + &num_invalidated_mw); + if (rc) + return rc; - /* Send destroy requester ramrod */ - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw); - if (rc) - return rc; + /* Send destroy requester ramrod */ + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, + &num_bound_mw); + if (rc) + return rc; - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } + if (num_invalidated_mw != num_bound_mw) { + DP_NOTICE(p_hwfn, + "number of invalidate memory windows is different from bounded ones\n"); + return -EINVAL; + } - spin_lock_bh(&p_hwfn->p_rdma_info->lock); + spin_lock_bh(&p_rdma_info->lock); - start_cid = qed_cxt_get_proto_cid_start(p_hwfn, - p_hwfn->p_rdma_info->proto); + start_cid = qed_cxt_get_proto_cid_start(p_hwfn, + p_rdma_info->proto); - /* Release responder's icid */ - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, - qp->icid - start_cid); + /* Release responder's icid */ + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, + qp->icid - start_cid); - /* Release requester's icid */ - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, - qp->icid + 1 - start_cid); + /* Release requester's icid */ + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, + qp->icid + 1 - start_cid); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + spin_unlock_bh(&p_rdma_info->lock); + } return 0; } -- cgit v1.2.3 From c2dedf8773e873474535bd4a158609b9eda5403d Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 20 Feb 2017 22:43:33 +0200 Subject: qed: Reserve doorbell BAR space for present CPUs Reserving doorbell BAR space according to the currently active CPUs may result in a bug if disabled CPUs are later enabled but no doorbell space was reserved for them. Signed-off-by: Ram Amrani Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 5ee7f040c50a..d6c5a8165b5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -913,7 +913,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Either EDPM is mandatory, or we are attempting to allocate a * WID per CPU. 
*/ - n_cpus = num_active_cpus(); + n_cpus = num_present_cpus(); rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); } -- cgit v1.2.3 From 0e0b80a9a7181cbdbb965a6b4750574932106e31 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Mon, 20 Feb 2017 22:43:34 +0200 Subject: qede: Initialize lock and slowpath workqueue early Need to make sure the slowpath workqueue and the qede lock are ready for the registration of the netdevice, as once registered there's no guarantee those wouldn't be used. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d163e72aa2a6..7a8e07d0e01c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -854,6 +854,13 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (rc) goto err3; + /* Prepare the lock prior to the registeration of the netdev, * as once it's registered we might reach flows requiring it * [it's even possible to reach a flow needing it directly * from there, although it's unlikely]. */ + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); + mutex_init(&edev->qede_lock); rc = register_netdev(edev->ndev); if (rc) { DP_NOTICE(edev, "Cannot register net-device\n"); @@ -878,8 +885,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_set_dcbnl_ops(edev->ndev); #endif - INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); - mutex_init(&edev->qede_lock); edev->rx_copybreak = QEDE_RX_HDR_SIZE; DP_INFO(edev, "Ending successfully qede probe\n"); -- cgit v1.2.3 From 885185dfc49bde35b45046bb6b7384eca68e404f Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Mon, 20 Feb 2017 22:43:35 +0200 Subject: qede: Free netdevice only after stopping slowpath qed needs to be informed of the removal of the qede interface prior to its actual removal, as qede has some registered callbacks that might get called async to the removal flow. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7a8e07d0e01c..88453ed1a5bf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -956,14 +956,20 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) if (edev->xdp_prog) bpf_prog_put(edev->xdp_prog); - free_netdev(ndev); - /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); if (system_state == SYSTEM_POWER_OFF) return; qed_ops->common->remove(cdev); + /* Since this can happen out-of-sync with other flows, + * don't release the netdevice until after slowpath stop + * has been called to guarantee various other contexts + * [e.g., QED register callbacks] won't break anything when + * accessing the netdevice.
+ */ + free_netdev(ndev); + dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } -- cgit v1.2.3 From 33b2fbd0a8d3c556b802d8753ac5bb3e8f90e988 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Mon, 20 Feb 2017 22:43:36 +0200 Subject: qed: Reflect PF link when initializing VF VF learns of the current link state via its bulletin board, which might reflect either the physical link state or some user-configured logical state. Whenever the physical link changes or whenever such a configuration is explicitly made by the user, the PF driver would update the bulletin that the VF reads. But if neither has happened - i.e., the PF still hasn't got a physical link up and no additional configuration was done - the VF wouldn't have valid link information available. Simply reflect the physical link state whenever the VF is initialized. The user could then affect it however he wants. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 90 ++++++++++++++++------------- 1 file changed, 51 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 3f4bf31f45e0..29ed785f1dc2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -831,10 +831,52 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, vf->num_sbs = 0; } +static void qed_iov_set_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *params, + struct qed_mcp_link_state *link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iov_vf_init_params *p_params) { + struct qed_mcp_link_capabilities link_caps; + struct qed_mcp_link_params link_params; + struct qed_mcp_link_state link_state; u8 num_of_vf_avaiable_chains = 0; struct qed_vf_info *vf = NULL; u16 qid, num_irqs; @@ -923,6 +965,15 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, p_queue->fw_tx_qid, p_queue->fw_cid); } + /* Update the link configuration in bulletin */ + memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), + sizeof(link_params)); + memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); +
memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), + sizeof(link_caps)); + qed_iov_set_link(p_hwfn, p_params->rel_vf_id, + &link_params, &link_state, &link_caps); + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); if (!rc) { vf->b_init = true; @@ -934,45 +985,6 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, return rc; } -static void qed_iov_set_link(struct qed_hwfn *p_hwfn, - u16 vfid, - struct qed_mcp_link_params *params, - struct qed_mcp_link_state *link, - struct qed_mcp_link_capabilities *p_caps) -{ - struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, - vfid, - false); - struct qed_bulletin_content *p_bulletin; - - if (!p_vf) - return; - - p_bulletin = p_vf->bulletin.p_virt; - p_bulletin->req_autoneg = params->speed.autoneg; - p_bulletin->req_adv_speed = params->speed.advertised_speeds; - p_bulletin->req_forced_speed = params->speed.forced_speed; - p_bulletin->req_autoneg_pause = params->pause.autoneg; - p_bulletin->req_forced_rx = params->pause.forced_rx; - p_bulletin->req_forced_tx = params->pause.forced_tx; - p_bulletin->req_loopback = params->loopback_mode; - - p_bulletin->link_up = link->link_up; - p_bulletin->speed = link->speed; - p_bulletin->full_duplex = link->full_duplex; - p_bulletin->autoneg = link->an; - p_bulletin->autoneg_complete = link->an_complete; - p_bulletin->parallel_detection = link->parallel_detection; - p_bulletin->pfc_enabled = link->pfc_enabled; - p_bulletin->partner_adv_speed = link->partner_adv_speed; - p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; - p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; - p_bulletin->partner_adv_pause = link->partner_adv_pause; - p_bulletin->sfp_tx_fault = link->sfp_tx_fault; - - p_bulletin->capability_speed = p_caps->speed_capabilities; -} - static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rel_vf_id) { -- cgit v1.2.3 From afe981d664aeeebc8d1bcbd7d2070b5432edaecb Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Mon, 20 Feb 2017 22:43:37 +0200 Subject: qede: Prevent index problems in loopback test Driver currently utilizes the same loop variable in two nested loops. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index c02754ddc0e9..897953133245 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1333,7 +1333,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; - int i, rc = 0; + int i, iter, rc = 0; u8 *data_ptr; for_each_queue(i) { @@ -1352,7 +1352,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) * enabled. This is because the queue 0 is configured as the default * queue and that the loopback traffic is not IP. 
*/ - for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { + for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) { if (!qede_has_rx_work(rxq)) { usleep_range(100, 200); continue; } @@ -1399,7 +1399,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) qed_chain_recycle_consumed(&rxq->rx_comp_ring); } - if (i == QEDE_SELFTEST_POLL_COUNT) { + if (iter == QEDE_SELFTEST_POLL_COUNT) { DP_NOTICE(edev, "Failed to receive the traffic\n"); return -1; } -- cgit v1.2.3 From 85750d74fba5a7c31309c038c5f9f019edc6ea31 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Mon, 20 Feb 2017 22:43:38 +0200 Subject: qed: Don't allocate SBs using main PTT Flows accessing registers require the flow to hold a PTT entry. To protect 'major' load/unload flows a main_ptt is pre-allocated to guarantee such flows wouldn't be blocked by PTT being unavailable. Status block initialization currently uses the main_ptt which is incorrect, as this flow might run concurrently to others [E.g., loading qedr while toggling qede]. That would have dire effects as it means registers' access to device breaks and further read/writes might access incorrect addresses. Instead, when initializing status blocks acquire/release a PTT as part of the flow. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 942c03ca3b59..4440ae7ec85b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1072,6 +1072,7 @@ static u32 qed_sb_init(struct qed_dev *cdev, enum qed_sb_type type) { struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; int hwfn_index; u16 rel_sb_id; u8 n_hwfns; @@ -1093,8 +1094,18 @@ static u32 qed_sb_init(struct qed_dev *cdev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", hwfn_index, rel_sb_id, sb_id); - rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, - sb_virt_addr, sb_phy_addr, rel_sb_id); + if (IS_PF(p_hwfn->cdev)) { + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, + sb_phy_addr, rel_sb_id); + qed_ptt_release(p_hwfn, p_ptt); + } else { + rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, + sb_phy_addr, rel_sb_id); + } return rc; } -- cgit v1.2.3 From 65ed2ffd640578166e4ec149573bcf1d10f81b81 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Mon, 20 Feb 2017 22:43:39 +0200 Subject: qed*: Fix link indication race Driver changes the link properties via communication with the management firmware, and re-reads the resulting link status when it receives an indication that the link has changed. However, there are certain scenarios where such indications might be missing, and so driver also re-reads the current link results without attention in several places. Specifically, it does so during load and when resetting the link. This creates a race where driver might reflect incorrect link status - e.g., when explicit reading of the link status is switched by attention with the changed configuration. Correct this flow with a lock synchronizing the handling of the link indications [both explicit requests and attention]. Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 12 +++++++++--- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 18 ++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 6 ++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 1 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 3 +++ drivers/net/ethernet/qlogic/qede/qede_main.c | 5 ----- 6 files changed, 33 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 4440ae7ec85b..eef30a598b40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1146,12 +1146,18 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) if (!cdev) return -ENODEV; - if (IS_VF(cdev)) - return 0; - /* The link should be set only once per PF */ hwfn = &cdev->hwfns[0]; + /* When VF wants to set link, force it to read the bulletin instead. + * This mimics the PF behavior, where a noitification [both immediate + * and possible later] would be generated when changing properties. + */ + if (IS_VF(cdev)) { + qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); + return 0; + } + ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EBUSY; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 7624a38cbd39..314022df3469 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -192,6 +192,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Initialize the MFW spinlock */ spin_lock_init(&p_info->lock); + spin_lock_init(&p_info->link_lock); return 0; @@ -610,6 +611,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, u8 max_bw, min_bw; u32 status = 0; + /* Prevent SW/attentions from doing this at the same time */ + spin_lock_bh(&p_hwfn->mcp_info->link_lock); + p_link = &p_hwfn->mcp_info->link_output; memset(p_link, 0, sizeof(*p_link)); if (!b_reset) { @@ -624,7 +628,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link indications\n"); - return; + goto out; } if (p_hwfn->b_drv_link_init) @@ -731,6 +735,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); qed_link_update(p_hwfn); +out: + spin_unlock_bh(&p_hwfn->mcp_info->link_lock); } int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) @@ -780,9 +786,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return rc; } - /* Reset the link status if needed */ - if (!b_up) - qed_mcp_handle_link_change(p_hwfn, p_ptt, true); + /* Mimic link-change attention, done for several reasons: + * - On reset, there's no guarantee MFW would trigger + * an attention. + * - On initialization, older MFWs might not indicate link change + * during LFA, so we'll never get an UP indication. 
+ */ + qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 0792224ac219..368e88de146c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -485,7 +485,13 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ ((_p_hwfn)->cdev->num_ports_in_engines * 2)) struct qed_mcp_info { + /* Spinlock used for protecting the access to the MFW mailbox */ spinlock_t lock; + + /* Spinlock used for syncing SW link-changes and link-changes + * originating from attention context. + */ + spinlock_t link_lock; bool block_mb_sending; u32 public_base; u32 drv_mb_addr; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 0a2e3a36d2cf..fc08cc2da6a7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -254,6 +254,7 @@ enum qed_iov_wq_flag { QED_IOV_WQ_STOP_WQ_FLAG, QED_IOV_WQ_FLR_FLAG, QED_IOV_WQ_TRUST_FLAG, + QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, }; #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 9667059b15bd..15d2855ec563 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1285,6 +1285,9 @@ void qed_iov_vf_task(struct work_struct *work) /* Handle bulletin board changes */ qed_vf_read_bulletin(hwfn, &change); + if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, + &hwfn->iov_task_flags)) + change = 1; if (change) qed_handle_bulletin_change(hwfn); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 88453ed1a5bf..3a78c3f25157 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1872,7 +1872,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, bool is_locked) { struct qed_link_params link_params; - struct qed_link_output link_output; int rc; DP_INFO(edev, "Starting qede load\n"); @@ -1924,11 +1923,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); - /* Query whether link is already-up */ - memset(&link_output, 0, sizeof(link_output)); - edev->ops->common->get_link(edev->cdev, &link_output); qede_roce_dev_event_open(edev); - qede_link_update(edev, &link_output); qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL)); -- cgit v1.2.3 From 4e00338a61998de3502d0428c4f71ffc69772316 Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Mon, 20 Feb 2017 19:25:16 -0500 Subject: bnxt_en: Reject driver probe against all bridge devices There are additional SoC devices that use the same device ID for bridge and NIC devices. The bnxt driver should reject probe against all bridge devices since it's meant to be used with only endpoint devices. Signed-off-by: Ray Jui Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 71f9a1894db9..f4dec1bdd911 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7400,7 +7400,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct bnxt *bp; int rc, max_irqs; - if (pdev->device == 0x16cd && pci_is_bridge(pdev)) + if (pci_is_bridge(pdev)) return -ENODEV; if (version_printed++ == 0) -- cgit v1.2.3 From daf1f1e7841138cb0e48d52c8573a5f064d8f495 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 20 Feb 2017 19:25:17 -0500 Subject: bnxt_en: Fix NULL pointer dereference in a failure path during open. If bnxt_hwrm_ring_free() is called during a failure path in bnxt_open(), it is possible that the completion rings have not been allocated yet. In that case, the completion doorbell has not been initialized, and calling bnxt_disable_int() will crash. Fix it by checking that the completion ring has been initialized before writing to the completion ring doorbell. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f4dec1bdd911..37b9f65f682d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3134,8 +3134,10 @@ static void bnxt_disable_int(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + if (ring->fw_ring_id != INVALID_HW_RING_ID) + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); } } -- cgit v1.2.3 From 17086399c113d933e1202697f85b8f0f82fcb8ce Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Mon, 20 Feb 2017 19:25:18 -0500 Subject: bnxt_en: fix pci cleanup in bnxt_init_one() failure path In the bnxt_init_one() failure path, bar1 and bar2 are not being unmapped. This commit fixes this issue. Reorganize the code so that bnxt_init_one()'s failure path and bnxt_remove_one() can call the same function to do the PCI cleanup. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 76 ++++++++++++++++--------------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 37b9f65f682d..6dacdf1e4d26 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6676,6 +6676,31 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp) return 0; } +static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) +{ + if (bp->bar2) { + pci_iounmap(pdev, bp->bar2); + bp->bar2 = NULL; + } + + if (bp->bar1) { + pci_iounmap(pdev, bp->bar1); + bp->bar1 = NULL; + } + + if (bp->bar0) { + pci_iounmap(pdev, bp->bar0); + bp->bar0 = NULL; + } +} + +static void bnxt_cleanup_pci(struct bnxt *bp) +{ + bnxt_unmap_bars(bp, bp->pdev); + pci_release_regions(bp->pdev); + pci_disable_device(bp->pdev); +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -6763,25 +6788,10 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->current_interval = BNXT_TIMER_INTERVAL; clear_bit(BNXT_STATE_OPEN, &bp->state); - return 0; init_err_release: - if (bp->bar2) { - pci_iounmap(pdev, bp->bar2); - bp->bar2 = NULL; - } - - if (bp->bar1) { - pci_iounmap(pdev, bp->bar1); - bp->bar1 = NULL; - } - - if (bp->bar0) { - pci_iounmap(pdev, bp->bar0); - bp->bar0 = NULL; - } - + bnxt_unmap_bars(bp, pdev); pci_release_regions(pdev); init_err_disable: @@ -7186,17 +7196,12 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_dcb_free(bp); - pci_iounmap(pdev, bp->bar2); - pci_iounmap(pdev, bp->bar1); - pci_iounmap(pdev, bp->bar0); kfree(bp->edev); bp->edev = NULL; if (bp->xdp_prog) bpf_prog_put(bp->xdp_prog); + bnxt_cleanup_pci(bp); free_netdev(dev); - - pci_release_regions(pdev); - pci_disable_device(pdev); } static int bnxt_probe_phy(struct bnxt *bp) @@ -7428,17 +7433,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; - pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); if (rc) - goto init_err; + goto init_err_pci_clean; mutex_init(&bp->hwrm_cmd_lock); rc = bnxt_hwrm_ver_get(bp); if (rc) - goto init_err; + goto init_err_pci_clean; bnxt_hwrm_fw_set_time(bp); @@ -7482,11 +7486,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = bnxt_hwrm_func_drv_rgtr(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); if (rc) - goto init_err; + goto init_err_pci_clean; bp->ulp_probe = bnxt_ulp_probe; @@ -7496,7 +7500,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", rc); rc = -1; - goto init_err; + goto init_err_pci_clean; } rc = bnxt_hwrm_queue_qportcfg(bp); @@ -7504,7 +7508,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", rc); rc = -1; - goto init_err; + goto init_err_pci_clean; } bnxt_hwrm_func_qcfg(bp); @@ -7518,7 +7522,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { netdev_err(bp->dev, "Not enough rings available.\n"); rc = -ENOMEM; - goto init_err; + goto init_err_pci_clean; } /* Default RSS hash 
cfg. */ @@ -7548,15 +7552,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = bnxt_probe_phy(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = bnxt_hwrm_func_reset(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = bnxt_init_int_mode(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = register_netdev(dev); if (rc) @@ -7573,10 +7577,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) init_err_clr_int: bnxt_clear_int_mode(bp); -init_err: - pci_iounmap(pdev, bp->bar0); - pci_release_regions(pdev); - pci_disable_device(pdev); +init_err_pci_clean: + bnxt_cleanup_pci(bp); init_err_free: free_netdev(dev); -- cgit v1.2.3 From 2cf8a897116a4433ca579aff8b0813d8fb2c4473 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 20 Feb 2017 22:59:13 +0100 Subject: net: qualcomm: qca: use new api ethtool_{get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move this driver to new api {get|set}_link_ksettings. As I don't have the hardware, I'd be very pleased if someone may test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/qca_debug.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 8e28234dddad..d145df98feff 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -188,14 +188,16 @@ qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p) } static int -qcaspi_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +qcaspi_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->transceiver = XCVR_INTERNAL; - cmd->supported = SUPPORTED_10baseT_Half; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_HALF; - cmd->port = PORT_OTHER; - cmd->autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; + cmd->base.port = PORT_OTHER; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -295,7 +297,6 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) static const struct ethtool_ops qcaspi_ethtool_ops = { .get_drvinfo = qcaspi_get_drvinfo, .get_link = ethtool_op_get_link, - .get_settings = qcaspi_get_settings, .get_ethtool_stats = qcaspi_get_ethtool_stats, .get_strings = qcaspi_get_strings, .get_sset_count = qcaspi_get_sset_count, @@ -303,6 +304,7 @@ static const struct ethtool_ops qcaspi_ethtool_ops = { .get_regs = qcaspi_get_regs, .get_ringparam = qcaspi_get_ringparam, .set_ringparam = qcaspi_set_ringparam, + .get_link_ksettings = qcaspi_get_link_ksettings, }; void qcaspi_set_ethtool_ops(struct net_device *dev) -- cgit v1.2.3 From 1763413ac2c15f80fa2c2e308ecbf1a1b550a4c3 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Mon, 20 Feb 2017 16:54:18 +0100 Subject: dpaa_eth: implement ioctl() for PHY-related ops This commit adds the ndo_do_ioctl() callback which allows the userspace to access PHY registers, for example. This will make mii-diag and similar tools work. Signed-off-by: Michael Walle Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index bc5a3347fd4a..e2ca107f9d94 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2333,6 +2333,13 @@ static int dpaa_eth_stop(struct net_device *net_dev) return err; } +static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) +{ + if (!net_dev->phydev) + return -EINVAL; + return phy_mii_ioctl(net_dev->phydev, rq, cmd); +} + static const struct net_device_ops dpaa_ops = { .ndo_open = dpaa_open, .ndo_start_xmit = dpaa_start_xmit, @@ -2342,6 +2349,7 @@ static const struct net_device_ops dpaa_ops = { .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, + .ndo_do_ioctl = dpaa_ioctl, }; static int dpaa_napi_add(struct net_device *net_dev) -- cgit v1.2.3 From 017b29c38c0dc8aece778b44fad32adf8862c188 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 20 Feb 2017 11:50:20 +0800 Subject: virito-net: set queues after reset during xdp_set We set queues before reset which will cause a crash[1]. This is because is_xdp_raw_buffer_queue() depends on the old xdp queue pairs number to do the correct detection. So fix this by - passing xdp queue pairs and current queue pairs to virtnet_reset() - change vi->xdp_qp after reset but before refill, to make sure both free_unused_bufs() and refill can make correct detection of XDP. - remove the duplicated queue pairs setting before virtnet_reset() since we will do it inside virtnet_reset() [1] [ 74.328168] general protection fault: 0000 [#1] SMP [ 74.328625] Modules linked in: nfsd xfs libcrc32c virtio_net virtio_pci [ 74.329117] CPU: 0 PID: 2849 Comm: xdp2 Not tainted 4.10.0-rc7+ #499 [ 74.329577] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.1-0-g8891697-prebuilt.qemu-project.org 04/01/2014 [ 74.330424] task: ffff88007a894000 task.stack: ffffc90004388000 [ 74.330844] RIP: 0010:skb_release_head_state+0x28/0x80 [ 74.331298] RSP: 0018:ffffc9000438b8d0 EFLAGS: 00010206 [ 74.331676] RAX: 0000000000000000 RBX: ffff88007ad96300 RCX: 0000000000000000 [ 74.332217] RDX: ffff88007fc137a8 RSI: ffff88007fc0db28 RDI: 0001bf00000001be [ 74.332758] RBP: ffffc9000438b8d8 R08: 000000000005008f R09: 00000000000005f9 [ 74.333274] R10: ffff88007d001700 R11: ffffffff820a8a4d R12: ffff88007ad96300 [ 74.333787] R13: 0000000000000002 R14: ffff880036604000 R15: 000077ff80000000 [ 74.334308] FS: 00007fc70d8a7b40(0000) GS:ffff88007fc00000(0000) knlGS:0000000000000000 [ 74.334891] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 74.335314] CR2: 00007fff4144a710 CR3: 000000007ab56000 CR4: 00000000003406f0 [ 74.335830] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 74.336373] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 74.336895] Call Trace: [ 74.337086] skb_release_all+0xd/0x30 [ 74.337356] consume_skb+0x2c/0x90 [ 74.337607] free_unused_bufs+0x1ff/0x270 [virtio_net] [ 74.337988] ? vp_synchronize_vectors+0x3b/0x60 [virtio_pci] [ 74.338398] virtnet_xdp+0x21e/0x440 [virtio_net] [ 74.338741] dev_change_xdp_fd+0x101/0x140 [ 74.339048] do_setlink+0xcf4/0xd20 [ 74.339304] ? symcmp+0xf/0x20 [ 74.339529] ? mls_level_isvalid+0x52/0x60 [ 74.339828] ? mls_range_isvalid+0x43/0x50 [ 74.340135] ? nla_parse+0xa0/0x100 [ 74.340400] rtnl_setlink+0xd4/0x120 [ 74.340664] ? 
cpumask_next_and+0x30/0x50 [ 74.340966] rtnetlink_rcv_msg+0x7f/0x1f0 [ 74.341259] ? sock_has_perm+0x59/0x60 [ 74.341586] ? napi_consume_skb+0xe2/0x100 [ 74.342010] ? rtnl_newlink+0x890/0x890 [ 74.342435] netlink_rcv_skb+0x92/0xb0 [ 74.342846] rtnetlink_rcv+0x23/0x30 [ 74.343277] netlink_unicast+0x162/0x210 [ 74.343677] netlink_sendmsg+0x2db/0x390 [ 74.343968] sock_sendmsg+0x33/0x40 [ 74.344233] SYSC_sendto+0xee/0x160 [ 74.344482] ? SYSC_bind+0xb0/0xe0 [ 74.344806] ? sock_alloc_file+0x92/0x110 [ 74.345106] ? fd_install+0x20/0x30 [ 74.345360] ? sock_map_fd+0x3f/0x60 [ 74.345586] SyS_sendto+0x9/0x10 [ 74.345790] entry_SYSCALL_64_fastpath+0x1a/0xa9 [ 74.346086] RIP: 0033:0x7fc70d1b8f6d [ 74.346312] RSP: 002b:00007fff4144a708 EFLAGS: 00000246 ORIG_RAX: 000000000000002c [ 74.346785] RAX: ffffffffffffffda RBX: 00000000ffffffff RCX: 00007fc70d1b8f6d [ 74.347244] RDX: 000000000000002c RSI: 00007fff4144a720 RDI: 0000000000000003 [ 74.347683] RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000 [ 74.348544] R10: 0000000000000000 R11: 0000000000000246 R12: 00007fff4144bd90 [ 74.349082] R13: 0000000000000002 R14: 0000000000000002 R15: 00007fff4144cda0 [ 74.349607] Code: 00 00 00 55 48 89 e5 53 48 89 fb 48 8b 7f 58 48 85 ff 74 0e 40 f6 c7 01 74 3d 48 c7 43 58 00 00 00 00 48 8b 7b 68 48 85 ff 74 05 ff 0f 74 20 48 8b 43 60 48 85 c0 74 14 65 8b 15 f3 ab 8d 7e [ 74.351008] RIP: skb_release_head_state+0x28/0x80 RSP: ffffc9000438b8d0 [ 74.351625] ---[ end trace fe6e19fd11cfc80b ]--- Fixes: 2de2f7f40ef9 ("virtio_net: XDP support for adjust_head") Cc: John Fastabend Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 05a83dbc910d..ca489e064f85 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1752,7 +1752,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) return err; } -static int virtnet_reset(struct virtnet_info *vi) +static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp) { struct virtio_device *dev = vi->vdev; int ret; @@ -1770,10 +1770,11 @@ static int virtnet_reset(struct virtnet_info *vi) if (ret) goto err; + vi->xdp_queue_pairs = xdp_qp; ret = virtnet_restore_up(dev); if (ret) goto err; - ret = _virtnet_set_queues(vi, vi->curr_queue_pairs); + ret = _virtnet_set_queues(vi, curr_qp); if (ret) goto err; @@ -1790,7 +1791,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); struct virtnet_info *vi = netdev_priv(dev); struct bpf_prog *old_prog; - u16 oxdp_qp, xdp_qp = 0, curr_qp; + u16 xdp_qp = 0, curr_qp; int i, err; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || @@ -1828,24 +1829,17 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) return PTR_ERR(prog); } - err = _virtnet_set_queues(vi, curr_qp + xdp_qp); - if (err) { - dev_warn(&dev->dev, "XDP Device queue allocation failure.\n"); - goto virtio_queue_err; - } - - oxdp_qp = vi->xdp_queue_pairs; - /* Changing the headroom in buffers is a disruptive operation because * existing buffers must be flushed and reallocated. This will happen * when a xdp program is initially added or xdp is disabled by removing * the xdp program resulting in number of XDP queues changing. 
*/ if (vi->xdp_queue_pairs != xdp_qp) { - vi->xdp_queue_pairs = xdp_qp; - err = virtnet_reset(vi); - if (err) + err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp); + if (err) { + dev_warn(&dev->dev, "XDP reset failure.\n"); goto virtio_reset_err; + } } netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); @@ -1864,12 +1858,6 @@ virtio_reset_err: * error up to user space for resolution. The underlying reset hung on * us so not much we can do here. */ - dev_warn(&dev->dev, "XDP reset failure and queues unstable\n"); - vi->xdp_queue_pairs = oxdp_qp; -virtio_queue_err: - /* On queue set error we can unwind bpf ref count and user space can - * retry this is most likely an allocation failure. - */ if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; -- cgit v1.2.3 From d1892e4ec96aad4b733dfab3ed989718f04f03e9 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 20 Feb 2017 16:32:06 +0100 Subject: rtnl: simplify error return path in rtnl_create_link() There is only one possible error path which reaches the err label, so return ERR_PTR(-ENOMEM) directly if alloc_netdev_mqs() fails. This also allows to omit the err variable. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e3286d32eca5..c4e84c558240 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2358,7 +2358,6 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, unsigned char name_assign_type, const struct rtnl_link_ops *ops, struct nlattr *tb[]) { - int err; struct net_device *dev; unsigned int num_tx_queues = 1; unsigned int num_rx_queues = 1; @@ -2373,11 +2372,10 @@ struct net_device *rtnl_create_link(struct net *net, else if (ops->get_num_rx_queues) num_rx_queues = ops->get_num_rx_queues(); - err = -ENOMEM; dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type, ops->setup, num_tx_queues, num_rx_queues); if (!dev) - goto err; + return ERR_PTR(-ENOMEM); dev_net_set(dev, net); dev->rtnl_link_ops = ops; @@ -2403,9 +2401,6 @@ struct net_device *rtnl_create_link(struct net *net, dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); return dev; - -err: - return ERR_PTR(err); } EXPORT_SYMBOL(rtnl_create_link); -- cgit v1.2.3 From 8bcdc4f3a20be949df54b67e5ae2734daabb5792 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Mon, 20 Feb 2017 08:29:19 -0800 Subject: vxlan: add changelink support This patch adds changelink rtnl op support for vxlan netdevs. code changes involve: - refactor vxlan_newlink into vxlan_nl2conf to be used by vxlan_newlink and vxlan_changelink - vxlan_nl2conf and vxlan_dev_configure take a changelink argument to isolate changelink checks and updates. - Allow changing only a few attributes: - return -EOPNOTSUPP for attributes that cannot be changed for now. Incremental patches can make the non-supported one available in the future if needed. Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 383 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 270 insertions(+), 113 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c5db8f8563c1..87f63f61dcf7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2835,39 +2835,40 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan) } static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, - struct vxlan_config *conf) + struct vxlan_config *conf, + bool changelink) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev), *tmp; struct vxlan_rdst *dst = &vxlan->default_dst; unsigned short needed_headroom = ETH_HLEN; - int err; bool use_ipv6 = false; __be16 default_port = vxlan->cfg.dst_port; struct net_device *lowerdev = NULL; - if (conf->flags & VXLAN_F_GPE) { - /* For now, allow GPE only together with COLLECT_METADATA. - * This can be relaxed later; in such case, the other side - * of the PtP link will have to be provided. - */ - if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || - !(conf->flags & VXLAN_F_COLLECT_METADATA)) { - pr_info("unsupported combination of extensions\n"); - return -EINVAL; + if (!changelink) { + if (conf->flags & VXLAN_F_GPE) { + /* For now, allow GPE only together with + * COLLECT_METADATA. This can be relaxed later; in such + * case, the other side of the PtP link will have to be + * provided. + */ + if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || + !(conf->flags & VXLAN_F_COLLECT_METADATA)) { + pr_info("unsupported combination of extensions\n"); + return -EINVAL; + } + vxlan_raw_setup(dev); + } else { + vxlan_ether_setup(dev); } - vxlan_raw_setup(dev); - } else { - vxlan_ether_setup(dev); + /* MTU range: 68 - 65535 */ + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = ETH_MAX_MTU; + vxlan->net = src_net; } - /* MTU range: 68 - 65535 */ - dev->min_mtu = ETH_MIN_MTU; - dev->max_mtu = ETH_MAX_MTU; - - vxlan->net = src_net; - dst->remote_vni = conf->vni; memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); @@ -2889,12 +2890,14 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, return -EINVAL; } - if (conf->remote_ifindex) { + if (conf->remote_ifindex && + conf->remote_ifindex != vxlan->cfg.remote_ifindex) { lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); dst->remote_ifindex = conf->remote_ifindex; if (!lowerdev) { - pr_info("ifindex %d does not exist\n", dst->remote_ifindex); + pr_info("ifindex %d does not exist\n", + dst->remote_ifindex); return -ENODEV; } @@ -2913,7 +2916,8 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); needed_headroom = lowerdev->hard_header_len; - } else if (vxlan_addr_multicast(&dst->remote_ip)) { + } else if (!conf->remote_ifindex && + vxlan_addr_multicast(&dst->remote_ip)) { pr_info("multicast destination requires interface to be specified\n"); return -EINVAL; } @@ -2953,6 +2957,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, if (!vxlan->cfg.age_interval) vxlan->cfg.age_interval = FDB_AGE_DEFAULT; + if (changelink) + return 0; + list_for_each_entry(tmp, &vn->vxlan_list, next) { if (tmp->cfg.vni == conf->vni && (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 || @@ -2965,147 +2972,296 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, } } - dev->ethtool_ops = &vxlan_ethtool_ops; - - /* create an fdb entry for a valid default destination */ - if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, - &vxlan->default_dst.remote_ip, - NUD_REACHABLE|NUD_PERMANENT, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->cfg.dst_port, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_ifindex, - NTF_SELF); - if (err) - return err; - } - - err = register_netdevice(dev); - if (err) { - vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); - return err; - } - - list_add(&vxlan->next, &vn->vxlan_list); - return 0; } -static int vxlan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) +static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], + struct net_device *dev, struct vxlan_config *conf, + bool changelink) { - struct vxlan_config conf; + struct vxlan_dev *vxlan = netdev_priv(dev); - memset(&conf, 0, sizeof(conf)); + memset(conf, 0, sizeof(*conf)); - if (data[IFLA_VXLAN_ID]) - conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); + /* if changelink operation, start with old existing cfg */ + if (changelink) + memcpy(conf, &vxlan->cfg, sizeof(*conf)); + + if (data[IFLA_VXLAN_ID]) { + __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); + + if (changelink && (vni != conf->vni)) + return -EOPNOTSUPP; + conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); + } if (data[IFLA_VXLAN_GROUP]) { - conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); + conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); } else if (data[IFLA_VXLAN_GROUP6]) { if (!IS_ENABLED(CONFIG_IPV6)) return -EPFNOSUPPORT; - conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); - conf.remote_ip.sa.sa_family = AF_INET6; + conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); + conf->remote_ip.sa.sa_family = AF_INET6; } if (data[IFLA_VXLAN_LOCAL]) { - conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); - conf.saddr.sa.sa_family = AF_INET; + conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); + conf->saddr.sa.sa_family = AF_INET; } else if (data[IFLA_VXLAN_LOCAL6]) { if (!IS_ENABLED(CONFIG_IPV6)) return -EPFNOSUPPORT; /* TODO: respect scope id */ - conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); - conf.saddr.sa.sa_family = AF_INET6; + conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); + conf->saddr.sa.sa_family = AF_INET6; } if (data[IFLA_VXLAN_LINK]) - conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); + conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); if (data[IFLA_VXLAN_TOS]) - conf.tos = 
nla_get_u8(data[IFLA_VXLAN_TOS]); + conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); if (data[IFLA_VXLAN_TTL]) - conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); + conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); if (data[IFLA_VXLAN_LABEL]) - conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & + conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & IPV6_FLOWLABEL_MASK; - if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) - conf.flags |= VXLAN_F_LEARN; + if (data[IFLA_VXLAN_LEARNING]) { + if (nla_get_u8(data[IFLA_VXLAN_LEARNING])) { + conf->flags |= VXLAN_F_LEARN; + } else { + conf->flags &= ~VXLAN_F_LEARN; + vxlan->flags &= ~VXLAN_F_LEARN; + } + } else if (!changelink) { + /* default to learn on a new device */ + conf->flags |= VXLAN_F_LEARN; + } - if (data[IFLA_VXLAN_AGEING]) - conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); + if (data[IFLA_VXLAN_AGEING]) { + if (changelink) + return -EOPNOTSUPP; + conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); + } - if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) - conf.flags |= VXLAN_F_PROXY; + if (data[IFLA_VXLAN_PROXY]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_PROXY])) + conf->flags |= VXLAN_F_PROXY; + } - if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) - conf.flags |= VXLAN_F_RSC; + if (data[IFLA_VXLAN_RSC]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_RSC])) + conf->flags |= VXLAN_F_RSC; + } - if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) - conf.flags |= VXLAN_F_L2MISS; + if (data[IFLA_VXLAN_L2MISS]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_L2MISS])) + conf->flags |= VXLAN_F_L2MISS; + } - if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) - conf.flags |= VXLAN_F_L3MISS; + if (data[IFLA_VXLAN_L3MISS]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_L3MISS])) + conf->flags |= VXLAN_F_L3MISS; + } - if (data[IFLA_VXLAN_LIMIT]) - conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + if (data[IFLA_VXLAN_LIMIT]) { + if (changelink) + return -EOPNOTSUPP; + conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + } - if (data[IFLA_VXLAN_COLLECT_METADATA] && - nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) - conf.flags |= VXLAN_F_COLLECT_METADATA; + if (data[IFLA_VXLAN_COLLECT_METADATA]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) + conf->flags |= VXLAN_F_COLLECT_METADATA; + } if (data[IFLA_VXLAN_PORT_RANGE]) { - const struct ifla_vxlan_port_range *p - = nla_data(data[IFLA_VXLAN_PORT_RANGE]); - conf.port_min = ntohs(p->low); - conf.port_max = ntohs(p->high); + if (!changelink) { + const struct ifla_vxlan_port_range *p + = nla_data(data[IFLA_VXLAN_PORT_RANGE]); + conf->port_min = ntohs(p->low); + conf->port_max = ntohs(p->high); + } else { + return -EOPNOTSUPP; + } } - if (data[IFLA_VXLAN_PORT]) - conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); + if (data[IFLA_VXLAN_PORT]) { + if (changelink) + return -EOPNOTSUPP; + conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); + } - if (data[IFLA_VXLAN_UDP_CSUM] && - !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) - conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX; + if (data[IFLA_VXLAN_UDP_CSUM]) { + if (changelink) + return -EOPNOTSUPP; + if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) + conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX; + } - if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] && - nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) - conf.flags |= 
VXLAN_F_UDP_ZERO_CSUM6_TX; + if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) + conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; + } - if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] && - nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) - conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; + if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) + conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; + } - if (data[IFLA_VXLAN_REMCSUM_TX] && - nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) - conf.flags |= VXLAN_F_REMCSUM_TX; + if (data[IFLA_VXLAN_REMCSUM_TX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) + conf->flags |= VXLAN_F_REMCSUM_TX; + } - if (data[IFLA_VXLAN_REMCSUM_RX] && - nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) - conf.flags |= VXLAN_F_REMCSUM_RX; + if (data[IFLA_VXLAN_REMCSUM_RX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) + conf->flags |= VXLAN_F_REMCSUM_RX; + } + + if (data[IFLA_VXLAN_GBP]) { + if (changelink) + return -EOPNOTSUPP; + conf->flags |= VXLAN_F_GBP; + } + + if (data[IFLA_VXLAN_GPE]) { + if (changelink) + return -EOPNOTSUPP; + conf->flags |= VXLAN_F_GPE; + } + + if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) { + if (changelink) + return -EOPNOTSUPP; + conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL; + } + + if (tb[IFLA_MTU]) { + if (changelink) + return -EOPNOTSUPP; + conf->mtu = nla_get_u32(tb[IFLA_MTU]); + } + + return 0; +} + +static int vxlan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_config conf; + int err; + + err = vxlan_nl2conf(tb, data, dev, &conf, false); + if (err) + return err; + + err = vxlan_dev_configure(src_net, dev, &conf, false); + if (err) + return err; + + dev->ethtool_ops = &vxlan_ethtool_ops; + + /* create an fdb entry for a valid default destination */ + if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { + err = vxlan_fdb_create(vxlan, all_zeros_mac, + &vxlan->default_dst.remote_ip, + NUD_REACHABLE | NUD_PERMANENT, + NLM_F_EXCL | NLM_F_CREATE, + vxlan->cfg.dst_port, + vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_ifindex, + NTF_SELF); + if (err) + return err; + } + + err = register_netdevice(dev); + if (err) { + vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); + return err; + } + + list_add(&vxlan->next, &vn->vxlan_list); + + return 0; +} - if (data[IFLA_VXLAN_GBP]) - conf.flags |= VXLAN_F_GBP; +static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[]) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst *dst = &vxlan->default_dst; + struct vxlan_rdst old_dst; + struct vxlan_config conf; + int err; + + err = vxlan_nl2conf(tb, data, + dev, &conf, true); + if (err) + return err; - if (data[IFLA_VXLAN_GPE]) - conf.flags |= VXLAN_F_GPE; + memcpy(&old_dst, dst, sizeof(struct vxlan_rdst)); - if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) - conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; + err = vxlan_dev_configure(vxlan->net, dev, &conf, true); + if (err) + return err; - if (tb[IFLA_MTU]) - conf.mtu = nla_get_u32(tb[IFLA_MTU]); + /* handle default dst entry */ + if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) { + spin_lock_bh(&vxlan->hash_lock); + if 
(!vxlan_addr_any(&old_dst.remote_ip)) + __vxlan_fdb_delete(vxlan, all_zeros_mac, + old_dst.remote_ip, + vxlan->cfg.dst_port, + old_dst.remote_vni, + old_dst.remote_vni, + old_dst.remote_ifindex, 0); + + if (!vxlan_addr_any(&dst->remote_ip)) { + err = vxlan_fdb_create(vxlan, all_zeros_mac, + &dst->remote_ip, + NUD_REACHABLE | NUD_PERMANENT, + NLM_F_CREATE | NLM_F_APPEND, + vxlan->cfg.dst_port, + dst->remote_vni, + dst->remote_vni, + dst->remote_ifindex, + NTF_SELF); + if (err) { + spin_unlock_bh(&vxlan->hash_lock); + return err; + } + } + spin_unlock_bh(&vxlan->hash_lock); + } - return vxlan_dev_configure(src_net, dev, &conf); + return 0; } static void vxlan_dellink(struct net_device *dev, struct list_head *head) @@ -3261,6 +3417,7 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = { .setup = vxlan_setup, .validate = vxlan_validate, .newlink = vxlan_newlink, + .changelink = vxlan_changelink, .dellink = vxlan_dellink, .get_size = vxlan_get_size, .fill_info = vxlan_fill_info, @@ -3282,7 +3439,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name, if (IS_ERR(dev)) return dev; - err = vxlan_dev_configure(net, dev, conf); + err = vxlan_dev_configure(net, dev, conf, false); if (err < 0) { free_netdev(dev); return ERR_PTR(err); -- cgit v1.2.3 From 8dcd81a993f4f65d22dfecb4360204b428c36cff Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Mon, 20 Feb 2017 08:41:16 -0800 Subject: vxlan: remove unused variable saddr in neigh_reduce MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit silences the below warning: drivers/net/vxlan.c: In function ‘neigh_reduce’: drivers/net/vxlan.c:1599:25: warning: variable ‘saddr’ set but not used [-Wunused-but-set-variable] Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 87f63f61dcf7..556953f53437 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1596,7 +1596,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) struct vxlan_dev *vxlan = netdev_priv(dev); struct nd_msg *msg; const struct ipv6hdr *iphdr; - const struct in6_addr *saddr, *daddr; + const struct in6_addr *daddr; struct neighbour *n; struct inet6_dev *in6_dev; @@ -1605,7 +1605,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) goto out; iphdr = ipv6_hdr(skb); - saddr = &iphdr->saddr; daddr = &iphdr->daddr; msg = (struct nd_msg *)skb_transport_header(skb); -- cgit v1.2.3 From ca4ef4574f1ee5252e2cd365f8f5d5bafd048f32 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 21 Feb 2017 09:33:18 +0100 Subject: ip: fix IP_CHECKSUM handling The skbs processed by ip_cmsg_recv() are not guaranteed to be linear e.g. when sending UDP packets over loopback with MSGMORE. Using csum_partial() on [potentially] the whole skb len is dangerous; instead be on the safe side and use skb_checksum(). Thanks to syzkaller team to detect the issue and provide the reproducer. v1 -> v2: - move the variable declaration in a tighter scope Fixes: ad6f939ab193 ("ip: Add offset parameter to ip_cmsg_recv") Reported-by: Andrey Konovalov Signed-off-by: Paolo Abeni Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/ip_sockglue.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ce1386a67e24..ebd953bc5607 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -116,10 +116,10 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, if (skb->ip_summed != CHECKSUM_COMPLETE) return; - if (offset != 0) - csum = csum_sub(csum, - csum_partial(skb_transport_header(skb) + tlen, - offset, 0)); + if (offset != 0) { + int tend_off = skb_transport_offset(skb) + tlen; + csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0)); + } put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); } -- cgit v1.2.3 From f6b10209b90d48a84f886ab2b317b5d2cbfe3143 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 21 Feb 2017 16:46:28 +0800 Subject: virtio-net: switch to use build_skb() for small buffer This patch switch to use build_skb() for small buffer which can have better performance for both TCP and XDP (since we can work at page before skb creation). It also remove lots of XDP codes since both mergeable and small buffer use page frag during refill now. Before | After XDP_DROP(xdp1) 64B : 11.1Mpps | 14.4Mpps Tested with xdp1/xdp2/xdp_ip_tx_tunnel and netperf. Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 138 ++++++++++++++++++++++------------------------- 1 file changed, 63 insertions(+), 75 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ca489e064f85..bf95016f442a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -41,6 +41,8 @@ module_param(gso, bool, 0444); #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 +#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ #define VIRTIO_XDP_HEADROOM 256 @@ -343,11 +345,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, static bool virtnet_xdp_xmit(struct virtnet_info *vi, struct receive_queue *rq, - struct xdp_buff *xdp, - void *data) + struct xdp_buff *xdp) { struct virtio_net_hdr_mrg_rxbuf *hdr; - unsigned int num_sg, len; + unsigned int len; struct send_queue *sq; unsigned int qp; void *xdp_sent; @@ -358,49 +359,23 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi, /* Free up any pending old buffers before queueing new ones. 
*/ while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { - if (vi->mergeable_rx_bufs) { - struct page *sent_page = virt_to_head_page(xdp_sent); + struct page *sent_page = virt_to_head_page(xdp_sent); - put_page(sent_page); - } else { /* small buffer */ - struct sk_buff *skb = xdp_sent; - - kfree_skb(skb); - } + put_page(sent_page); } - if (vi->mergeable_rx_bufs) { - xdp->data -= sizeof(struct virtio_net_hdr_mrg_rxbuf); - /* Zero header and leave csum up to XDP layers */ - hdr = xdp->data; - memset(hdr, 0, vi->hdr_len); - - num_sg = 1; - sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); - } else { /* small buffer */ - struct sk_buff *skb = data; + xdp->data -= vi->hdr_len; + /* Zero header and leave csum up to XDP layers */ + hdr = xdp->data; + memset(hdr, 0, vi->hdr_len); - /* Zero header and leave csum up to XDP layers */ - hdr = skb_vnet_hdr(skb); - memset(hdr, 0, vi->hdr_len); + sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); - num_sg = 2; - sg_init_table(sq->sg, 2); - sg_set_buf(sq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, sq->sg + 1, - xdp->data - xdp->data_hard_start, - xdp->data_end - xdp->data); - } - err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, - data, GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); if (unlikely(err)) { - if (vi->mergeable_rx_bufs) { - struct page *page = virt_to_head_page(xdp->data); + struct page *page = virt_to_head_page(xdp->data); - put_page(page); - } else /* small buffer */ - kfree_skb(data); - /* On error abort to avoid unnecessary kick */ + put_page(page); return false; } @@ -408,39 +383,50 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi, return true; } +static unsigned int virtnet_get_headroom(struct virtnet_info *vi) +{ + return vi->xdp_queue_pairs ? 
VIRTIO_XDP_HEADROOM : 0; +} + static struct sk_buff *receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len) { - struct sk_buff * skb = buf; + struct sk_buff *skb; struct bpf_prog *xdp_prog; - + unsigned int xdp_headroom = virtnet_get_headroom(vi); + unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; + unsigned int headroom = vi->hdr_len + header_offset; + unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + unsigned int delta = 0; len -= vi->hdr_len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { - struct virtio_net_hdr_mrg_rxbuf *hdr = buf; + struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; struct xdp_buff xdp; + void *orig_data; u32 act; if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) goto err_xdp; - xdp.data_hard_start = skb->data; - xdp.data = skb->data + VIRTIO_XDP_HEADROOM; + xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; + xdp.data = xdp.data_hard_start + xdp_headroom; xdp.data_end = xdp.data + len; + orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); switch (act) { case XDP_PASS: /* Recalculate length in case bpf program changed it */ - __skb_pull(skb, xdp.data - xdp.data_hard_start); - len = xdp.data_end - xdp.data; + delta = orig_data - xdp.data; break; case XDP_TX: - if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb))) + if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) trace_xdp_exception(vi->dev, xdp_prog, act); rcu_read_unlock(); goto xdp_xmit; @@ -454,13 +440,25 @@ static struct sk_buff *receive_small(struct net_device *dev, } rcu_read_unlock(); - skb_trim(skb, len); + skb = build_skb(buf, buflen); + if (!skb) { + put_page(virt_to_head_page(buf)); + goto err; + } + skb_reserve(skb, headroom - delta); + skb_put(skb, len + delta); + if (!delta) { + buf += header_offset; + memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); + } /* keep zeroed vnet hdr since packet was changed by bpf */ + +err: return skb; err_xdp: rcu_read_unlock(); dev->stats.rx_dropped++; - kfree_skb(skb); + put_page(virt_to_head_page(buf)); xdp_xmit: return NULL; } @@ -621,7 +619,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: - if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, data))) + if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) trace_xdp_exception(vi->dev, xdp_prog, act); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); if (unlikely(xdp_page != page)) @@ -737,7 +735,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } else if (vi->big_packets) { give_pages(rq, buf); } else { - dev_kfree_skb(buf); + put_page(virt_to_head_page(buf)); } return 0; } @@ -780,34 +778,28 @@ frame_err: return 0; } -static unsigned int virtnet_get_headroom(struct virtnet_info *vi) -{ - return vi->xdp_queue_pairs ? 
VIRTIO_XDP_HEADROOM : 0; -} - static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { - int headroom = GOOD_PACKET_LEN + virtnet_get_headroom(vi); + struct page_frag *alloc_frag = &rq->alloc_frag; + char *buf; unsigned int xdp_headroom = virtnet_get_headroom(vi); - struct sk_buff *skb; - struct virtio_net_hdr_mrg_rxbuf *hdr; + int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; int err; - skb = __netdev_alloc_skb_ip_align(vi->dev, headroom, gfp); - if (unlikely(!skb)) + len = SKB_DATA_ALIGN(len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) return -ENOMEM; - skb_put(skb, headroom); - - hdr = skb_vnet_hdr(skb); - sg_init_table(rq->sg, 2); - sg_set_buf(rq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, rq->sg + 1, xdp_headroom, skb->len - xdp_headroom); - - err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + get_page(alloc_frag->page); + alloc_frag->offset += len; + sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, + vi->hdr_len + GOOD_PACKET_LEN); + err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); if (err < 0) - dev_kfree_skb(skb); + put_page(virt_to_head_page(buf)); return err; } @@ -1994,10 +1986,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) { - /* For small receive mode always use kfree_skb variants */ - if (!vi->mergeable_rx_bufs) - return false; - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) return false; else if (q < vi->curr_queue_pairs) @@ -2032,7 +2020,7 @@ static void free_unused_bufs(struct virtnet_info *vi) } else if (vi->big_packets) { give_pages(&vi->rq[i], buf); } else { - dev_kfree_skb(buf); + put_page(virt_to_head_page(buf)); } } } -- cgit v1.2.3 From 8ccde4c562076f280c0321c549d822448b64e565 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Tue, 21 Feb 2017 17:09:19 +0800 Subject: net: sock: Use USEC_PER_SEC macro instead of literal 1000000 The USEC_PER_SEC is used once in sock_set_timeout as the max value of tv_usec. But there are other similar codes which use the literal 1000000 in this file. It is minor cleanup to keep consitent. Signed-off-by: Gao Feng Signed-off-by: David S. 
Miller --- net/core/sock.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index b74356535559..e7d74940e863 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -367,7 +367,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) if (tv.tv_sec == 0 && tv.tv_usec == 0) return 0; if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1)) - *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ); + *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ); return 0; } @@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.tm.tv_usec = 0; } else { v.tm.tv_sec = sk->sk_rcvtimeo / HZ; - v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; + v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ; } break; @@ -1157,7 +1157,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.tm.tv_usec = 0; } else { v.tm.tv_sec = sk->sk_sndtimeo / HZ; - v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; + v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ; } break; -- cgit v1.2.3 From b3bdc3acbb44d74d0b7ba4d97169577a2b46dc88 Mon Sep 17 00:00:00 2001 From: Lee Ryder Date: Tue, 21 Feb 2017 17:40:45 +0800 Subject: macsec: fix validation failed in asynchronous operation. MACSec test failed when asynchronous crypto operations is used. It encounters packet validation failed since macsec_skb_cb(skb)->valid is always 'false'. This patch adds missing "macsec_skb_cb(skb)->valid = true" in macsec_decrypt_done() when "err == 0". Signed-off-by: Ryder Lee Signed-off-by: David S. Miller --- drivers/net/macsec.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 778a77303c49..ff0a5ed3ca80 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -879,6 +879,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) aead_request_free(macsec_skb_cb(skb)->req); + if (!err) + macsec_skb_cb(skb)->valid = true; + rcu_read_lock_bh(); pn = ntohl(macsec_ethhdr(skb)->packet_number); if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { -- cgit v1.2.3 From 239a3b663647869330955ec59caac0100ef9b60a Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:01 +0100 Subject: net: mvpp2: fix DMA address calculation in mvpp2_txq_inc_put() When TX descriptors are filled in, the buffer DMA address is split between the tx_desc->buf_phys_addr field (high-order bits) and tx_desc->packet_offset field (5 low-order bits). However, when we re-calculate the DMA address from the TX descriptor in mvpp2_txq_inc_put(), we do not take tx_desc->packet_offset into account. This means that when the DMA address is not aligned on a 32 bytes boundary, we end up calling dma_unmap_single() with a DMA address that was not the one returned by dma_map_single(). This inconsistency is detected by the kernel when DMA_API_DEBUG is enabled. We fix this problem by properly calculating the DMA address in mvpp2_txq_inc_put(). Cc: Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index c48632048f71..a6992ce34565 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -991,7 +991,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, txq_pcpu->buffs + txq_pcpu->txq_put_index; tx_buf->skb = skb; tx_buf->size = tx_desc->data_size; - tx_buf->phys = tx_desc->buf_phys_addr; + tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset; txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; -- cgit v1.2.3 From d63f9e41f981722b11f42ea2b19798c00fcae0f9 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:02 +0100 Subject: net: mvpp2: remove useless arguments in mvpp2_rx_{pkts, time}_coal_set As noticed by Russell King, the last argument of mvpp2_rx_{pkts,time}_coal_set() is useless, since the packet/time coalescing value is already stored in the 'struct mvpp2_rx_queue *' passed as argument to these functions. So passing the packet/time value as an additional argument, and setting them again in the mvpp2_rx_queue structure is useles. This commit therefore gets rid of this additional argument, assuming the caller has assigned the appropriate value to rxq->pkts_coal or rxq->time_coal before calling the respective functions. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a6992ce34565..dfeee9636c7a 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4379,27 +4379,23 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) * will be generated by HW. 
*/ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq, u32 pkts) + struct mvpp2_rx_queue *rxq) { u32 val; - val = (pkts & MVPP2_OCCUPIED_THRESH_MASK); + val = (rxq->pkts_coal & MVPP2_OCCUPIED_THRESH_MASK); mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); - - rxq->pkts_coal = pkts; } /* Set the time delay in usec before Rx interrupt */ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq, u32 usec) + struct mvpp2_rx_queue *rxq) { u32 val; - val = (port->priv->tclk / USEC_PER_SEC) * usec; + val = (port->priv->tclk / USEC_PER_SEC) * rxq->time_coal; mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); - - rxq->time_coal = usec; } /* Free Tx queue skbuffs */ @@ -4543,8 +4539,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); /* Set coalescing pkts and time */ - mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); - mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); /* Add number of descriptors ready for receiving packets */ mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); @@ -5801,8 +5797,8 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, rxq->time_coal = c->rx_coalesce_usecs; rxq->pkts_coal = c->rx_max_coalesced_frames; - mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); - mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); } for (queue = 0; queue < txq_number; queue++) { -- cgit v1.2.3 From f8b0d5f8cc10f43642f97db6b37d60d765cff34a Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:03 +0100 Subject: net: mvpp2: handle too large value handling in mvpp2_rx_pkts_coal_set() Currently, mvpp2_rx_pkts_coal_set() does the following to avoid setting a too large value for the RX coalescing by packet number: val = (pkts & MVPP2_OCCUPIED_THRESH_MASK); This means that if you set a value that is slightly higher the the maximum number of packets, you in fact get a very low value. It makes a lot more sense to simply check if the value is too high, and if it's too high, limit it to the maximum possible value. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index dfeee9636c7a..679811db51a1 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4381,11 +4381,12 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - u32 val; + if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) + rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; - val = (rxq->pkts_coal & MVPP2_OCCUPIED_THRESH_MASK); mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); + mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, + rxq->pkts_coal); } /* Set the time delay in usec before Rx interrupt */ -- cgit v1.2.3 From ab42676af052e6d3502b31c2dc6b07af08ff126f Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:04 +0100 Subject: net: mvpp2: handle too large value in mvpp2_rx_time_coal_set() When configuring the MVPP2_ISR_RX_THRESHOLD_REG with the RX coalescing time threshold, we do not check for the maximum allowed value supported by the driver, which means we might overflow and use a bogus value. This commit adds a check for this situation, and if a value higher than what is supported by the hardware is provided, then we use the maximum value supported by the hardware. In order to achieve this in a way that avoids overflow and rounding errors, we introduce two utility functions mvpp2_usec_to_cycles() and cycles_to_usec(). Many thanks to Russell King for suggesting this implementation. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 679811db51a1..47fb949178b1 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -154,6 +154,7 @@ /* Interrupt Cause and Mask registers */ #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) +#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) @@ -4389,13 +4390,39 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, rxq->pkts_coal); } +static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) +{ + u64 tmp = (u64)clk_hz * usec; + + do_div(tmp, USEC_PER_SEC); + + return tmp > U32_MAX ? U32_MAX : tmp; +} + +static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) +{ + u64 tmp = (u64)cycles * USEC_PER_SEC; + + do_div(tmp, clk_hz); + + return tmp > U32_MAX ? 
U32_MAX : tmp; +} + /* Set the time delay in usec before Rx interrupt */ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - u32 val; + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + + if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { + rxq->time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + } - val = (port->priv->tclk / USEC_PER_SEC) * rxq->time_coal; mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); } -- cgit v1.2.3 From 36fb7435b6ac4d288a2d4deea8934f9456ab46b6 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:05 +0100 Subject: net: mvpp2: release reference to txq_cpu[] entry after unmapping The mvpp2_txq_bufs_free() function is called upon TX completion to DMA unmap TX buffers, and free the corresponding SKBs. It gets the references to the SKB to free and the DMA buffer to unmap from a per-CPU txq_pcpu data structure. However, the code currently increments the pointer to the next entry before doing the DMA unmap and freeing the SKB. It does not cause any visible problem because for a given SKB the TX completion is guaranteed to take place on the CPU where the TX was started. However, it is much more logical to increment the pointer to the next entry once the current entry has been completely unmapped/released. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 47fb949178b1..5d6b4edcc13b 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4437,13 +4437,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_get_index; - mvpp2_txq_inc_get(txq_pcpu); - dma_unmap_single(port->dev->dev.parent, tx_buf->phys, tx_buf->size, DMA_TO_DEVICE); - if (!tx_buf->skb) - continue; - dev_kfree_skb_any(tx_buf->skb); + if (tx_buf->skb) + dev_kfree_skb_any(tx_buf->skb); + + mvpp2_txq_inc_get(txq_pcpu); } } -- cgit v1.2.3 From 5dfa9e8337b1c8a7e291929e676c167b6ea953e6 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:06 +0100 Subject: net: mvpp2: remove unused 'tx_skb' field of 'struct mvpp2_tx_queue' This commit remove a field of 'struct mvpp2_tx_queue' that is not used anywhere. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 5d6b4edcc13b..27699c8af3a1 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -823,9 +823,6 @@ struct mvpp2_tx_queue { /* Per-CPU control of physical Tx queues */ struct mvpp2_txq_pcpu __percpu *pcpu; - /* Array of transmitted skb */ - struct sk_buff **tx_skb; - u32 done_pkts_coal; /* Virtual address of thex Tx DMA descriptors array */ -- cgit v1.2.3 From 7ef7e1d949cd517d30a3cc6c7f48bd017d81af6b Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:07 +0100 Subject: net: mvpp2: drop useless fields in mvpp2_bm_pool and related code This commit drops dead code from the mvpp2 driver. 
The 'in_use' and 'in_use_thresh' fields of 'struct mvpp2_bm_pool' are incremented/decremented/initialized in various places. But they are only used in one place: if (is_recycle && (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh)) return 0; However 'is_recycle', passed as argument to mvpp2_rx_refill() is always false. So in fact, this code is never reached, and the 'is_recycle' argument is useless. So let's drop this code. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 27699c8af3a1..1a51c71e156c 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -930,10 +930,6 @@ struct mvpp2_bm_pool { /* Ports using BM pool */ u32 port_map; - - /* Occupied buffers indicator */ - atomic_t in_use; - int in_use_thresh; }; struct mvpp2_buff_hdr { @@ -3399,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, bm_pool->size = size; bm_pool->pkt_size = 0; bm_pool->buf_num = 0; - atomic_set(&bm_pool->in_use, 0); return 0; } @@ -3656,7 +3651,6 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, /* Update BM driver with number of buffers added to pool */ bm_pool->buf_num += i; - bm_pool->in_use_thresh = bm_pool->buf_num / 4; netdev_dbg(port->dev, "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", @@ -5014,23 +5008,18 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, - u32 bm, int is_recycle) + struct mvpp2_bm_pool *bm_pool, u32 bm) { struct sk_buff *skb; dma_addr_t phys_addr; - if (is_recycle && - (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh)) - return 0; - /* No recycle or too many buffers are in use, so allocate a new skb */ skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); if (!skb) return -ENOMEM; mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb); - atomic_dec(&bm_pool->in_use); + return 0; } @@ -5156,7 +5145,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, skb = (struct sk_buff *)rx_desc->buf_cookie; - err = mvpp2_rx_refill(port, bm_pool, bm, 0); + err = mvpp2_rx_refill(port, bm_pool, bm); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); goto err_drop_frame; @@ -5167,7 +5156,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, rcvd_pkts++; rcvd_bytes += rx_bytes; - atomic_inc(&bm_pool->in_use); skb_reserve(skb, MVPP2_MH_SIZE); skb_put(skb, rx_bytes); -- cgit v1.2.3 From 3eb2d9982288fbbbdc671af0fee4b9eb71d11b75 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:08 +0100 Subject: net: mvpp2: simplify mvpp2_bm_bufs_add() The mvpp2_bm_bufs_add() currently creates a fake cookie by calling mvpp2_bm_cookie_pool_set(), just to be able to call mvpp2_pool_refill(). But all what mvpp2_pool_refill() does is extract the pool ID from the cookie, and call mvpp2_bm_pool_put() with this ID. Instead of doing this convoluted thing, just call mvpp2_bm_pool_put() directly, since we have the BM pool ID. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 1a51c71e156c..2fcb0080d7f2 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3626,7 +3626,6 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, { struct sk_buff *skb; int i, buf_size, total_size; - u32 bm; dma_addr_t phys_addr; buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); @@ -3640,13 +3639,12 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, return 0; } - bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id); for (i = 0; i < buf_num; i++) { skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); if (!skb) break; - mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb); + mvpp2_bm_pool_put(port, bm_pool->id, (u32)phys_addr, (u32)skb); } /* Update BM driver with number of buffers added to pool */ -- cgit v1.2.3 From a8c249a71dca2992001ad582797c03fca0488d00 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:09 +0100 Subject: net: mvpp2: remove unused register definitions Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 2fcb0080d7f2..6341f344278c 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -253,10 +253,6 @@ #define MVPP2_SRC_ADDR_HIGH 0x28 #define MVPP2_PHY_AN_CFG0_REG 0x34 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) -#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \ - 0x400 + (port) * 0x400) -#define MVPP2_MIB_LATE_COLLISION 0x7c -#define MVPP2_ISR_SUM_MASK_REG 0x220c #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 -- cgit v1.2.3 From 31d7677b91307e2771b6922fd9920542a92d376d Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:10 +0100 Subject: net: mvpp2: fix indentation of MVPP2_EXT_GLOBAL_CTRL_DEFAULT Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 6341f344278c..23f96e9c392f 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -254,7 +254,7 @@ #define MVPP2_PHY_AN_CFG0_REG 0x34 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c -#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 /* Per-port registers */ #define MVPP2_GMAC_CTRL_0_REG 0x0 -- cgit v1.2.3 From 8138affc4999bbe4ca5ed805b36f80b56354339e Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:11 +0100 Subject: net: mvpp2: simplify MVPP2_PRS_RI_* definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some of the MVPP2_PRS_RI_* definitions use the ~(value) syntax, which doesn't compile nicely on 64-bit. Moreover, those definitions are in fact unneeded, since they are always used in combination with a bit mask that ensures only the appropriate bits are modified. Therefore, such definitions should just be set to 0x0. 
In addition, as suggested by Russell King, we change the _MASK definitions to also use the BIT() macro so that it is clear they are related to the values defined afterwards. For example: #define MVPP2_PRS_RI_L2_CAST_MASK 0x600 #define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) #define MVPP2_PRS_RI_L2_MCAST BIT(9) #define MVPP2_PRS_RI_L2_BCAST BIT(10) becomes #define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) #define MVPP2_PRS_RI_L2_UCAST 0x0 #define MVPP2_PRS_RI_L2_MCAST BIT(9) #define MVPP2_PRS_RI_L2_BCAST BIT(10) Because the values (MVPP2_PRS_RI_L2_UCAST, MVPP2_PRS_RI_L2_MCAST and MVPP2_PRS_RI_L2_BCAST) are always applied with MVPP2_PRS_RI_L2_CAST_MASK, and therefore there is no need for MVPP2_PRS_RI_L2_UCAST to be defined as ~(BIT(9) | BIT(10)). It fixes the following warnings when building the driver on a 64-bit platform (which is not possible as of this commit, but will be enabled in a follow-up commit): drivers/net/ethernet/marvell/mvpp2.c: In function ‘mvpp2_prs_mac_promisc_set’: drivers/net/ethernet/marvell/mvpp2.c:524:33: warning: large integer implicitly truncated to unsigned type [-Woverflow] #define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) ^ drivers/net/ethernet/marvell/mvpp2.c:1459:33: note: in expansion of macro ‘MVPP2_PRS_RI_L2_UCAST’ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 23f96e9c392f..ed4abb3d7e23 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -510,28 +510,28 @@ enum mvpp2_tag_type { /* Sram result info bits assignment */ #define MVPP2_PRS_RI_MAC_ME_MASK 0x1 #define MVPP2_PRS_RI_DSA_MASK 0x2 -#define MVPP2_PRS_RI_VLAN_MASK 0xc -#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_NONE 0x0 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) -#define MVPP2_PRS_RI_L2_CAST_MASK 0x600 -#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_UCAST 0x0 #define MVPP2_PRS_RI_L2_MCAST BIT(9) #define MVPP2_PRS_RI_L2_BCAST BIT(10) #define MVPP2_PRS_RI_PPPOE_MASK 0x800 -#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000 -#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_UN 0x0 #define MVPP2_PRS_RI_L3_IP4 BIT(12) #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) #define MVPP2_PRS_RI_L3_IP6 BIT(14) #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000 -#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_UCAST 0x0 #define MVPP2_PRS_RI_L3_MCAST BIT(15) #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 -- cgit v1.2.3 From 0e0372816b9cbd22c82e3e7cd36e8e74c58ba641 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:12 +0100 Subject: net: mvpp2: switch to build_skb() in the RX 
path This commit adapts the mvpp2 RX path to use the build_skb() method. Not only build_skb() is now the recommended mechanism, but it also simplifies the addition of support for the PPv2.2 variant. Indeed, without build_skb(), we have to keep track for each RX descriptor of the physical address of the packet buffer, and the virtual address of the SKB. However, in PPv2.2 running on 64 bits platform, there is not enough space in the descriptor to store the virtual address of the SKB. So having to take care only of the address of the packet buffer, and building the SKB upon reception helps in supporting PPv2.2. The implementation is fairly straightforward: - mvpp2_skb_alloc() is renamed to mvpp2_buf_alloc() and no longer allocates a SKB. Instead, it allocates a buffer using the new mvpp2_frag_alloc() function, with enough space for the data and SKB. - The initialization of the RX buffers in mvpp2_bm_bufs_add() as well as the refill of the RX buffers in mvpp2_rx_refill() is adjusted accordingly. - Finally, the mvpp2_rx() is modified to use build_skb(). Signed-off-by: Thomas Petazzoni Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 79 ++++++++++++++++++++++++++---------- 1 file changed, 57 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index ed4abb3d7e23..735a04e7e66b 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -918,6 +918,7 @@ struct mvpp2_bm_pool { int buf_size; /* Packet size */ int pkt_size; + int frag_size; /* BPPE virtual base address */ u32 *virt_addr; @@ -3354,6 +3355,22 @@ static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(pool->frag_size); + else + return kmalloc(pool->frag_size, GFP_ATOMIC); +} + +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + skb_free_frag(data); + else + kfree(data); +} + /* Buffer Manager configuration routines */ /* Create pool */ @@ -3428,7 +3445,8 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, if (!vaddr) break; - dev_kfree_skb_any((struct sk_buff *)vaddr); + + mvpp2_frag_free(bm_pool, (void *)vaddr); } /* Update BM driver with number of buffers removed from pool */ @@ -3542,29 +3560,28 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } -/* Allocate skb for BM pool */ -static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, - dma_addr_t *buf_phys_addr, - gfp_t gfp_mask) +static void *mvpp2_buf_alloc(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *buf_phys_addr, + gfp_t gfp_mask) { - struct sk_buff *skb; dma_addr_t phys_addr; + void *data; - skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask); - if (!skb) + data = mvpp2_frag_alloc(bm_pool); + if (!data) return NULL; - phys_addr = dma_map_single(port->dev->dev.parent, skb->head, + phys_addr = dma_map_single(port->dev->dev.parent, data, MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) { - dev_kfree_skb_any(skb); + mvpp2_frag_free(bm_pool, data); return NULL; } *buf_phys_addr = phys_addr; - return skb; + return data; } /* Set pool number in a BM 
cookie */ @@ -3620,9 +3637,9 @@ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, static int mvpp2_bm_bufs_add(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, int buf_num) { - struct sk_buff *skb; int i, buf_size, total_size; dma_addr_t phys_addr; + void *buf; buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); total_size = MVPP2_RX_TOTAL_SIZE(buf_size); @@ -3636,11 +3653,11 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, } for (i = 0; i < buf_num; i++) { - skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); - if (!skb) + buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); + if (!buf) break; - mvpp2_bm_pool_put(port, bm_pool->id, (u32)phys_addr, (u32)skb); + mvpp2_bm_pool_put(port, bm_pool->id, (u32)phys_addr, (u32)buf); } /* Update BM driver with number of buffers added to pool */ @@ -3696,6 +3713,9 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, port->priv, new_pool); new_pool->pkt_size = pkt_size; + new_pool->frag_size = + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; /* Allocate buffers for this pool */ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); @@ -3764,6 +3784,8 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) } port_pool->pkt_size = pkt_size; + port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; num = mvpp2_bm_bufs_add(port, port_pool, pkts_num); if (num != pkts_num) { WARN(1, "pool %d: %d of %d allocated\n", @@ -5004,15 +5026,15 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, static int mvpp2_rx_refill(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, u32 bm) { - struct sk_buff *skb; dma_addr_t phys_addr; + void *buf; /* No recycle or too many buffers are in use, so allocate a new skb */ - skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); - if (!skb) + buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); + if (!buf) return -ENOMEM; - mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb); + mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)buf); return 0; } @@ -5104,14 +5126,17 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); struct mvpp2_bm_pool *bm_pool; struct sk_buff *skb; + unsigned int frag_size; dma_addr_t phys_addr; u32 bm, rx_status; int pool, rx_bytes, err; + void *data; rx_done++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; phys_addr = rx_desc->buf_phys_addr; + data = (void *)rx_desc->buf_cookie; bm = mvpp2_bm_cookie_build(rx_desc); pool = mvpp2_bm_cookie_pool_get(bm); @@ -5132,12 +5157,22 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ + mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, rx_desc->buf_cookie); continue; } - skb = (struct sk_buff *)rx_desc->buf_cookie; + if (bm_pool->frag_size > PAGE_SIZE) + frag_size = 0; + else + frag_size = bm_pool->frag_size; + + skb = build_skb(data, frag_size); + if (!skb) { + netdev_warn(port->dev, "skb build failed\n"); + goto err_drop_frame; + } err = mvpp2_rx_refill(port, bm_pool, bm); if (err) { @@ -5151,7 +5186,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, rcvd_pkts++; rcvd_bytes += rx_bytes; - skb_reserve(skb, MVPP2_MH_SIZE); + skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); skb_put(skb, rx_bytes); skb->protocol = eth_type_trans(skb, dev); mvpp2_rx_csum(port, rx_status, skb); -- cgit v1.2.3 
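A minimal, driver-agnostic sketch of the build_skb() receive pattern that the mvpp2 commit above describes, for readers following the conversion: allocate a raw buffer sized for the payload plus headroom plus struct skb_shared_info tailroom, then wrap it in an sk_buff on receive instead of preallocating SKBs. The struct and function names below (demo_pool, demo_pool_init, demo_buf_alloc, demo_rx_one) are hypothetical placeholders, not mvpp2 code; only the kernel helpers they call are real APIs, and this is an assumption-laden illustration rather than the driver's actual implementation.

/*
 * Illustrative sketch only -- not taken from any patch in this log.
 * It restates the generic netdev_alloc_frag() + build_skb() receive
 * pattern described in the mvpp2 build_skb() commit above.  The names
 * demo_pool, demo_pool_init, demo_buf_alloc and demo_rx_one are
 * hypothetical; only the kernel helpers they call are real APIs.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct demo_pool {
	int pkt_size;	/* largest payload the pool must hold */
	int frag_size;	/* payload + headroom + skb_shared_info tailroom */
};

static void demo_pool_init(struct demo_pool *pool, int pkt_size)
{
	pool->pkt_size = pkt_size;
	/* Reserve room for the data and for struct skb_shared_info,
	 * mirroring the ->frag_size bookkeeping added by the patch.
	 */
	pool->frag_size = SKB_DATA_ALIGN(NET_SKB_PAD + pkt_size) +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

/* Allocate a raw RX buffer: page frags when small, kmalloc otherwise. */
static void *demo_buf_alloc(const struct demo_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

/* Wrap a filled buffer in an sk_buff without copying the payload. */
static struct sk_buff *demo_rx_one(struct net_device *dev, void *data,
				   const struct demo_pool *pool, int rx_bytes)
{
	/* build_skb() expects 0 for kmalloc'd buffers, the frag size
	 * for page-fragment buffers.
	 */
	unsigned int frag_size =
		pool->frag_size <= PAGE_SIZE ? pool->frag_size : 0;
	struct sk_buff *skb = build_skb(data, frag_size);

	if (!skb)
		return NULL;	/* caller returns "data" to its pool */

	skb_reserve(skb, NET_SKB_PAD);	/* headroom reserved at alloc time */
	skb_put(skb, rx_bytes);
	skb->protocol = eth_type_trans(skb, dev);
	return skb;
}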
From d3158807e8d6d4877c36dba3d3c64d9c6671c1bc Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 21 Feb 2017 11:28:13 +0100 Subject: net: mvpp2: enable building on 64-bit platforms The mvpp2 is going to be extended to support the Marvell Armada 7K/8K platform, which is ARM64. As a preparation to this work, this commit enables building the mvpp2 driver on ARM64, by: - Adjusting the Kconfig dependency - Fixing the types used in the driver so that they are 32/64-bits compliant. We use dma_addr_t for DMA addresses, and unsigned long for virtual addresses. It is worth mentioning that after this commit, the driver is for now still only used on 32-bits platforms, and will only work on 32-bits platforms. Signed-off-by: Thomas Petazzoni Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/Kconfig | 3 +-- drivers/net/ethernet/marvell/mvpp2.c | 31 ++++++++++++++++++------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index f4b7cf18fb0f..d2555e8b947e 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -83,9 +83,8 @@ config MVNETA_BM config MVPP2 tristate "Marvell Armada 375 network interface support" - depends on MACH_ARMADA_375 || COMPILE_TEST + depends on ARCH_MVEBU || COMPILE_TEST depends on HAS_DMA - depends on !64BIT select MVMDIO ---help--- This driver supports the network interface units in the diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 735a04e7e66b..d00421b9ffea 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3388,7 +3388,8 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, if (!bm_pool->virt_addr) return -ENOMEM; - if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) { + if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, + MVPP2_BM_POOL_PTR_ALIGN)) { dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, bm_pool->phys_addr); dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", @@ -3433,7 +3434,7 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, for (i = 0; i < bm_pool->buf_num; i++) { dma_addr_t buf_phys_addr; - u32 vaddr; + unsigned long vaddr; /* Get buffer virtual address (indirect access) */ buf_phys_addr = mvpp2_read(priv, @@ -3596,14 +3597,15 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) } /* Get pool number from a BM cookie */ -static inline int mvpp2_bm_cookie_pool_get(u32 cookie) +static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) { return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; } /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, - u32 buf_phys_addr, u32 buf_virt_addr) + dma_addr_t buf_phys_addr, + unsigned long buf_virt_addr) { mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr); mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr); @@ -3611,7 +3613,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, /* Release multicast buffer */ static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, - u32 buf_phys_addr, u32 buf_virt_addr, + dma_addr_t buf_phys_addr, + unsigned long buf_virt_addr, int mc_id) { u32 val = 0; @@ -3626,7 +3629,8 @@ static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, /* Refill BM pool */ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, - u32 
phys_addr, u32 cookie) + dma_addr_t phys_addr, + unsigned long cookie) { int pool = mvpp2_bm_cookie_pool_get(bm); @@ -3657,7 +3661,8 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, if (!buf) break; - mvpp2_bm_pool_put(port, bm_pool->id, (u32)phys_addr, (u32)buf); + mvpp2_bm_pool_put(port, bm_pool->id, phys_addr, + (unsigned long)buf); } /* Update BM driver with number of buffers added to pool */ @@ -5034,7 +5039,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port, if (!buf) return -ENOMEM; - mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)buf); + mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf); return 0; } @@ -5076,10 +5081,10 @@ static void mvpp2_buff_hdr_rx(struct mvpp2_port *port, struct mvpp2_buff_hdr *buff_hdr; struct sk_buff *skb; u32 rx_status = rx_desc->status; - u32 buff_phys_addr; - u32 buff_virt_addr; - u32 buff_phys_addr_next; - u32 buff_virt_addr_next; + dma_addr_t buff_phys_addr; + unsigned long buff_virt_addr; + dma_addr_t buff_phys_addr_next; + unsigned long buff_virt_addr_next; int mc_id; int pool_id; @@ -5136,7 +5141,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; phys_addr = rx_desc->buf_phys_addr; - data = (void *)rx_desc->buf_cookie; + data = (void *)(uintptr_t)rx_desc->buf_cookie; bm = mvpp2_bm_cookie_build(rx_desc); pool = mvpp2_bm_cookie_pool_get(bm); -- cgit v1.2.3 From d4af4c0ad0a4d070ef756010a2a10d96ff53d73d Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 21 Feb 2017 14:04:59 +0100 Subject: net/hsr: use eth_hw_addr_random() Use eth_hw_addr_random() to set a random MAC address in order to make sure dev->addr_assign_type will be properly set to NET_ADDR_RANDOM. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- net/hsr/hsr_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index fc65b145f6e7..c73160fb11e7 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -395,7 +395,7 @@ static struct device_type hsr_type = { void hsr_dev_setup(struct net_device *dev) { - random_ether_addr(dev->dev_addr); + eth_hw_addr_random(dev); ether_setup(dev); dev->min_mtu = 0; -- cgit v1.2.3 From 29869d66870a715177bfb505f66a7e0e8bcc89c3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 21 Feb 2017 06:21:47 -0800 Subject: tcp: Revert "tcp: tcp_probe: use spin_lock_bh()" This reverts commit e70ac171658679ecf6bea4bbd9e9325cd6079d2b. jtcp_rcv_established() is in fact called with hard irq being disabled. Initial bug report from Ricardo Nabinger Sanchez [1] still needs to be investigated, but does not look like a TCP bug. [1] https://www.spinics.net/lists/netdev/msg420960.html Signed-off-by: Eric Dumazet Reported-by: kernel test robot Cc: Ricardo Nabinger Sanchez Signed-off-by: David S. 
Miller --- net/ipv4/tcp_probe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 3d063eb37848..f6c50af24a64 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, (fwmark > 0 && skb->mark == fwmark)) && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { - spin_lock_bh(&tcp_probe.lock); + spin_lock(&tcp_probe.lock); /* If log fills, just silently drop */ if (tcp_probe_avail() > 1) { struct tcp_log *p = tcp_probe.log + tcp_probe.head; @@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); } tcp_probe.lastcwnd = tp->snd_cwnd; - spin_unlock_bh(&tcp_probe.lock); + spin_unlock(&tcp_probe.lock); wake_up(&tcp_probe.wait); } -- cgit v1.2.3 From 559c59b238ebb7d39b732b6b08a59693b972e75c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 21 Feb 2017 08:20:56 -0800 Subject: net: napi_watchdog() can use napi_schedule_irqoff() hrtimer handlers run with masked hard IRQ, we can therefore use napi_schedule_irqoff() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index 05d19c6acf94..304f2deae5f9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5089,7 +5089,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) napi = container_of(timer, struct napi_struct, timer); if (napi->gro_list) - napi_schedule(napi); + napi_schedule_irqoff(napi); return HRTIMER_NORESTART; } -- cgit v1.2.3 From d2852a2240509e512712e25de2d0796cda435ecb Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 21 Feb 2017 16:09:33 +0100 Subject: arch: add ARCH_HAS_SET_MEMORY config Currently, there's no good way to test for the presence of set_memory_ro/rw/x/nx() helpers implemented by archs such as x86, arm, arm64 and s390. There's DEBUG_SET_MODULE_RONX and DEBUG_RODATA, however both don't really reflect that: set_memory_*() are also available even when DEBUG_SET_MODULE_RONX is turned off, and DEBUG_RODATA is set by parisc, but doesn't implement above functions. Thus, add ARCH_HAS_SET_MEMORY that is selected by mentioned archs, where generic code can test against this. This also allows later on to move DEBUG_SET_MODULE_RONX out of the arch specific Kconfig to define it only once depending on ARCH_HAS_SET_MEMORY. Suggested-by: Laura Abbott Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- arch/Kconfig | 4 ++++ arch/arm/Kconfig | 1 + arch/arm64/Kconfig | 1 + arch/s390/Kconfig | 1 + arch/x86/Kconfig | 1 + 5 files changed, 8 insertions(+) diff --git a/arch/Kconfig b/arch/Kconfig index bd04eace455c..e8ada79ec71f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -222,6 +222,10 @@ config GENERIC_SMP_IDLE_THREAD config GENERIC_IDLE_POLL_SETUP bool +# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h +config ARCH_HAS_SET_MEMORY + bool + # Select if arch init_task initializer is different to init/init_task.c config ARCH_INIT_TASK bool diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 186c4c214e0a..edae056b2af0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -4,6 +4,7 @@ config ARM select ARCH_CLOCKSOURCE_DATA select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_SET_MEMORY select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 111742126897..1853405a897e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -12,6 +12,7 @@ config ARM64 select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV + select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_USE_CMPXCHG_LOCKREF diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c6722112527d..094deb1abbe7 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -72,6 +72,7 @@ config S390 select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV + select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e487493bbd47..434dd2a1c5f2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -53,6 +53,7 @@ config X86 select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_MMIO_FLUSH select ARCH_HAS_PMEM_API if X86_64 + select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG -- cgit v1.2.3 From 9d876e79df6a2f364b9f2737eacd72ceb27da53a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 21 Feb 2017 16:09:34 +0100 Subject: bpf: fix unlocking of jited image when module ronx not set Eric and Willem reported that they recently saw random crashes when JIT was in use and bisected this to 74451e66d516 ("bpf: make jited programs visible in traces"). Issue was that the consolidation part added bpf_jit_binary_unlock_ro() that would unlock previously made read-only memory back to read-write. However, DEBUG_SET_MODULE_RONX cannot be used for this to test for presence of set_memory_*() functions. We need to use ARCH_HAS_SET_MEMORY instead to fix this; also add the corresponding bpf_jit_binary_lock_ro() to filter.h. Fixes: 74451e66d516 ("bpf: make jited programs visible in traces") Reported-by: Eric Dumazet Reported-by: Willem de Bruijn Bisected-by: Eric Dumazet Signed-off-by: Daniel Borkmann Tested-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- arch/arm64/net/bpf_jit_comp.c | 2 +- arch/s390/net/bpf_jit_comp.c | 2 +- arch/x86/net/bpf_jit_comp.c | 2 +- include/linux/filter.h | 13 +++++++++++-- 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 05d12104d270..a785554916c0 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -898,7 +898,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bpf_flush_icache(header, ctx.image + ctx.idx); - set_memory_ro((unsigned long)header, header->pages); + bpf_jit_binary_lock_ro(header); prog->bpf_func = (void *)ctx.image; prog->jited = 1; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index f1d0e62ec1dd..b49c52a02087 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1327,7 +1327,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) print_fn_code(jit.prg_buf, jit.size_prg); } if (jit.prg_buf) { - set_memory_ro((unsigned long)header, header->pages); + bpf_jit_binary_lock_ro(header); fp->bpf_func = (void *) jit.prg_buf; fp->jited = 1; } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 18a62e208826..32322ce9b405 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1165,7 +1165,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (image) { bpf_flush_icache(header, image + proglen); - set_memory_ro((unsigned long)header, header->pages); + bpf_jit_binary_lock_ro(header); prog->bpf_func = (void *)image; prog->jited = 1; } else { diff --git a/include/linux/filter.h b/include/linux/filter.h index 0c1cc9143cb2..0c167fdee5f7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -551,7 +551,7 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) -#ifdef CONFIG_DEBUG_SET_MODULE_RONX +#ifdef CONFIG_ARCH_HAS_SET_MEMORY static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { set_memory_ro((unsigned long)fp, fp->pages); @@ -562,6 +562,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) set_memory_rw((unsigned long)fp, fp->pages); } +static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +{ + set_memory_ro((unsigned long)hdr, hdr->pages); +} + static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) { set_memory_rw((unsigned long)hdr, hdr->pages); @@ -575,10 +580,14 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { } +static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +{ +} + static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) { } -#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ +#endif /* CONFIG_ARCH_HAS_SET_MEMORY */ static inline struct bpf_binary_header * bpf_jit_binary_hdr(const struct bpf_prog *fp) -- cgit v1.2.3 From 1faaa78f36cb2915ae89138ba5846f87ade85dcb Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 21 Feb 2017 15:27:28 +0100 Subject: bnxt_en: use eth_hw_addr_random() Use eth_hw_addr_random() to set a random MAC address in order to make sure bp->dev->addr_assign_type will be properly set to NET_ADDR_RANDOM. Signed-off-by: Tobias Klauser Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6dacdf1e4d26..235733e91c79 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4552,7 +4552,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) /* overwrite netdev dev_adr with admin VF MAC */ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); } else { - random_ether_addr(bp->dev->dev_addr); + eth_hw_addr_random(bp->dev); rc = bnxt_approve_mac(bp, bp->dev->dev_addr); } return rc; -- cgit v1.2.3 From e623a9e9dec29ae811d11f83d0074ba254aba374 Mon Sep 17 00:00:00 2001 From: Maxime Jayat Date: Tue, 21 Feb 2017 18:35:51 +0100 Subject: net: socket: fix recvmmsg not returning error from sock_error Commit 34b88a68f26a ("net: Fix use after free in the recvmmsg exit path"), changed the exit path of recvmmsg to always return the datagrams variable and modified the error paths to set the variable to the error code returned by recvmsg if necessary. However in the case sock_error returned an error, the error code was then ignored, and recvmmsg returned 0. Change the error path of recvmmsg to correctly return the error code of sock_error. The bug was triggered by using recvmmsg on a CAN interface which was not up. Linux 4.6 and later return 0 in this case while earlier releases returned -ENETDOWN. Fixes: 34b88a68f26a ("net: Fix use after free in the recvmmsg exit path") Signed-off-by: Maxime Jayat Signed-off-by: David S. Miller --- net/socket.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/socket.c b/net/socket.c index b7a63d5bc915..2c1e8677ff2d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2228,8 +2228,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, return err; err = sock_error(sock->sk); - if (err) + if (err) { + datagrams = err; goto out_put; + } entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; -- cgit v1.2.3 From 005c3490e9db23738d91e02788606c0fe4734723 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Tue, 21 Feb 2017 21:47:06 +0200 Subject: Revert "ath10k: Search SMBIOS for OEM board file extension" This reverts commit f2593cb1b29185d38db706cbcbe22ed538720ae1. Paul reported that this patch with older board-2.bin ath10k initialisation fails on Dell XPS 13: ath10k_pci 0000:3a:00.0: failed to fetch board data for bus=pci,vendor=168c, device=003e,subsystem-vendor=1a56,subsystem-device=1535,variant=RV_0520 from ath10k/QCA6174/hw3.0/board-2.bin The reason is that the older board-2.bin does not have the variant version of the image name and ath10k does not fallback to the older naming scheme. Reported-by: Paul Menzel Link: https://bugzilla.kernel.org/show_bug.cgi?id=185621#c9 Fixes: f2593cb1b291 ("ath10k: Search SMBIOS for OEM board file extension") Signed-off-by: Kalle Valo Signed-off-by: David S. 
Miller --- drivers/net/wireless/ath/ath10k/core.c | 84 ++-------------------------------- drivers/net/wireless/ath/ath10k/core.h | 19 -------- 2 files changed, 3 insertions(+), 100 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 59729aa8cd82..dd902b43f8f7 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -18,8 +18,6 @@ #include #include #include -#include -#include #include #include "core.h" @@ -713,72 +711,6 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) return 0; } -static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) -{ - struct ath10k *ar = data; - const char *bdf_ext; - const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; - u8 bdf_enabled; - int i; - - if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) - return; - - if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) { - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "wrong smbios bdf ext type length (%d).\n", - hdr->length); - return; - } - - bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); - if (!bdf_enabled) { - ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); - return; - } - - /* Only one string exists (per spec) */ - bdf_ext = (char *)hdr + hdr->length; - - if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "bdf variant magic does not match.\n"); - return; - } - - for (i = 0; i < strlen(bdf_ext); i++) { - if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "bdf variant name contains non ascii chars.\n"); - return; - } - } - - /* Copy extension name without magic suffix */ - if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), - sizeof(ar->id.bdf_ext)) < 0) { - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", - bdf_ext); - return; - } - - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "found and validated bdf variant smbios_type 0x%x bdf %s\n", - ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); -} - -static int ath10k_core_check_smbios(struct ath10k *ar) -{ - ar->id.bdf_ext[0] = '\0'; - dmi_walk(ath10k_core_check_bdfext, ar); - - if (ar->id.bdf_ext[0] == '\0') - return -ENODATA; - - return 0; -} - static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; @@ -1125,9 +1057,6 @@ err: static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len) { - /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ - char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; - if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", @@ -1137,15 +1066,12 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, goto out; } - if (ar->id.bdf_ext[0] != '\0') - scnprintf(variant, sizeof(variant), ",variant=%s", - ar->id.bdf_ext); - scnprintf(name, name_len, - "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", ath10k_bus_str(ar->hif.bus), ar->id.vendor, ar->id.device, - ar->id.subsystem_vendor, ar->id.subsystem_device, variant); + ar->id.subsystem_vendor, ar->id.subsystem_device); + out: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -2202,10 +2128,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } - ret = ath10k_core_check_smbios(ar); - if (ret) - ath10k_dbg(ar, ATH10K_DBG_BOOT, 
"bdf variant name not set.\n"); - ret = ath10k_core_fetch_board_file(ar); if (ret) { ath10k_err(ar, "failed to fetch board file: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 88d14be7fcce..757242ef52ac 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -69,23 +69,6 @@ #define ATH10K_NAPI_BUDGET 64 #define ATH10K_NAPI_QUOTA_LIMIT 60 -/* SMBIOS type containing Board Data File Name Extension */ -#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 - -/* SMBIOS type structure length (excluding strings-set) */ -#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9 - -/* Offset pointing to Board Data File Name Extension */ -#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8 - -/* Board Data File Name Extension string length. - * String format: BDF__\0 - */ -#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20 - -/* The magic used by QCA spec */ -#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" - struct ath10k; enum ath10k_bus { @@ -815,8 +798,6 @@ struct ath10k { bool bmi_ids_valid; u8 bmi_board_id; u8 bmi_chip_id; - - char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; } id; int fw_api; -- cgit v1.2.3