Diffstat (limited to 'drivers/net/ethernet/intel')
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c          | 103
 drivers/net/ethernet/intel/i40e/i40e_main.c             |  16
 drivers/net/ethernet/intel/i40e/i40e_txrx.c             |  13
 drivers/net/ethernet/intel/i40e/i40e_txrx.h             |   1
 drivers/net/ethernet/intel/i40e/i40e_type.h             |   4
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c      |  43
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h      |   1
 drivers/net/ethernet/intel/i40e/i40e_xsk.c              |  67
 drivers/net/ethernet/intel/i40e/i40e_xsk.h              |   2
 drivers/net/ethernet/intel/ice/ice.h                    |  17
 drivers/net/ethernet/intel/ice/ice_hw_autogen.h         |   3
 drivers/net/ethernet/intel/ice/ice_main.c               |   2
 drivers/net/ethernet/intel/ice/ice_tc_lib.c             | 351
 drivers/net/ethernet/intel/ice/ice_tc_lib.h             |  40
 drivers/net/ethernet/intel/ice/ice_virtchnl.c           |  86
 drivers/net/ethernet/intel/ice/ice_virtchnl.h           |   2
 drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c |   6
 17 files changed, 569 insertions(+), 188 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7e75706f76db..4a6a6e48c615 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
- err = i40e_alloc_rx_bi(&rx_rings[i]);
- if (err)
- goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
@@ -3188,10 +3185,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
if (cmd->flow_type == TCP_V4_FLOW ||
cmd->flow_type == UDP_V4_FLOW) {
- if (i_set & I40E_L3_SRC_MASK)
- cmd->data |= RXH_IP_SRC;
- if (i_set & I40E_L3_DST_MASK)
- cmd->data |= RXH_IP_DST;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i_set & I40E_X722_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_X722_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
} else if (cmd->flow_type == TCP_V6_FLOW ||
cmd->flow_type == UDP_V6_FLOW) {
if (i_set & I40E_L3_V6_SRC_MASK)
@@ -3549,12 +3553,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @hw: hw structure
* @nfc: pointer to user request
* @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
-static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+ struct ethtool_rxnfc *nfc,
+ u64 i_setc)
{
u64 i_set = i_setc;
u64 src_l3 = 0, dst_l3 = 0;
@@ -3573,8 +3580,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
dst_l3 = I40E_L3_V6_DST_MASK;
} else if (nfc->flow_type == TCP_V4_FLOW ||
nfc->flow_type == UDP_V4_FLOW) {
- src_l3 = I40E_L3_SRC_MASK;
- dst_l3 = I40E_L3_DST_MASK;
+ if (hw->mac.type == I40E_MAC_X722) {
+ src_l3 = I40E_X722_L3_SRC_MASK;
+ dst_l3 = I40E_X722_L3_DST_MASK;
+ } else {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ }
} else {
/* Any other flow type are not supported here */
return i_set;
@@ -3592,6 +3604,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
return i_set;
}
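+/* One bit per packet classifier type (PCTYPE); the two 32-bit PFQF_HENA
+ * registers provide 64 PCTYPE enable bits in total.
+ */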
+#define FLOW_PCTYPES_SIZE 64
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
@@ -3604,9 +3617,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- u8 flow_pctype = 0;
+ DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
u64 i_set, i_setc;
+ bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
+
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
@@ -3622,36 +3637,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
@@ -3684,17 +3698,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- if (flow_pctype) {
- i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
- flow_pctype)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
- flow_pctype)) << 32);
- i_set = i40e_get_rss_hash_bits(nfc, i_setc);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
- (u32)i_set);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
- (u32)(i_set >> 32));
- hena |= BIT_ULL(flow_pctype);
+ if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
+ u8 flow_id;
+
+ for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
+ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_id);
+ }
}
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2c07fa8ecfc8..b5dcd15ced36 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- kfree(ring->rx_bi);
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ret = i40e_alloc_rx_bi_zc(ring);
- if (ret)
- return ret;
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
@@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->queue_index);
} else {
- ret = i40e_alloc_rx_bi(ring);
- if (ret)
- return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -13296,6 +13289,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
i40e_reset_and_rebuild(pf, true, true);
}
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, true))
+ return -ENOMEM;
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, false))
+ return -ENOMEM;
+ }
+
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
@@ -13528,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
i40e_queue_pair_disable_irq(vsi, queue_pair);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
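+ /* free the Rx buffers and their DMA mappings while the rings are stopped */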
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 69e67eb6aea7..b97c95f89fa0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1457,14 +1457,6 @@ err:
return -ENOMEM;
}
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
- rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+ rx_ring->rx_bi =
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 41f86e9535a0..768290dc6f48 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 7b3f30beb757..388c3d36d96a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1404,6 +1404,10 @@ struct i40e_lldp_variables {
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
/* INPUT SET MASK for RSS, flow director, and flexible payload */
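+/* X722 reports the L3 source/destination input-set fields at different bit
+ * positions than other i40e devices
+ */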
+#define I40E_X722_L3_SRC_SHIFT 49
+#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT 41
+#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
#define I40E_L3_SRC_SHIFT 47
#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
#define I40E_L3_V6_SRC_SHIFT 43
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 7e9f6a69eb10..72ddcefc45b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1536,10 +1536,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
- */
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ /* Bail out if VFs are disabled. */
+ if (test_bit(__I40E_VF_DISABLE, pf->state))
+ return true;
+
+ /* If VF is being reset already we don't need to continue. */
+ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1576,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
}
@@ -1609,8 +1611,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+ /* If VF is being reset no need to trigger reset again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1626,9 +1632,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
- break;
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1656,6 +1664,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1667,6 +1679,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1676,8 +1692,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
i40e_cleanup_reset_vf(&pf->vf[v]);
+ }
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, pf->state);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index a554d0a0b09b..358bbdb58795 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -39,6 +39,7 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
+ I40E_VF_STATE_RESETTING
};
/* VF capabilities */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 6d4009e0cbd6..cd7b52fb6b46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,14 +10,6 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
- rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi_zc, 0,
@@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate a new SW ring; return -ENOMEM if the allocation fails.
+ * If the allocation succeeds, substitute the buffer with the new one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+ sizeof(*rx_ring->rx_bi);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ rx_ring->rx_bi_zc = sw_ring;
+ } else {
+ kfree(rx_ring->rx_bi_zc);
+ rx_ring->rx_bi_zc = NULL;
+ rx_ring->rx_bi = sw_ring;
+ }
+ return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffers for the rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+ struct i40e_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+ rx_ring = vsi->rx_rings[q];
+ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
* i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
* certain ring/qid
* @vsi: Current VSI
@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
if (err)
return err;
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+ if (err)
+ return err;
+
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+ if (err)
+ return err;
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index bb962987f300..821df248f8be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 001500afc4a6..f88ee051e71c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -137,6 +137,21 @@
*/
#define ICE_BW_KBPS_DIVISOR 125
+/* Default recipes have priority 4 and below, hence priority values between
+ * 5..7 can be used as filter priority for advanced switch filters (advanced
+ * switch filters need a new recipe to be created for the specified extraction
+ * sequence because the default recipe extraction sequence does not represent
+ * the custom extraction)
+ */
+#define ICE_SWITCH_FLTR_PRIO_QUEUE 7
+/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
+ * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
+ * SYN/FIN/RST))
+ */
+#define ICE_SWITCH_FLTR_PRIO_RSVD 6
+#define ICE_SWITCH_FLTR_PRIO_VSI 5
+#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI
+
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
@@ -594,6 +609,8 @@ struct ice_pf {
u16 num_dmac_chnl_fltrs;
struct hlist_head tc_flower_fltr_list;
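+ /* bitmap of Rx flexible descriptor formats (RXDIDs) supported by the
+ * loaded DDP package, as read from the flexiflag registers
+ */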
+ u64 supported_rxdids;
+
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index d16738a3d3a7..a92dc9a16035 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -110,6 +110,9 @@
#define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
+#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0f6718719453..df65e829ea33 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8283,7 +8283,7 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
rule.rid = fltr->rid;
rule.rule_id = fltr->rule_id;
- rule.vsi_handle = fltr->dest_id;
+ rule.vsi_handle = fltr->dest_vsi_handle;
status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
if (status) {
if (status == -ENOENT)
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index f68c555be4e9..faba0f857cd9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -724,7 +724,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
*/
fltr->rid = rule_added.rid;
fltr->rule_id = rule_added.rule_id;
- fltr->dest_id = rule_added.vsi_handle;
+ fltr->dest_vsi_handle = rule_added.vsi_handle;
exit:
kfree(list);
@@ -732,6 +732,116 @@ exit:
}
/**
+ * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to tc_flower_filter
+ *
+ * Locate the VSI using the specified queue. When ADQ is not enabled, always
+ * return the input VSI; otherwise locate the corresponding VSI based on the
+ * per-channel offset and qcount
+ */
+static struct ice_vsi *
+ice_locate_vsi_using_queue(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ int num_tc, tc, queue;
+
+ /* if ADQ is not active, passed VSI is the candidate VSI */
+ if (!ice_is_adq_active(vsi->back))
+ return vsi;
+
+ /* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
+ * upon queue number)
+ */
+ num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ queue = tc_fltr->action.fwd.q.queue;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ int qcount = vsi->mqprio_qopt.qopt.count[tc];
+ int offset = vsi->mqprio_qopt.qopt.offset[tc];
+
+ if (queue >= offset && queue < offset + qcount) {
+ /* for non-ADQ TCs, passed VSI is the candidate VSI */
+ if (tc < ICE_CHNL_START_TC)
+ return vsi;
+ else
+ return vsi->tc_map_vsi[tc];
+ }
+ }
+ return NULL;
+}
+
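+/**
+ * ice_locate_rx_ring_using_queue - locate Rx ring using the queue number
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to tc_flower_filter
+ *
+ * Return the Rx ring backing the queue from the filter's forward-to-queue
+ * action, or NULL if the queue number is out of range for this VSI.
+ */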
+static struct ice_rx_ring *
+ice_locate_rx_ring_using_queue(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ u16 queue = tc_fltr->action.fwd.q.queue;
+
+ return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;
+}
+
+/**
+ * ice_tc_forward_action - Determine destination VSI and queue for the action
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to TC flower filter structure
+ *
+ * Validates the tc forward action and determines the destination VSI and queue
+ * for the forward action.
+ */
+static struct ice_vsi *
+ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
+{
+ struct ice_rx_ring *ring = NULL;
+ struct ice_vsi *ch_vsi = NULL;
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ u32 tc_class;
+
+ dev = ice_pf_to_dev(pf);
+
+ /* Get the destination VSI and/or destination queue and validate them */
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ tc_class = tc_fltr->action.fwd.tc.tc_class;
+ /* Select the destination VSI */
+ if (tc_class < ICE_CHNL_START_TC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter because of unsupported destination");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ /* Locate ADQ VSI depending on hw_tc number */
+ ch_vsi = vsi->tc_map_vsi[tc_class];
+ break;
+ case ICE_FWD_TO_Q:
+ /* Locate the Rx queue */
+ ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr);
+ if (!ring) {
+ dev_err(dev,
+ "Unable to locate Rx queue for action fwd_to_queue: %u\n",
+ tc_fltr->action.fwd.q.queue);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Determine destination VSI even though the action is
+ * FWD_TO_QUEUE, because QUEUE is associated with VSI
+ */
+ ch_vsi = tc_fltr->dest_vsi;
+ break;
+ default:
+ dev_err(dev,
+ "Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
+ tc_fltr->action.fltr_act);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */
+ if (!ch_vsi) {
+ dev_err(dev,
+ "Unable to add filter because specified destination VSI doesn't exist\n");
+ return ERR_PTR(-EINVAL);
+ }
+ return ch_vsi;
+}
+
+/**
* ice_add_tc_flower_adv_fltr - add appropriate filter rules
* @vsi: Pointer to VSI
* @tc_fltr: Pointer to TC flower filter structure
@@ -772,11 +882,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return -EOPNOTSUPP;
}
- /* get the channel (aka ADQ VSI) */
- if (tc_fltr->dest_vsi)
- ch_vsi = tc_fltr->dest_vsi;
- else
- ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
+ /* validate forwarding action VSI and queue */
+ ch_vsi = ice_tc_forward_action(vsi, tc_fltr);
+ if (IS_ERR(ch_vsi))
+ return PTR_ERR(ch_vsi);
lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
@@ -790,30 +899,40 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
}
rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
- if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
- if (!ch_vsi) {
- NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
- ret = -EINVAL;
- goto exit;
- }
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = tc_fltr->cookie;
- rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
rule_info.sw_act.vsi_handle = ch_vsi->idx;
- rule_info.priority = 7;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
- tc_fltr->action.tc_class,
+ tc_fltr->action.fwd.tc.tc_class,
rule_info.sw_act.vsi_handle, lkups_cnt);
- } else {
+ break;
+ case ICE_FWD_TO_Q:
+ /* HW queue number in global space */
+ rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
+ rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
+ tc_fltr->action.fwd.q.queue,
+ tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
+ break;
+ default:
rule_info.sw_act.flag |= ICE_FLTR_TX;
+ /* In case of Tx (LOOKUP_TX), src needs to be src VSI */
rule_info.sw_act.src = vsi->idx;
+ /* 'Rx' is false, direction of rule (LOOKUP_TX) */
rule_info.rx = false;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
+ break;
}
- /* specify the cookie as filter_rule_id */
- rule_info.fltr_rule_id = tc_fltr->cookie;
-
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
@@ -831,19 +950,14 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
*/
tc_fltr->rid = rule_added.rid;
tc_fltr->rule_id = rule_added.rule_id;
- if (tc_fltr->action.tc_class > 0 && ch_vsi) {
- /* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
- * for PF ADQ filter, it is not yet set in tc_fltr,
- * hence store the dest_vsi ptr in tc_fltr
- */
- if (ch_vsi->type == ICE_VSI_CHNL)
- tc_fltr->dest_vsi = ch_vsi;
+ tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
+ if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
+ tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
+ tc_fltr->dest_vsi = ch_vsi;
/* keep track of advanced switch filter for
- * destination VSI (channel VSI)
+ * destination VSI
*/
ch_vsi->num_chnl_fltr++;
- /* in this case, dest_id is VSI handle (sw handle) */
- tc_fltr->dest_id = rule_added.vsi_handle;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
@@ -851,10 +965,22 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
pf->num_dmac_chnl_fltrs++;
}
- dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
- lkups_cnt, flags,
- tc_fltr->action.tc_class, rule_added.rid,
- rule_added.rule_id, rule_added.vsi_handle);
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
+ lkups_cnt, flags,
+ tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
+ rule_added.rule_id, rule_added.vsi_handle);
+ break;
+ case ICE_FWD_TO_Q:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u) , rid %u, rule_id %u\n",
+ lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
+ tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
+ rule_added.rule_id);
+ break;
+ default:
+ break;
+ }
exit:
kfree(list);
return ret;
@@ -1455,43 +1581,15 @@ ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
}
/**
- * ice_handle_tclass_action - Support directing to a traffic class
+ * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to TC flower offload structure
* @fltr: Pointer to TC flower filter structure
*
- * Support directing traffic to a traffic class
+ * Prepare ADQ filter with the required additional header fields
*/
static int
-ice_handle_tclass_action(struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower,
- struct ice_tc_flower_fltr *fltr)
+ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
- struct ice_vsi *main_vsi;
-
- if (tc < 0) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
- return -EINVAL;
- }
- if (!tc) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
- return -EINVAL;
- }
-
- if (!(vsi->all_enatc & BIT(tc))) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination");
- return -EINVAL;
- }
-
- /* Redirect to a TC class or Queue Group */
- main_vsi = ice_get_main_vsi(vsi->back);
- if (!main_vsi || !main_vsi->netdev) {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unable to add filter because of invalid netdevice");
- return -EINVAL;
- }
-
if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
(fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_SRC_MAC))) {
@@ -1503,9 +1601,8 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
/* For ADQ, filter must include dest MAC address, otherwise unwanted
* packets with unrelated MAC address get delivered to ADQ VSIs as long
* as remaining filter criteria is satisfied such as dest IP address
- * and dest/src L4 port. Following code is trying to handle:
- * 1. For non-tunnel, if user specify MAC addresses, use them (means
- * this code won't do anything
+ * and dest/src L4 port. The code below handles the following cases:
+ * 1. For non-tunnel, if the user specified MAC addresses, use them.
* 2. For non-tunnel, if user didn't specify MAC address, add implicit
* dest MAC to be lower netdev's active unicast MAC address
* 3. For tunnel, as of now TC-filter through flower classifier doesn't
@@ -1528,35 +1625,97 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
}
- /* validate specified dest MAC address, make sure either it belongs to
- * lower netdev or any of MACVLAN. MACVLANs MAC address are added as
- * unicast MAC filter destined to main VSI.
- */
- if (!ice_mac_fltr_exist(&main_vsi->back->hw,
- fltr->outer_headers.l2_key.dst_mac,
- main_vsi->idx)) {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
- return -EINVAL;
- }
-
/* Make sure VLAN is already added to main VSI, before allowing ADQ to
* add a VLAN based filter such as MAC + VLAN + L4 port.
*/
if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
- if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
- main_vsi->idx)) {
+ if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
return -EINVAL;
}
}
+ return 0;
+}
+
+/**
+ * ice_handle_tclass_action - Support directing to a traffic class
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Support directing traffic to a traffic class/queue-set
+ */
+static int
+ice_handle_tclass_action(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+
+ /* user specified hw_tc (must be non-zero for ADQ TC), action is forward
+ * to hw_tc (i.e. ADQ channel number)
+ */
+ if (tc < ICE_CHNL_START_TC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of unsupported destination");
+ return -EOPNOTSUPP;
+ }
+ if (!(vsi->all_enatc & BIT(tc))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of non-existence destination");
+ return -EINVAL;
+ }
fltr->action.fltr_act = ICE_FWD_TO_VSI;
- fltr->action.tc_class = tc;
+ fltr->action.fwd.tc.tc_class = tc;
- return 0;
+ return ice_prep_adq_filter(vsi, fltr);
+}
+
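+/**
+ * ice_tc_forward_to_queue - configure a filter to forward to an Rx queue
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to TC flower filter structure
+ * @act: Pointer to flow action entry
+ *
+ * Validate the requested Rx queue, record the queue and its absolute HW queue
+ * number in the filter, and locate the destination VSI owning that queue
+ * (which may be an ADQ channel VSI).
+ */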
+static int
+ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ struct ice_vsi *ch_vsi = NULL;
+ u16 queue = act->rx_queue;
+
+ if (queue >= vsi->num_rxq) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because specified queue is invalid");
+ return -EINVAL;
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_Q;
+ fltr->action.fwd.q.queue = queue;
+ /* determine corresponding HW queue */
+ fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];
+
+ /* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare
+ * ADQ switch filter
+ */
+ ch_vsi = ice_locate_vsi_using_queue(vsi, fltr);
+ if (!ch_vsi)
+ return -EINVAL;
+ fltr->dest_vsi = ch_vsi;
+ if (!ice_is_chnl_fltr(fltr))
+ return 0;
+
+ return ice_prep_adq_filter(vsi, fltr);
+}
+
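+/**
+ * ice_tc_parse_action - parse a single TC flower action
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to TC flower filter structure
+ * @act: Pointer to flow action entry
+ *
+ * Only the Rx queue mapping (forward to queue) action is supported;
+ * any other action is rejected with -EOPNOTSUPP.
+ */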
+static int
+ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ switch (act->id) {
+ case FLOW_ACTION_RX_QUEUE_MAPPING:
+ /* forward to queue */
+ return ice_tc_forward_to_queue(vsi, fltr, act);
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -1575,7 +1734,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
struct flow_action *flow_action = &rule->action;
struct flow_action_entry *act;
- int i;
+ int i, err;
if (cls_flower->classid)
return ice_handle_tclass_action(vsi, cls_flower, fltr);
@@ -1584,21 +1743,13 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
return -EINVAL;
flow_action_for_each(i, act, flow_action) {
- if (ice_is_eswitch_mode_switchdev(vsi->back)) {
- int err = ice_eswitch_tc_parse_action(fltr, act);
-
- if (err)
- return err;
- continue;
- }
- /* Allow only one rule per filter */
-
- /* Drop action */
- if (act->id == FLOW_ACTION_DROP) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
- return -EINVAL;
- }
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+ err = ice_eswitch_tc_parse_action(fltr, act);
+ else
+ err = ice_tc_parse_action(vsi, fltr, act);
+ if (err)
+ return err;
}
return 0;
}
@@ -1618,7 +1769,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
- rule_rem.vsi_handle = fltr->dest_id;
+ rule_rem.vsi_handle = fltr->dest_vsi_handle;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) {
if (err == -ENOENT) {
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 92642faad595..d916d1e92aa3 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -45,7 +45,20 @@ struct ice_indr_block_priv {
};
struct ice_tc_flower_action {
- u32 tc_class;
+ /* forward action specific params */
+ union {
+ struct {
+ u32 tc_class; /* forward to hw_tc */
+ u32 rsvd;
+ } tc;
+ struct {
+ u16 queue; /* forward to queue */
+ /* To add filter in HW, absolute queue number in global
+ * space of queues (between 0...N) is needed
+ */
+ u16 hw_queue;
+ } q;
+ } fwd;
enum ice_sw_fwd_act_type fltr_act;
};
@@ -131,11 +144,11 @@ struct ice_tc_flower_fltr {
*/
u16 rid;
u16 rule_id;
- /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon
- * destination type
+ /* VSI handle of the destination VSI (it could be main PF VSI, CHNL_VSI,
+ * VF VSI)
*/
- u16 dest_id;
- /* if dest_id is vsi_idx, then need to store destination VSI ptr */
+ u16 dest_vsi_handle;
+ /* ptr to destination VSI */
struct ice_vsi *dest_vsi;
/* direction of fltr for eswitch use case */
enum ice_eswitch_fltr_direction direction;
@@ -162,12 +175,23 @@ struct ice_tc_flower_fltr {
* @f: Pointer to tc-flower filter
*
* Criteria to determine if a given filter is a valid channel filter
- * or not is based on its "destination". If destination is hw_tc (aka tc_class)
- * and it is non-zero, then it is valid channel (aka ADQ) filter
+ * or not is based on its destination.
+ * For forward to VSI action, if destination is valid hw_tc (aka tc_class)
+ * and in supported range of TCs for ADQ, then return true.
+ * For forward to queue, as long as dest_vsi is valid and it is of type
+ * VSI_CHNL (PF ADQ VSI is of type VSI_CHNL), return true.
+ * NOTE: For forward to queue, the correct dest_vsi is still set in tc_fltr
+ * based on the specified destination queue.
*/
static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
{
- return !!f->action.tc_class;
+ if (f->action.fltr_act == ICE_FWD_TO_VSI)
+ return f->action.fwd.tc.tc_class >= ICE_CHNL_START_TC &&
+ f->action.fwd.tc.tc_class < ICE_CHNL_MAX_TC;
+ else if (f->action.fltr_act == ICE_FWD_TO_Q)
+ return f->dest_vsi && f->dest_vsi->type == ICE_VSI_CHNL;
+
+ return false;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 2b4c791b6cba..c1fa94381f4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -462,6 +462,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
@@ -1618,6 +1621,9 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
for (i = 0; i < qci->num_queue_pairs; i++) {
+ struct ice_hw *hw;
+ u32 rxdid;
+ u16 pf_q;
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
@@ -1686,6 +1692,25 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
}
+
+ /* VF Rx queue RXDID configuration */
+ pf_q = vsi->rxq_map[qpi->rxq.queue_id];
+ rxdid = qpi->rxq.rxdid;
+ hw = &vsi->back->hw;
+
+ /* If Rx flex desc is supported, select RXDID for Rx queues.
+ * Otherwise, use legacy 32byte descriptor format.
+ * Legacy 16byte descriptor is not supported. If this RXDID
+ * is selected, return error.
+ */
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x03, false);
}
/* send the response to the VF */
@@ -2457,6 +2482,62 @@ error_param:
}
/**
+ * ice_vc_query_rxdid - query RXDID supported by DDP package
+ * @vf: pointer to VF info
+ *
+ * Called from VF to query a bitmap of supported flexible
+ * descriptor RXDIDs of a DDP package.
+ */
+static int ice_vc_query_rxdid(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_supported_rxdids *rxdid = NULL;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_pf *pf = vf->pf;
+ int len = 0;
+ int ret, i;
+ u32 regval;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_supported_rxdids);
+ rxdid = kzalloc(len, GFP_KERNEL);
+ if (!rxdid) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ /* Read flexiflag registers to determine whether the
+ * corresponding RXDID is configured and supported or not.
+ * Since Legacy 16byte descriptor format is not supported,
+ * start from Legacy 32byte descriptor.
+ */
+ for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+ if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+ & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+ rxdid->supported_rxdids |= BIT(i);
+ }
+
+ pf->supported_rxdids = rxdid->supported_rxdids;
+
+err:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ v_ret, (u8 *)rxdid, len);
+ kfree(rxdid);
+ return ret;
+}
+
+/**
* ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
* @vf: VF to enable/disable VLAN stripping for on initialization
*
@@ -3490,6 +3571,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .query_rxdid = ice_vc_query_rxdid,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -3624,6 +3706,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .query_rxdid = ice_vc_query_rxdid,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -3764,6 +3847,9 @@ error_handler:
case VIRTCHNL_OP_DEL_VLAN:
err = ops->remove_vlan_msg(vf, msg);
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ err = ops->query_rxdid(vf);
+ break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
err = ops->ena_vlan_stripping(vf);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index b5a3fd8adbb4..4867a92ebefb 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -17,6 +17,7 @@
* broadcast, and 16 for additional unicast/multicast filters
*/
#define ICE_MAX_MACADDR_PER_VF 18
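+/* maximum number of flexible Rx descriptor formats (RXDIDs); supported
+ * RXDIDs are tracked as a 64-bit bitmap, one bit per RXDID
+ */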
+#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
@@ -35,6 +36,7 @@ struct ice_virtchnl_ops {
int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*query_rxdid)(struct ice_vf *vf);
int (*ena_vlan_stripping)(struct ice_vf *vf);
int (*dis_vlan_stripping)(struct ice_vf *vf);
int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 5a82216e7d03..7d547fa616fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -70,6 +70,11 @@ static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
};
+/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
+static const u32 rx_flex_desc_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+};
+
/* VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF */
static const u32 adv_rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_RSS_CFG, VIRTCHNL_OP_DEL_RSS_CFG,
@@ -96,6 +101,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, req_queues_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN, vlan_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, rx_flex_desc_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),