[v6,2/3] net/iavf: enable Protocol Agnostic Flow Offloading FDIR
Checks
Commit Message
This patch enables Protocol Agnostic Flow (raw flow) Offloading for Flow
Director (FDIR) in AVF, based on the Parser Library feature and the
existing rte_flow `raw` API.
The input spec and mask of raw pattern are first parsed via the
Parser Library, and then passed to the kernel driver to create the
flow rule.
Similar to PF FDIR, each raw flow requires:
1. A byte string of raw target packet bits.
2. A byte string containing the mask of the target packet.
Here is an example:
FDIR matching ipv4 dst addr with 1.2.3.4 and redirect to queue 3:
flow create 0 ingress pattern raw \
pattern spec \
00000000000000000000000008004500001400004000401000000000000001020304 \
pattern mask \
000000000000000000000000000000000000000000000000000000000000ffffffff \
/ end actions queue index 3 / mark id 3 / end
Note that the mask of some key bits (e.g., 0x0800 to indicate the ipv4
proto) is optional in our case. To avoid redundancy, we just omit the
mask of 0x0800 (with 0xFFFF) in the mask byte string example. The prefix
'0x' for the spec and mask byte (hex) strings is also omitted here.
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
doc/guides/rel_notes/release_22_07.rst | 1 +
drivers/net/iavf/iavf_fdir.c | 67 ++++++++++++++++++++++++++
drivers/net/iavf/iavf_generic_flow.c | 6 +++
drivers/net/iavf/iavf_generic_flow.h | 3 ++
4 files changed, 77 insertions(+)
Comments
> -----Original Message-----
> From: Guo, Junfeng <junfeng.guo@intel.com>
> Sent: Monday, May 23, 2022 10:32 AM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Xu, Ting <ting.xu@intel.com>; Guo, Junfeng
> <junfeng.guo@intel.com>
> Subject: [PATCH v6 2/3] net/iavf: enable Protocol Agnostic Flow Offloading FDIR
>
> This patch enabled Protocol Agnostic Flow (raw flow) Offloading Flow Director
> (FDIR) in AVF, based on the Parser Library feature and the existing rte_flow
> `raw` API.
>
> The input spec and mask of raw pattern are first parsed via the Parser Library,
> and then passed to the kernel driver to create the flow rule.
>
> Similar as PF FDIR,
Re-worded to below during merging.
"Similar to ice PMD's implementation"
> each raw flow requires:
@@ -65,6 +65,7 @@ New Features
* Added Tx QoS queue rate limitation support.
* Added quanta size configuration support.
* Added ``DEV_RX_OFFLOAD_TIMESTAMP`` support.
+ * Added Protocol Agnostic Flow Offloading support in AVF FDIR and RSS.
* **Updated Intel ice driver.**
@@ -194,6 +194,7 @@
IAVF_INSET_TUN_TCP_DST_PORT)
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+ {iavf_pattern_raw, IAVF_INSET_NONE, IAVF_INSET_NONE},
{iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
{iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
{iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE},
@@ -720,6 +721,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
struct virtchnl_proto_hdrs *hdrs =
&filter->add_fltr.rule_cfg.proto_hdrs;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -746,6 +748,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
enum rte_flow_item_type next_type;
uint8_t tun_inner = 0;
uint16_t ether_type, flags_version;
+ uint8_t item_num = 0;
int layer = 0;
uint8_t ipv6_addr_mask[16] = {
@@ -763,8 +766,72 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"Not support range");
}
+ item_num++;
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_RAW: {
+ raw_spec = item->spec;
+ raw_mask = item->mask;
+
+ if (item_num != 1)
+ return -rte_errno;
+
+ if (raw_spec->length != raw_mask->length)
+ return -rte_errno;
+
+ uint16_t pkt_len = 0;
+ uint16_t tmp_val = 0;
+ uint8_t tmp = 0;
+ int i, j;
+
+ pkt_len = raw_spec->length;
+
+ for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+ tmp = raw_spec->pattern[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = raw_spec->pattern[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val += (tmp - 'a' + 10);
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val += (tmp - 'A' + 10);
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val += (tmp - '0');
+
+ hdrs->raw.spec[j] = tmp_val;
+
+ tmp = raw_mask->pattern[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = raw_mask->pattern[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val += (tmp - 'a' + 10);
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val += (tmp - 'A' + 10);
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val += (tmp - '0');
+
+ hdrs->raw.mask[j] = tmp_val;
+ }
+
+ hdrs->raw.pkt_len = pkt_len / 2;
+ hdrs->tunnel_level = 0;
+ hdrs->count = 0;
+ return 0;
+ }
+
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = item->spec;
eth_mask = item->mask;
@@ -48,6 +48,12 @@ const struct rte_flow_ops iavf_flow_ops = {
.query = iavf_flow_query,
};
+/* raw */
+enum rte_flow_item_type iavf_pattern_raw[] = {
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
/* empty */
enum rte_flow_item_type iavf_pattern_empty[] = {
RTE_FLOW_ITEM_TYPE_END,
@@ -180,6 +180,9 @@
#define IAVF_INSET_L2TPV2 \
(IAVF_PROT_L2TPV2 | IAVF_L2TPV2_SESSION_ID)
+/* raw pattern */
+extern enum rte_flow_item_type iavf_pattern_raw[];
+
/* empty pattern */
extern enum rte_flow_item_type iavf_pattern_empty[];