From patchwork Mon Oct 10 07:25:44 2022
X-Patchwork-Submitter: Jie Wang <jie1x.wang@intel.com>
X-Patchwork-Id: 117790
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Jie Wang <jie1x.wang@intel.com>
To: dev@dpdk.org
Cc: stevex.yang@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com,
 jingjing.wu@intel.com, beilei.xing@intel.com, Jie Wang <jie1x.wang@intel.com>
Subject: [PATCH v2] net/iavf: support raw packet for flow subscription
Date: Mon, 10 Oct 2022 15:25:44 +0800
Message-Id: <20221010072544.142046-1-jie1x.wang@intel.com>
X-Mailer: git-send-email 2.25.1
List-Id: DPDK patches and discussions

Add Protocol Agnostic Flow (raw flow) support for flow subscription
in AVF.
For example, testpmd creates a flow subscription raw packet rule:

rule: eth + ipv4 src is 1.1.1.1 dst is 2.2.2.2

cmd: flow create 0 ingress pattern raw pattern spec \
00000000000000000000000008004500001400000000000000000101010102020202 \
pattern mask \
0000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFF \
/ end actions port_representor port_id 0 / end

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
v2: remove flow action passthru
---
 drivers/net/iavf/iavf_fsub.c | 88 ++++++++++++++++++++++++++++++++----
 1 file changed, 78 insertions(+), 10 deletions(-)

diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 3be75923a5..4082c0069f 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -57,6 +57,7 @@ static struct iavf_flow_parser iavf_fsub_parser;
 
 static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_raw,			IAVF_INSET_NONE,		IAVF_INSET_NONE},
 	{iavf_pattern_ethertype,		IAVF_SW_INSET_ETHER,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_SW_INSET_MAC_IPV4,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_vlan_ipv4,		IAVF_SW_INSET_MAC_VLAN_IPV4,	IAVF_INSET_NONE},
@@ -153,6 +154,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 {
 	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
 	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -164,20 +166,83 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 	uint64_t outer_input_set = IAVF_INSET_NONE;
 	uint64_t *input = NULL;
 	uint16_t input_set_byte = 0;
-	uint16_t j;
+	uint8_t item_num = 0;
 	uint32_t layer = 0;
 
-	for (item = pattern; item->type !=
-			RTE_FLOW_ITEM_TYPE_END; item++) {
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
 					   "Not support range");
-			return false;
+			return -rte_errno;
 		}
+
 		item_type = item->type;
+		item_num++;
 
 		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_RAW: {
+			raw_spec = item->spec;
+			raw_mask = item->mask;
+
+			if (item_num != 1)
+				return -rte_errno;
+
+			if (raw_spec->length != raw_mask->length)
+				return -rte_errno;
+
+			uint16_t pkt_len = 0;
+			uint16_t tmp_val = 0;
+			uint8_t tmp = 0;
+			int i, j;
+
+			pkt_len = raw_spec->length;
+
+			for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+				tmp = raw_spec->pattern[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = raw_spec->pattern[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val += (tmp - 'a' + 10);
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val += (tmp - 'A' + 10);
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val += (tmp - '0');
+
+				hdrs->raw.spec[j] = tmp_val;
+
+				tmp = raw_mask->pattern[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = raw_mask->pattern[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val += (tmp - 'a' + 10);
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val += (tmp - 'A' + 10);
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val += (tmp - '0');
+
+				hdrs->raw.mask[j] = tmp_val;
+			}
+
+			hdrs->raw.pkt_len = pkt_len / 2;
+			hdrs->tunnel_level = 0;
+			hdrs->count = 0;
+			return 0;
+		}
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			eth_spec = item->spec;
 			eth_mask = item->mask;
@@ -236,7 +301,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid IPv4 mask.");
-				return false;
+				return -rte_errno;
 			}
 
 			if (ipv4_mask->hdr.src_addr) {
@@ -268,7 +333,9 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 			hdrs->count = ++layer;
 			break;
-		case RTE_FLOW_ITEM_TYPE_IPV6:
+		case RTE_FLOW_ITEM_TYPE_IPV6: {
+			int j;
+
 			ipv6_spec = item->spec;
 			ipv6_mask = item->mask;
@@ -283,7 +350,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid IPv6 mask");
-				return false;
+				return -rte_errno;
 			}
 
 			for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
@@ -329,6 +396,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 			hdrs->count = ++layer;
 			break;
+		}
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			udp_spec = item->spec;
 			udp_mask = item->mask;
@@ -345,7 +413,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid UDP mask");
-				return false;
+				return -rte_errno;
 			}
 
 			if (udp_mask->hdr.src_port) {
@@ -386,7 +454,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid TCP mask");
-				return false;
+				return -rte_errno;
 			}
 
 			if (tcp_mask->hdr.src_port) {
@@ -427,7 +495,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid VLAN input set.");
-				return false;
+				return -rte_errno;
 			}
 
 			rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
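
A note on the RTE_FLOW_ITEM_TYPE_RAW parsing above: testpmd delivers the raw
spec and mask as ASCII hex strings in rte_flow_item_raw.pattern, and the new
case folds each pair of hex characters into one byte of the virtchnl spec/mask
buffers, which is why hdrs->raw.pkt_len ends up as raw_spec->length / 2. Below
is a minimal stand-alone sketch of the same conversion; hex_str_to_bytes() and
the main() harness are hypothetical illustrations for this review, not code
from the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Convert an ASCII hex string of 'len' characters into len / 2 bytes.
 * Returns 0 on success, -1 on an odd length or a non-hex character. */
static int
hex_str_to_bytes(const uint8_t *hex, uint16_t len, uint8_t *out)
{
	uint16_t i;

	if (len & 1)
		return -1;

	for (i = 0; i < len; i++) {
		uint8_t c = hex[i];
		uint8_t v;

		if (c >= '0' && c <= '9')
			v = c - '0';
		else if (c >= 'a' && c <= 'f')
			v = c - 'a' + 10;
		else if (c >= 'A' && c <= 'F')
			v = c - 'A' + 10;
		else
			return -1;

		if (i & 1)
			out[i / 2] |= v;                /* low nibble  */
		else
			out[i / 2] = (uint8_t)(v << 4); /* high nibble */
	}

	return 0;
}

int
main(void)
{
	/* Ether type 0x0800 plus the first two bytes of an IPv4 header,
	 * taken from the spec string in the commit message. */
	const uint8_t hex[] = "08004500";
	uint8_t buf[sizeof(hex) / 2];
	uint16_t i, n = (uint16_t)strlen((const char *)hex);

	if (hex_str_to_bytes(hex, n, buf) == 0) {
		for (i = 0; i < n / 2; i++)
			printf("%02x ", buf[i]); /* prints: 08 00 45 00 */
		putchar('\n');
	}

	return 0;
}

For scale, the spec string in the commit-message example is 68 hex characters,
i.e. a 34-byte packet (14-byte Ethernet header plus 20-byte IPv4 header), and
the sixteen trailing F characters of the mask cover exactly the 4-byte source
and destination addresses at the end of that packet.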